Column              Stat           Min  Max
query_id            stringlengths  32   32
query               stringlengths  9    4.01k
positive_passages   listlengths    1    1
negative_passages   listlengths    88   101
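Concretely, each row pairs one natural-language query (keyed by a 32-character query_id) with exactly one positive passage and 88 to 101 negative passages; every passage object carries "docid", "score", "text", and "title" fields. Below is a minimal sketch of reading rows of this shape, assuming the split is stored as JSON Lines; the file name "train.jsonl" is an illustrative assumption, not something the dump specifies.

import json

# Read rows of the schema above from a JSON Lines file.
# The path "train.jsonl" is an assumption made for illustration.
def iter_rows(path="train.jsonl"):
    with open(path, encoding="utf-8") as f:
        for line in f:
            yield json.loads(line)

for row in iter_rows():
    # Each passage is a dict with "docid", "score", "text", and "title".
    positives = row["positive_passages"]   # length 1 per the stats above
    negatives = row["negative_passages"]   # length 88 to 101 per the stats above
    print(row["query_id"], row["query"][:60])
    print(len(positives), "positive,", len(negatives), "negatives")
    break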
query_id: 3c3766f1297fd85edb092f78192a84df
query: Update a Stateful Node State Arguments
[ { "docid": "8cdeebced06899fc39392bbc52dd47c3", "score": "0.0", "text": "def update_stateful_node_state(self, node_id: str, state: str):\n body_json = json.dumps(dict(state=state))\n\n response = self.send_put(\n body=body_json,\n url=self.__base_stateful_node_url + \"/\" + node_id + \"/state\",\n entity_name=self.ENTITY_NAME)\n\n formatted_response = self.convert_json(\n response, self.camel_to_underscore)\n\n return formatted_response[\"response\"][\"status\"]", "title": "" } ]
[ { "docid": "20470c312404ef6ea44cc12a2a14db5c", "score": "0.7455913", "text": "def update_state(self, state, args):\n # TODO: Implement state update\n # This will be a big function\n state(self, *args)", "title": "" }, { "docid": "813bf3aaef1bb46886d0893211b44434", "score": "0.73417944", "text": "def update_state(self, *args, **kwargs):\n NotImplementedError('Must be implemented in subclasses.')", "title": "" }, { "docid": "01319db0b23e2a09a24bdbe7646aa631", "score": "0.66725576", "text": "def update_action_state(*args):\n return _idaapi.update_action_state(*args)", "title": "" }, { "docid": "45028962b1231627c9373a099c7ea1b0", "score": "0.6425275", "text": "def update_state(self, context: Context) -> None:", "title": "" }, { "docid": "d52b32d8b2baf1c3dd5d1e8b0afb4ba9", "score": "0.61812276", "text": "def Update(*args, **kwargs):\n pass", "title": "" }, { "docid": "972e7dd9c0b83a2322df6c3f260bc1bb", "score": "0.61624795", "text": "def do_State(self, arg):\n if arg == '.all()':\n self.do_all('State')\n if arg == '.count()':\n self.count('State')\n if arg.startswith('.show(') is True:\n idx_1 = arg.index('\"') + 1\n str_pos1 = arg[idx_1:]\n idx_2 = str_pos1.index('\"')\n id_str = str_pos1[:idx_2]\n self.do_show(\"State {}\".format(id_str))\n if arg.startswith('.update(') is True:\n idx_1 = arg.index('\"') + 1\n str_pos1 = arg[idx_1:]\n idx_2 = str_pos1.index('\"')\n id_str = str_pos1[:idx_2]\n\n idx_1 = str_pos1.index('\"') + 1\n str_pos1 = str_pos1[idx_1:]\n idx_1 = str_pos1.index('\"') + 1\n str_pos1 = str_pos1[idx_1:]\n idx_1 = str_pos1.index('\"')\n key = str_pos1[:idx_1]\n\n idx_1 = str_pos1.index('\"') + 1\n str_pos1 = str_pos1[idx_1:]\n idx_1 = str_pos1.index('\"') + 1\n str_pos1 = str_pos1[idx_1:]\n idx_1 = str_pos1.index('\"')\n value = str_pos1[:idx_1]\n self.do_update(\"State {} {} {}\".format(id_str, key, value))", "title": "" }, { "docid": "6f566f56d37afdc589ac564582dd8ec5", "score": "0.613339", "text": "def update(*args, **kwargs):\n return", "title": "" }, { "docid": "5ea5dde1c305f815645abb804d0ca3c8", "score": "0.6110607", "text": "def __call__(self, *args, **kwargs):\n update_op = self.update_state(*args, **kwargs) # pylint: disable=not-callable\n with ops.control_dependencies([update_op]):\n return self.result() # pylint: disable=not-callable", "title": "" }, { "docid": "9cafcca2ba61d86c8d235c7dc589802d", "score": "0.60163367", "text": "def update(self, *args, **kwargs):\n attr = [\"id\", \"size\", \"x\", \"y\"]\n my_len = len(attr)\n if len(args) > 0:\n for i, argv in enumerate(args):\n if i < my_len:\n\n self.__setattr__(attr[i], argv)\n else:\n for k, v in kwargs.items():\n self.__setattr__(k, v)", "title": "" }, { "docid": "3818f14d1322ae343682643d63445309", "score": "0.59552693", "text": "def update_task_state(self, args):\n conn = self.get_conn(self.db_name)\n cursor = self.get_cursor(conn)\n try:\n query_str = \"\"\"\n UPDATE task\n SET task.current_state=%s\n WHERE task.task_id=%s\n \"\"\"\n data = (\n args['new_state'],\n args['task_id'],\n )\n cursor.execute(query_str, data)\n conn.commit()\n except Exception as e:\n self.logger.error(\"[ update_task_state ] Query=%s ; Exception=%s\"\\\n %(cursor._last_executed, str(e)))\n return -1", "title": "" }, { "docid": "70c5a0182acf4262ca589d0024645d3f", "score": "0.5945776", "text": "def __setstate__(self, state: dict) -> None:\n\n self.update(state)", "title": "" }, { "docid": "84534da3eefc827433e2ccebdc80631d", "score": "0.59405214", "text": "def set_process_state(*args):\n return _idaapi.set_process_state(*args)", 
"title": "" }, { "docid": "65a724fb8f367626e472b52896508a3d", "score": "0.59106445", "text": "def __setstate__(self, d):\n self.__dict__.update(d)\n self._update_params()", "title": "" }, { "docid": "90f89dd3d643e7cbe84273d59caecf1f", "score": "0.59053916", "text": "def update(self, *args, **kwargs):\n if args and len(args) != 0:\n for i in range(len(args)):\n if i == 0:\n self.id = args[i]\n if i == 1:\n self.size = args[i]\n if i == 2:\n self.x = args[i]\n if i == 3:\n self.y = args[i]\n else:\n for kw in kwargs:\n if kw == \"id\":\n self.id = (kwargs[kw])\n if kw == \"size\":\n self.size = (kwargs[kw])\n if kw == \"x\":\n self.x = (kwargs[kw])\n if kw == \"y\":\n self.y = (kwargs[kw])", "title": "" }, { "docid": "d7dc197777e6d6932e9593b70daa9ba4", "score": "0.5901639", "text": "def update(self, *args, **kwargs):\n if (len(args) > 0) and (args is not None):\n for post, arg in enumerate(args):\n if post is 0:\n self.id = arg\n if post is 1:\n self.size = arg\n if post is 2:\n self.x = arg\n if post is 3:\n self.y = arg\n else:\n if \"id\" in kwargs:\n self.id = kwargs[\"id\"]\n if \"size\" in kwargs:\n self.size = kwargs[\"size\"]\n if \"x\" in kwargs:\n self.x = kwargs[\"x\"]\n if \"y\" in kwargs:\n self.y = kwargs[\"y\"]", "title": "" }, { "docid": "e7cccdacd01da059eb32306cebaa5aed", "score": "0.58919305", "text": "def update(self, *args, **kwargs):\n if len(args):\n for i, arg in enumerate(args):\n if i == 0:\n self.id = arg\n if i == 1:\n self.size = arg\n if i == 2:\n self.x = arg\n if i == 3:\n self.y = arg\n else:\n if 'id' in kwargs:\n self.id = kwargs.get(\"id\")\n if 'size' in kwargs:\n self.size = kwargs.get(\"size\")\n if 'x' in kwargs:\n self.x = kwargs.get(\"x\")\n if 'y' in kwargs:\n self.y = kwargs.get(\"y\")", "title": "" }, { "docid": "f66b509b92bccb0fafe5dc573f2bd282", "score": "0.58502334", "text": "def __init__(self, update_task_state, task_state, expected_state=None):\n self.update_task_state = update_task_state\n self.task_state = task_state\n self.kwargs = {}\n if expected_state is not None:\n # We only want to pass expected state if it's not None! 
That's so\n # we take the update_task_state method's default.\n self.kwargs['expected_state'] = expected_state\n super(UpdateTaskState, self).__init__(\n name='update_task_state_%s' % task_state)", "title": "" }, { "docid": "3dafcc468e0011ac8cc6e1d5b28b4194", "score": "0.5836385", "text": "def update(self, **kwargs):\n pass", "title": "" }, { "docid": "17637bef05ebc6953fd709219b805bfe", "score": "0.58158404", "text": "def update(self, *args, **kwargs):\n my_list = [None, None, None, None]\n\n for i in range(len(args)):\n my_list[i] = args[i]\n\n if my_list[0] is not None:\n self.id = args[0]\n elif 'id' in kwargs:\n self.id = kwargs['id']\n if my_list[1] is not None:\n self.size = args[1]\n elif 'size' in kwargs:\n self.size = kwargs['size']\n if my_list[2] is not None:\n self.x = args[2]\n elif 'x' in kwargs:\n self.x = kwargs['x']\n if my_list[3] is not None:\n self.y = args[3]\n elif 'y' in kwargs:\n self.y = kwargs['y']", "title": "" }, { "docid": "17bf95ab3d59114a92f42010bb09c75e", "score": "0.5802156", "text": "def update(self, state, action, nextState, reward):\n \"*** YOUR CODE HERE ***\"\n util.raiseNotDefined()", "title": "" }, { "docid": "8ea910937bcab270f1ead73b3a4c638e", "score": "0.57930785", "text": "def update(self, **args):\n for key, value in args.items():\n setattr(self, key, value)", "title": "" }, { "docid": "be3c8fcb45c84d124a8b7b53a374dbf5", "score": "0.5783724", "text": "def __setstate__(self, state):\n self.__dict__, self.params, self.dispersion = state", "title": "" }, { "docid": "345bbaffef60857f3eef03397414cdf3", "score": "0.5781324", "text": "def _updateState(self):\n pass", "title": "" }, { "docid": "48294cde283458573b2acbba41da6bb6", "score": "0.5768292", "text": "def update(self, state, action, nextState, reward):\n util.raiseNotDefined()", "title": "" }, { "docid": "edac0d0a48e3c9eb04bd9cc2ad1aa562", "score": "0.576673", "text": "def update(self, *args, **kwargs):\n raise NotImplementedError", "title": "" }, { "docid": "fe2651ff41dd284964f78c0e0d852cd7", "score": "0.5759295", "text": "def set_internal_state(self, state_dict):", "title": "" }, { "docid": "96e4431399c163d9b6257d788ec52e44", "score": "0.57470274", "text": "def update(self, updates):\n self.context_arguments.update(updates)", "title": "" }, { "docid": "8c204dfe696d7bdb8cb1662f23b116f6", "score": "0.5742386", "text": "def update(self, *args, **kwargs): # pylint: disable=arguments-differ\n pass", "title": "" }, { "docid": "6636f3dce903d563f6c50b616d2c0445", "score": "0.57352924", "text": "def update(self, state, action, reward, next_state, is_done=False):\n pass", "title": "" }, { "docid": "b46ce9f99fd305da3f13fa68a1f7f719", "score": "0.5732188", "text": "def _update_random_state_parameter(self, function, parameters):\n all_params = (\n getfullargspec(function).args +\n getfullargspec(function).kwonlyargs\n )\n if 'random_state' in all_params:\n if 'random_state' in parameters:\n logger.warning(\n \"Parameter 'random_state=%s' is ignored for '%s', use the \"\n \"'random_state' option to initialize the MLRModel class \"\n \"instead\",\n parameters['random_state'],\n self._CLF_TYPE,\n )\n parameters['random_state'] = self.random_state\n logger.debug(\n \"Updated 'random_state' parameter of '%s' to '%s'\",\n self._CLF_TYPE,\n self.random_state,\n )\n return parameters", "title": "" }, { "docid": "08bc46835aedfe855b46754e1a92f01b", "score": "0.5725581", "text": "def __setstate__(self, state: Dict):\n self.__dict__.update(state)", "title": "" }, { "docid": "d9cf3515b3245752b94c3147c79cc746", "score": 
"0.57240474", "text": "def update(self, *args, **kwargs):\n\n list_attr = [\"id\", \"size\", \"x\", \"y\"]\n\n if len(args) != 0:\n for index, arg in enumerate(args):\n setattr(self, list_attr[index], arg)\n else:\n for key, value in kwargs.items():\n setattr(self, key, value)", "title": "" }, { "docid": "4840749305da75d5739fd3bb95738fad", "score": "0.5700227", "text": "def _set_state_variables(self, updates):\n # TODO(momernick): Do we need to do any more input sanitization?\n if not self.built:\n raise RuntimeError('_set_state_variables() must be called after build().')\n\n assignments = []\n for var_name, value in updates.items():\n assignments.append(\n state_ops.assign(self.state_variables[var_name], value))\n K.get_session().run(assignments)", "title": "" }, { "docid": "4840749305da75d5739fd3bb95738fad", "score": "0.5700227", "text": "def _set_state_variables(self, updates):\n # TODO(momernick): Do we need to do any more input sanitization?\n if not self.built:\n raise RuntimeError('_set_state_variables() must be called after build().')\n\n assignments = []\n for var_name, value in updates.items():\n assignments.append(\n state_ops.assign(self.state_variables[var_name], value))\n K.get_session().run(assignments)", "title": "" }, { "docid": "9760d5832063907743cc47d5933eb0da", "score": "0.56991756", "text": "def set_state(self, state_dict: Mapping[str, Any]) -> None:", "title": "" }, { "docid": "260dde66273213041b29f42fc126754b", "score": "0.56911874", "text": "def update(self, state, action, reward, next_state):\n raise NotImplementedError()", "title": "" }, { "docid": "dfaeed5606348cbb06d36a577ff7b5dc", "score": "0.5679298", "text": "def update_state(self, state, meta=\"\"):\n self.state = state\n self.info = meta\n self.save()", "title": "" }, { "docid": "1bd1c40368d6f4bcd5a6471654d1bf50", "score": "0.567693", "text": "def receive_state(self, parameters):", "title": "" }, { "docid": "67bcd8373d3a3f43ee79270a715982a5", "score": "0.56762755", "text": "def arg_node(key):\n return _ivy_ag.states[key]", "title": "" }, { "docid": "3bdd0e760ee78dcb7e4a72b519abbd1f", "score": "0.5662069", "text": "def __setstate__(self, state):\n self.update(state)", "title": "" }, { "docid": "d7535604ba2b7bfe3ff14eb5f54efdc3", "score": "0.5648154", "text": "def set_state(self, state):", "title": "" }, { "docid": "d4bb6a8e13f68e3ee722d2beaa80a6ba", "score": "0.56418604", "text": "def __setstate__(self, newstate):\n for key, val in list(newstate.items()):\n self[key] = val[1]", "title": "" }, { "docid": "0240ce5f18c81cd78b4880fe2cb77eef", "score": "0.5629445", "text": "def update_state(self, sia_event: SIAEvent) -> None:", "title": "" }, { "docid": "deea063cdf85c554d0be10f112a04256", "score": "0.5607898", "text": "def makestate(self,*a,**ka):\n raise NotImplementedError()", "title": "" }, { "docid": "a386be0cf9e3b48064d87fe19b358c20", "score": "0.5604072", "text": "def __setstate__(self, state):\n self.__dict__.update(state)", "title": "" }, { "docid": "d19334d3ec558db6e2190df8aed187bf", "score": "0.56017345", "text": "def update(self, *args, **kwargs):\n\n attr = [\"id\", \"size\", \"x\", \"y\"]\n if args is not None:\n for i in range(len(args)):\n setattr(self, attr[i], args[i])\n if kwargs is not None or args is None:\n for k, v in kwargs.items():\n setattr(self, k, v)", "title": "" }, { "docid": "776194394682af2e981937a4116b0815", "score": "0.55786127", "text": "def update(self, *args, **kwargs):\n\n attrs_sq = [\"id\", \"size\", \"x\", \"y\"]\n\n for position_sq, var in enumerate(args):\n if position_sq 
> (len(attrs_sq) - 1):\n break\n setattr(self, attrs_sq[position_sq], var)\n\n if len(args) == 0:\n for key, value in kwargs.items():\n setattr(self, key, value)", "title": "" }, { "docid": "7f27501e841f8872dac5bf0fd33bbc1b", "score": "0.55750257", "text": "def __setstate__(self, state):\n print(\"setting state from {}\".format(state))\n # Swap x and y, just for fun\n self.x = state[1]['y']\n self.y = state[0]['x']", "title": "" }, { "docid": "c2e051c8e9775f389775cade3876ba1e", "score": "0.55701226", "text": "def do_update(self, args):\n if self.context.get(\"resource\") is None:\n print(\"Not in the context of a resource.\")\n return\n parser = update_parser()\n try:\n parsed = vars(parser.parse_args(shlex.split(args)))\n except:\n return\n nodekey = parsed[\"nodekey\"]\n data = parsed.get(\"data\")\n if data is None:\n with open(parsed[\"data_path\"]) as f:\n data = f.read()\n etag = parsed.get(\"etag\")\n insert = parsed.get(\"insert\")\n if insert is None:\n insert = Insert.CHILD\n else:\n insert = Insert[insert.upper()]\n try:\n self.resource.update(nodekey, data, insert, etag)\n except SirixServerError as e:\n print(e)", "title": "" }, { "docid": "5c4e6c43838f4978d54eca932cb44746", "score": "0.5560076", "text": "def do_State(self, arg):\n cmd, line = pattern(arg)\n self.onecmd(' '.join([cmd, 'State', line]))", "title": "" }, { "docid": "796b0f300dda579dab3d49a64427cdec", "score": "0.5553489", "text": "def update(self, sess, states, actions, targets, summary): # after states actions\n return sess.run((self.loss, self.outputs, self.step),\n feed_dict={self.inp: states, self.action: actions, self.y_: targets})[0] # self.action: actions,", "title": "" }, { "docid": "b920aea09e24cddb49172cd0016de895", "score": "0.5553112", "text": "def _node_updates(self, connection):", "title": "" }, { "docid": "b72f31a474ebe48ff14ee4b234a326b4", "score": "0.55435884", "text": "def set_params(self, *args) -> None:\n self._update_data(self.t, *args)", "title": "" }, { "docid": "5ae7fcea7aa257b39d22990ba31c2661", "score": "0.5541279", "text": "def update(self, *args, **kwargs):\n if len(args) != 0 and args is not None:\n try:\n self.id = args[0]\n self.size = args[1]\n self.x = args[2]\n self.y = args[3]\n except IndexError:\n pass\n else:\n for i in kwargs.keys():\n try:\n getattr(self, i)\n except Exception as er:\n raise er\n setattr(self, i, kwargs[i])", "title": "" }, { "docid": "f312aafc1b1a44362a93635755890967", "score": "0.5538075", "text": "def __setstate__(self, state):\n for attr, value in state.iteritems():\n setattr(self, attr, value)", "title": "" }, { "docid": "dfe4187751503a2ea1750453ef811154", "score": "0.55257475", "text": "def update_rp(self, *args):\n # print(args)\n self.rp.update_data(*args)", "title": "" }, { "docid": "6054ff0415e8ee39ef7a3cef9bd5e771", "score": "0.5523327", "text": "def updateState(self) :\n x = 1", "title": "" }, { "docid": "3c83578423a70de10f645bce093cce1c", "score": "0.55228835", "text": "def update(self, *args, **kwargs):\n for k, v in dict(*args, **kwargs).items():\n self[k] = v", "title": "" }, { "docid": "68a68c7cb09ba99d373f2a736cadec1f", "score": "0.55179816", "text": "def update(self, *args, **kwargs):\n ls = [\"id\", \"width\", \"height\", \"x\", \"y\"]\n if args and len(args) != 0:\n for i in range(0, len(args)):\n setattr(self, ls[i], args[i])\n elif kwargs and len(kwargs) != 0:\n for key, value in kwargs.items():\n setattr(self, key, value)", "title": "" }, { "docid": "47c3e29a5f411dd4f98b77a736ff96c8", "score": "0.55165267", "text": "def update(self, 
**kwargs):\n pass", "title": "" }, { "docid": "a8296f4c4e77fc1fb64079a59256eae3", "score": "0.5508084", "text": "def model_update(self, state: Dict[str, Any], weight: Collection[tf.Variable],\n grad: Collection[tf.Tensor],\n round_idx: int) -> Dict[str, Any]:\n raise NotImplementedError", "title": "" }, { "docid": "8b5e1285c6205bfdc607040ad4ae7b73", "score": "0.5504167", "text": "def update_fn(grads, state, params):\n params_flat, treedef = jax.tree_flatten(params)\n stats_flat = treedef.flatten_up_to(state.stats)\n grads_flat = treedef.flatten_up_to(grads)\n stats_grads = grads_flat\n\n new_stats_flat = jax.tree_map(\n lambda g, s, p: _compute_stats(g, s, p, state.count),\n stats_grads,\n stats_flat,\n params_flat,\n )\n\n new_stats_flat = _compute_preconditioners(\n new_stats_flat, params_flat, state.count\n )\n outputs = jax.tree_map(\n lambda g, s, p: _transform_grad(g, s, p, state.count),\n grads_flat,\n new_stats_flat,\n params_flat,\n )\n updates_flat, new_stats_flat = list(zip(*outputs)) if outputs else ((), ())\n\n updates = jax.tree_unflatten(treedef, updates_flat)\n new_stats = jax.tree_unflatten(treedef, new_stats_flat)\n\n new_state = ShampooState(count=state.count + 1, stats=new_stats)\n return updates, new_state", "title": "" }, { "docid": "002f17e442efc5fd456aaa469b75b094", "score": "0.54880893", "text": "def State(*args):\n # Getter\n if len(args) == 0:\n return lib.SwtControls_Get_State()\n\n # Setter\n Value, = args\n lib.SwtControls_Set_State(Value)\n CheckForError()", "title": "" }, { "docid": "3f547ee5321d5014a19fb71e6a114f20", "score": "0.5485638", "text": "def update(openstack_resource, args):\n args = reset_dict_empty_keys(args)\n openstack_resource.update(args)", "title": "" }, { "docid": "ca461d8f4b6e235ebdda64a4345418d5", "score": "0.54811615", "text": "def update(self, *args, **kwargs):\n for key, value in dict(*args, **kwargs).items():\n self[key] = value", "title": "" }, { "docid": "c29aad2e078af4aed4ffbf510334e888", "score": "0.54752934", "text": "def __setstate__(self, state):\n # Don't mutate the original state\n state = dict(state)\n\n # Allow for loading of legacy states.\n # See https://github.com/openai/gym/pull/2470\n if \"start\" not in state:\n state[\"start\"] = 0\n\n super().__setstate__(state)", "title": "" }, { "docid": "b4ae6d905a9a9531c72913b4b9028391", "score": "0.54741424", "text": "def update_state(self, other):\n# DEBUG print(\"Self\", repr(self))\n# DEBUG print(\"Other\", repr(other))\n \n other.command = self.command # store the last used command\n \n # remove any of the items that we don't want to pass onto other commands\n for item in (\"comment\", \"unknown\", \"skeinforge\"):\n if item in other.parameters.keys():\n del other.parameters[item] \n\n if self.X is None:\n self.X = other.X\n else:\n other.X = self.X\n\n if self.Y is None:\n self.Y = other.Y\n else:\n other.Y = self.Y\n\n if self.Z is None:\n self.Z = other.Z\n else:\n other.Z = self.Z\n\n if self.E is None:\n self.E = other.E\n else:\n other.E = self.E\n\n if self.F is None:\n self.F = other.F\n else:\n other.F = self.F\n\n if self.T is None:\n self.T = other.T\n\n for key in other.parameters.keys(): # pass the other's settings to the command...\n self.parameters.setdefault(key, other.parameters[key]) # (ignore if the key is in parameters; otherwise, add the other key/value pair)\n for key in other.parameters.keys(): # .... 
and update other values with those of the latest command\n other.parameters[key] = self.parameters[key]", "title": "" }, { "docid": "7ac46258520335e20cd5888a8a1011d0", "score": "0.5470264", "text": "def __call__(self, **kwargs):\n for item, value in kwargs.items():\n if item not in self._state[\"data\"]:\n self._state[\"data\"][item] = value", "title": "" }, { "docid": "bdddd78b404331d195cd358e5327b560", "score": "0.54610866", "text": "def state(*args, **_):\n if args:\n raise DDFunctionException(\n 'state function does not accept args, {0} given'.format(len(args))\n )\n return choice(STATES)", "title": "" }, { "docid": "bfd355fd116a6bd5dae90a73f411d2c2", "score": "0.5452416", "text": "def _update_state(self):\n tmp: dict = self.state\n self.state = {}\n self.state = tmp", "title": "" }, { "docid": "36dfadecfb1083f2c5460ba6a8355155", "score": "0.54513544", "text": "def SetDiagnosticState(self, *args, **kwargs):\n payload = { \"Arg1\": self.href }\n for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]\n for item in kwargs.items(): payload[item[0]] = item[1]\n return self._execute('setDiagnosticState', payload=payload, response_object=None)", "title": "" }, { "docid": "8550f9f03820f68b9cfa9e8f37ed293e", "score": "0.5448489", "text": "def update(self, blah):\n pass", "title": "" }, { "docid": "38e3559a853d250d2ab1ba281d0cee42", "score": "0.5447636", "text": "def __call__(self, **kwargs):\r\n self._update(kwargs)", "title": "" }, { "docid": "63be0f9e00075966a311677796ffbe6a", "score": "0.54467285", "text": "def update(self, sess, s, a, y):\n feed_dict = { self.states_pl: s, self.targets_pl: y, self.actions_pl: a }\n sess.run(self.train_op, feed_dict)\n return loss, q_values", "title": "" }, { "docid": "d86e8d788f2ead4c2c397c2d6133ff65", "score": "0.5439849", "text": "def set_node_info(*args):\n return _idaapi.set_node_info(*args)", "title": "" }, { "docid": "c13a241ae6d3c1d5508b173ec414cf57", "score": "0.5433719", "text": "def update_state(state, kernel, learning_rate, x_i, y_i):\n # *** START CODE HERE ***\n beta = learning_rate * (y_i - predict(state, kernel, x_i))\n state.append((beta, x_i))\n # *** END CODE HERE ***", "title": "" }, { "docid": "1e6f6468f7daa47826614d1715c75a51", "score": "0.54236364", "text": "def update(self, sess, s, a, y, b):\n feed_dict = { self.states_pl: s, self.targets_pl: y, self.actions_pl: a, self.baseline_pl: b }\n sess.run(self.train_op, feed_dict)", "title": "" }, { "docid": "4fc9fdf64814ce0550c9a4868ff9fa3e", "score": "0.54223573", "text": "def __setstate__(self, state):", "title": "" }, { "docid": "4fc9fdf64814ce0550c9a4868ff9fa3e", "score": "0.54223573", "text": "def __setstate__(self, state):", "title": "" }, { "docid": "954e60cdfc88104199491ccf25606f1e", "score": "0.5420849", "text": "def pythonop_set_dataset_state(**kwargs) -> None:\n for arg in ['dataset_uuid_callable', 'http_conn_id', 'endpoint']:\n assert arg in kwargs, \"missing required argument {}\".format(arg)\n dataset_uuid = kwargs['dataset_uuid_callable'](**kwargs)\n http_conn_id = kwargs['http_conn_id']\n endpoint = kwargs['endpoint']\n ds_state = kwargs['ds_state'] if 'ds_state' in kwargs else 'Processing'\n message = kwargs['message'] if 'message' in kwargs else 'update state'\n method='PUT'\n crypt_auth_tok = (kwargs['crypt_auth_tok'] if 'crypt_auth_tok' in kwargs \n else kwargs['dag_run'].conf['crypt_auth_tok'])\n headers={\n 'authorization' : 'Bearer ' + decrypt_tok(crypt_auth_tok.encode()),\n 'content-type' : 'application/json'}\n # print('headers:')\n # pprint(headers) # 
reduce visibility of auth_tok\n extra_options=[]\n \n http = HttpHook(method,\n http_conn_id=http_conn_id)\n\n data = {'dataset_id' : dataset_uuid,\n 'status' : ds_state,\n 'message' : message,\n 'metadata': {}}\n print('data: ')\n pprint(data)\n\n response = http.run(endpoint,\n json.dumps(data),\n headers,\n extra_options)\n print('response: ')\n pprint(response.json())", "title": "" }, { "docid": "2abaa765b9845710d7ce4d73cf2d0977", "score": "0.53930366", "text": "def update_state(self, state: dict) -> None:\n if not isinstance(state, dict):\n raise TypeError(\"state must be a dict\")\n\n # Check job_id match\n if self._acc_state and \"job_id\" in state and state[\"job_id\"] != self.job_id:\n raise ValueError(\n \"Job ID mismatch in update_state: \"\n + f\"job ID: {self.job_id}; state ID: {state['job_id']}\"\n )\n\n if self._acc_state is None:\n self._acc_state = {}\n\n self._acc_state = {**self._acc_state, **state}", "title": "" }, { "docid": "d2f15ba82289321d24dd81fd6e1c2bb2", "score": "0.5388972", "text": "def update(self, *args, **kwargs):\n for next_dict in chain(args, (kwargs, )):\n for k, v in next_dict.items():\n self[k] = v", "title": "" }, { "docid": "3ed3d5be2d8e737f36724595a0563710", "score": "0.53857875", "text": "def set_params(self, *args, **kwargs):", "title": "" }, { "docid": "ad9d1a2569b03f14d688157415d3431a", "score": "0.538474", "text": "def update(self, *args, **kwargs):\n attrs = [\"id\", \"width\", \"height\", \"x\", \"y\"]\n if len(args) is not 0:\n for i in range(len(args)):\n setattr(self, attrs[i], args[i])\n else:\n for key in kwargs.keys():\n if key in attrs:\n setattr(self, key, kwargs[key])", "title": "" }, { "docid": "484447f2bab8e9485c529354ca28af03", "score": "0.5376361", "text": "def setState(self,state):\n assert type(state) == pyState.State\n\n self.state = state\n for var in self.variables:\n self.variables[var].setState(state)", "title": "" }, { "docid": "80c576bf6f5fcf433b631c596e8611ba", "score": "0.5365734", "text": "def test_09_update_kwargs_x(self):\n r1 = Rectangle(6, 7, 10, 10)\n r1.update(x=12)\n self.assertEqual(r1.x, 12)", "title": "" }, { "docid": "fffca7db956f0a854eb4f1c9d6d86065", "score": "0.5356845", "text": "def update_state(self, state):\n state[self.y, self.x] = self.idx", "title": "" }, { "docid": "8e00b456a49164a014b8560e43ae7a76", "score": "0.5353482", "text": "def update(self, *args, **kwargs):\n if args:\n keys = [\"id\", \"width\", \"height\", \"x\", \"y\"][0:len(args)]\n for value, key in zip(args, keys):\n setattr(self, key, value)\n else:\n for key in kwargs:\n setattr(self, key, kwargs[key])", "title": "" }, { "docid": "94665b90e066cd621e48fae84cff7396", "score": "0.5353245", "text": "def update(self, *args, **kwargs):\n for k, v in dict(*args, **kwargs).iteritems():\n self[k] = v", "title": "" }, { "docid": "592bb68962017442c6d997cc905fd659", "score": "0.5352894", "text": "def update(self, *args, **kwargs): # Public method\n args_list = ['id', 'width', 'height', 'x', 'y']\n if args:\n for arg in range(len(args)):\n setattr(self, args_list[arg], args[arg])\n\n for key, value in kwargs.items(): # kwargs implementation\n setattr(self, key, value)", "title": "" }, { "docid": "9eb2b00d9b90460832798a27e5840c39", "score": "0.53519535", "text": "def test_update_four_kwargs(self):\n self.r1.update(height=9, width=2, id=666, x=0)\n self.assertEqual(self.r1.height, 9)\n self.assertEqual(self.r1.width, 2)\n self.assertEqual(self.r1.id, 666)\n self.assertEqual(self.r1.x, 0)", "title": "" }, { "docid": 
"ea60b71c9fca3f15f00fd50a27ca65e6", "score": "0.53514427", "text": "def state(self, *args):\n\n if len(args) == 1:\n return self.S.T.cat(args[0])\n else:\n self.S.T.touch(args[0], args[1])", "title": "" }, { "docid": "2a48e9f92265ec51b52ce9a0b5da21e1", "score": "0.53398025", "text": "def args(self, args):\n\n self._args = args", "title": "" }, { "docid": "2a48e9f92265ec51b52ce9a0b5da21e1", "score": "0.53398025", "text": "def args(self, args):\n\n self._args = args", "title": "" }, { "docid": "485c5ed266fc49a6362a7363d74a4e43", "score": "0.53391325", "text": "def update(self, *args, **kwargs):\n if len(args) > 1:\n raise TypeError(\n \"update expected at most 1 arguments, got %d\" % len(args)\n )\n\n for key, value in dict(*args, **kwargs).items():\n self[key] = value", "title": "" }, { "docid": "0c42df2120df2bc40cf5d9f151857970", "score": "0.5336595", "text": "def __setstate__(self, state):\n self.__dict__ = state\n self._initialize_functions()\n self._update_functions()", "title": "" }, { "docid": "0af0427058c5fe6046c8982685586fb5", "score": "0.53318304", "text": "def setParameters(self, args):\n raise NotImplementedError(\"Please implement this method\")", "title": "" }, { "docid": "4fdd5b534b0daa3bb4fd88fea96d3214", "score": "0.5331815", "text": "def update(self, *args, **kwargs):\n self.async_schedule_update_ha_state()", "title": "" }, { "docid": "8cd390bbc57af985924c9ac5c50311af", "score": "0.53317577", "text": "def update(self, *args, **kwargs):\n for key, value in dict(*args, **kwargs).iteritems():\n self[key] = value", "title": "" }, { "docid": "8cd390bbc57af985924c9ac5c50311af", "score": "0.53317577", "text": "def update(self, *args, **kwargs):\n for key, value in dict(*args, **kwargs).iteritems():\n self[key] = value", "title": "" }, { "docid": "cc5e757825ee913ffbe94f6b803473b6", "score": "0.53306806", "text": "def update(self, *args, **kwargs):\n\n new_args = ['id', 'width', 'height', 'x', 'y']\n\n if args:\n for i in range(len(args)):\n setattr(self, new_args[i], args[i])\n\n if kwargs:\n for key in kwargs:\n setattr(self, key, kwargs[key])", "title": "" } ]
query_id: cbe92fc3d30239c40e6629120bd7c1e3
query: Publish a new ``event`` on a ``channel``
[ { "docid": "454c3492a91babb42f08db903337dc36", "score": "0.75689447", "text": "async def publish(self, channel, event, data=None):\n msg = {'event': event, 'channel': channel}\n if data:\n msg['data'] = data\n try:\n await self.pubsub.publish(self.prefixed(channel), msg)\n except ConnectionRefusedError:\n self.connection_error = True\n self.logger.critical(\n '%s cannot publish on \"%s\" channel - connection error',\n self,\n channel\n )\n else:\n self.connection_ok()", "title": "" } ]
[ { "docid": "9587ef13186e6db711317b41aeb40082", "score": "0.7104358", "text": "def publish(self, event: Event):\n self.events_queue.put(event)", "title": "" }, { "docid": "8e4f4c5f3697d39b1be9ad193465386c", "score": "0.70263994", "text": "def publish(self, channel, message):\n self.__r.publish(channel, message.dumps())", "title": "" }, { "docid": "1831c7cafe4e66ab0838b0b98b8ed592", "score": "0.68139124", "text": "def publish_to_channel(self, channel, msg):\n self.channel = channel\n redis_db.publish(channel, msg)\n subscriber_count = redis_db.execute_command('PUBSUB', 'NUMSUB', channel)\n print('Published new post to channel \"%s\". Subscribers: %s' % (channel, subscriber_count[1]))", "title": "" }, { "docid": "fb151539c0064c8d487536281b4dc252", "score": "0.68059105", "text": "def publish(self, channel, msg):\n return self.conn.publish(channel, msg)", "title": "" }, { "docid": "1bbe44ef3d9418afe333d278a814d766", "score": "0.67734283", "text": "def _publish_event(cls, event: 'BaseEvent'):\n from ..manager import PubSubManager\n\n pubsub = PubSubManager.get()\n pubsub.publish(event)", "title": "" }, { "docid": "ca841ec4bc427b49fb43215619d21ed0", "score": "0.6749738", "text": "def publish(self, channel, message):\n self.pubnub.publish().channel(channel).message(message).sync()", "title": "" }, { "docid": "3bca137a6cdcab1b9880d78cf0623de8", "score": "0.6704715", "text": "def _send(self):\n \n self.check_attr('channel')\n \n parameter = [self.channel]\n \n if hasattr(self, 'topic') and self.topic is not None:\n parameter.append(self.topic)\n \n return self.create_event(Topic, parameter)", "title": "" }, { "docid": "44a072e8eb23ccfd29d18e5cd667898a", "score": "0.65701497", "text": "def publish(channel_key, **kwargs):\n channel_key = str(channel_key)\n\n if channel_key not in _CHANNELS:\n return\n else:\n # create an Evt instance\n evt = Evt(channel_key, **kwargs)\n for func_call in _CHANNELS[channel_key]:\n # fire each of the callbacks.\n try:\n func_call(evt)\n except:\n # func not avaible at this time.\n # NEEDS A WARNING, NOT AN EXCEPTION\n pass", "title": "" }, { "docid": "77b41ea777a96adc2de0e28389b7622b", "score": "0.6543951", "text": "def publish(self, subchannel, message):\n self.bus.pub(self.config.get(\"publish\", \"pyircbot_{}\").format(subchannel),\n \"{} {}\".format(self.config.get(\"name\", \"default\"), message))", "title": "" }, { "docid": "2bfe7878ffbc32c6c6ebed6b970d14a7", "score": "0.6478365", "text": "def channel_created(self, channel):", "title": "" }, { "docid": "93a9f21efeadaed3e66022f91a9f0220", "score": "0.63509893", "text": "def on_channel_message(self, sender, channel, message):\n pass", "title": "" }, { "docid": "fdf45dae533c6c320f89acbb9e6cd592", "score": "0.6241765", "text": "def publish(self, channel, msg):\n super().publish(channel, msg.SerializeToString())", "title": "" }, { "docid": "4e251a64c2eca291646d7541cff7ccbf", "score": "0.622899", "text": "def handleEvent(self, event, header, channel): \n \n pass", "title": "" }, { "docid": "4832268c7ff4007b1e80e52a5bcafed3", "score": "0.6183731", "text": "def __on_remote(self, event, remote_event, connection_name, channel=None):\n node = self.__peers[connection_name]\n remote_event.channels = (channel,) if channel is not None \\\n else event.channels\n return node.send(remote_event)", "title": "" }, { "docid": "c05629e33ce1b09928da9d5458051d16", "score": "0.61417663", "text": "def consume(self, event, *args, **kwargs):\n if self._hub_event_filter(event):\n self.signal.send(event)", "title": "" }, { "docid": 
"e03be34f2b62840dad50fba5353dfd2a", "score": "0.6114326", "text": "def event_publish(self, cmd):\n for sub in self.subscribers:\n sub.event_receive(cmd)", "title": "" }, { "docid": "9b92e01239f5b2142711e5806f76386a", "score": "0.611418", "text": "def main():\n\n with pika.BlockingConnection(CONN_PARAMETERS) as conn:\n\n channel = conn.channel()\n\n # This time we use direct exchange.\n # The direct exchange passes messages to the queues whose bindings match the routing key.\n channel.exchange_declare(exchange=EXCHANGE, exchange_type='direct')\n\n # We create event message. It will contain timestamp and randomly chosen event type.\n event = {\n 'ts': time(),\n 'event_type': random.choice(('INFO', 'ALERT', 'ERROR'))\n }\n\n channel.basic_publish(\n exchange=EXCHANGE,\n routing_key=event['event_type'], # This time the routing key will not be ignored\n body=json.dumps(event)\n )", "title": "" }, { "docid": "b426ba658d71ad78e5a6bb4c36f685c8", "score": "0.6112572", "text": "def publish(client):\n client.publish(__channel.format(\"dummy_channel\"), \"dummy_value\")", "title": "" }, { "docid": "b89053a2912cb2e2551a30d48c0008f2", "score": "0.60084015", "text": "def publish(self, channel:NativeType, message:NativeType) -> int:\r\n return self._query(b'publish', self.encode_from_native(channel), self.encode_from_native(message))", "title": "" }, { "docid": "a58dc2db0b3436754909532da38e9894", "score": "0.6001202", "text": "def subscribe_handler(self, channel, data):\n log.debug(\"Subscribed to {}, data was {}\".format(channel, data))\n self.subscribed[channel] = True", "title": "" }, { "docid": "bc8e60e12dde84893ccad43b8b027fd4", "score": "0.5982181", "text": "def message(payload):\n\t# Get event data from payload\n\tevent = payload.get(\"event\", {})\n\t#Get the text\n\ttext=event.get(\"text\")\n\n\tif text.startswith(\"[traducir]\"):\n\t\tchannel.basic_publish(exchange=\"nestor\", routing_key=\"traducir\", body=text)\n\n\tif text.startswith(\"[wikipedia]\"):\n\t\tchannel.basic_publish(exchange=\"nestor\", routing_key=\"wikipedia\", body=text)", "title": "" }, { "docid": "e006debe98389e5c00ee30ecad5475d7", "score": "0.59794396", "text": "def publish(message, channel, exchange, routing, correlation_id=None):\n LOG.debug(f'Sending to exchange: {exchange} [routing key: {routing}]')\n channel.basic_publish(exchange, # exchange\n routing, # routing_key\n json.dumps(message), # body\n properties=pika.BasicProperties(correlation_id=correlation_id or str(uuid.uuid4()),\n content_type='application/json',\n delivery_mode=2))", "title": "" }, { "docid": "6d5eccf9abc58dd9f42ce87d412c0641", "score": "0.59779876", "text": "async def publish_events( # pylint: disable=inconsistent-return-statements\n self, topic_hostname: str, events: IO, *, content_type: str = \"application/json\", **kwargs: Any\n ) -> None:", "title": "" }, { "docid": "46bf81d64c9414c038ad72069cd9d522", "score": "0.5967627", "text": "def publish(self, channel, data, key=None):\n packet = _WSPSPublishPacket(channel, data, key)\n self._send_packet(packet)", "title": "" }, { "docid": "537a282907a3e607de8c0ae4ba81792d", "score": "0.59303397", "text": "def send_pusher_event(\n signal,\n sender,\n instance,\n channels,\n event_name,\n data,\n socket_id=None,\n **kwargs\n):\n\n push_provider_class = kwargs.get(\"provider_class\", PusherProvider)\n push_provider = push_provider_class()\n push_provider.configure()\n push_provider.trigger(channels, event_name, data, socket_id)", "title": "" }, { "docid": "190f9feb0f3b88190743a52ff42bc287", "score": 
"0.59172153", "text": "def joined(self, channel):\n log.msg('Joined %s' % channel)\n self.channels.add(channel)", "title": "" }, { "docid": "093e5face6f636a8c2c5875556a423a0", "score": "0.59138006", "text": "def channel(self, channels, message, event='base-event'):\n try:\n import pusher\n except ImportError:\n raise DriverLibraryNotFound(\n 'Could not find the \"pusher\" library. Please pip install this library running \"pip install pusher\"')\n\n configuration = config('broadcast.drivers.pusher')\n\n pusher_client = pusher.Pusher(\n app_id=str(configuration['app_id']),\n key=configuration['client'],\n secret=configuration['secret'],\n ssl=self.ssl_message\n )\n\n if isinstance(message, str):\n message = {'message': message}\n\n if isinstance(channels, list):\n for channel in channels:\n pusher_client.trigger(channel, event, message)\n else:\n pusher_client.trigger(channels, event, message)\n\n return message", "title": "" }, { "docid": "784a4a4e812e67f7e59d7a2eee4eedf4", "score": "0.5913621", "text": "def channel(self, channel):\n\n self._channel = channel", "title": "" }, { "docid": "1c809571cd8ca241fecc99d1f0a447c8", "score": "0.5911307", "text": "def send_pusher_event(\n signal, sender, instance, channel, event_name, data, socket_id=None, **kwargs\n):\n try:\n pusher_cluster = settings.PUSHER_CLUSTER\n except AttributeError:\n pusher_cluster = \"mt1\"\n\n pusher = Pusher(\n app_id=settings.PUSHER_APP_ID,\n key=settings.PUSHER_KEY,\n secret=settings.PUSHER_SECRET,\n cluster=pusher_cluster,\n )\n pusher.trigger([channel], event_name, data)", "title": "" }, { "docid": "c6d023f3877f4f46362229da7e6d0bfa", "score": "0.5910057", "text": "async def send_event(self, event: Event) -> None:\n pass", "title": "" }, { "docid": "a39f453df8746d59956a84dd07ba3d77", "score": "0.58993256", "text": "def register_channel(self, channel):\n self[channel.__name__] = channel", "title": "" }, { "docid": "946f6617f05e12b014302e65a7a73343", "score": "0.5871939", "text": "async def publish_event(self, event: Union[Event, EVENT_TYPE]) -> None:\n event = self.event_format(event)\n self.logger.debug(f\"Get {event}\")\n for listener in self._listeners[event.event_type]:\n res = await self._run_listener(event, listener)\n if res:\n # if listener return true. 
will break the loop\n self.logger.debug(f\"{listener.__name__} break the loop\")\n break", "title": "" }, { "docid": "5b1d983f27421e9968f1b35ba0db6b4c", "score": "0.58658206", "text": "def publish(message: str):\n channel.basic_publish(exchange=\"\",\n routing_key=\"integration\",\n body=message,\n properties=pika.BasicProperties(\n delivery_mode=2,\n ))", "title": "" }, { "docid": "b01174e7938fb3d8efceace1ac8c081a", "score": "0.586299", "text": "def broadcast(self, channel, msg):\n raise NotImplementedError()", "title": "" }, { "docid": "edbc31d57a33ac10f1ec1455da1222f4", "score": "0.5830413", "text": "async def publish_custom_event_events( # pylint: disable=inconsistent-return-statements\n self, topic_hostname: str, events: IO, *, content_type: str = \"application/json\", **kwargs: Any\n ) -> None:", "title": "" }, { "docid": "6ee78fbe54ae4defaf534e1e60692454", "score": "0.5826786", "text": "def channel_message(self, message_type, channel, data):\n pass", "title": "" }, { "docid": "982dc2f99301e375122332e26f2797ed", "score": "0.58212376", "text": "async def activate_event_channel(ctx):\n\n await ctx.message.delete()\n message = await ctx.send(f\"Channel activated for event voice \" f\"channel creation.\")\n scheduling.message_delayed_delete(message)\n\n db.event_channels.add(ctx.channel.id)", "title": "" }, { "docid": "772a07c148a44e38c938f188999de537", "score": "0.5809161", "text": "def transmit(self, event):\r\n self.__engine.insertEvent(event)", "title": "" }, { "docid": "c1ea017494fe618626a95eacad0abd99", "score": "0.5790748", "text": "def send_message(self, message, channel=None):", "title": "" }, { "docid": "206ed3eba6928de4e59fdfa73c30bb13", "score": "0.5774", "text": "def sendChanneledMessage(self, message, channel):\n self.sendMessage(message)", "title": "" }, { "docid": "6714f2a5eb194798251a09e085ae71ca", "score": "0.5772914", "text": "def on_channel_open(self, channel):\r\n LOGGER.info('Channel opened')\r\n self._channel = channel\r\n self.add_on_channel_close_callback()\r\n self.setup_exchange(self._exchange_name)", "title": "" }, { "docid": "4ed986a9331b3cc2f83c37b0d909c73d", "score": "0.57617205", "text": "def subscribe(self, channel):\n self.subscribe_client.subscribe(channel)\n self.subscribed[channel] = False", "title": "" }, { "docid": "1fcb3e119a2870f53c05ee6160b060d1", "score": "0.573853", "text": "def publish(self, message):\n self.channel.basic_publish(self.exchange, routing_key=self.queue, body=message, properties=PERSIST)", "title": "" }, { "docid": "3d952859b63773f997783ed6fd66c40b", "score": "0.5732303", "text": "def open_channel(self):\r\n LOGGER.info('Creating a new channel')\r\n self._connection.channel(on_open_callback=self.on_channel_open)", "title": "" }, { "docid": "de3973f0fd53833d493bddcd17cf3543", "score": "0.5708967", "text": "def newChannel(self, channel):\r\n self.dbmanager.createChannel((channel.id, channel.name, channel.permisions))", "title": "" }, { "docid": "c407c50796123da8480ae0ae2d143d34", "score": "0.5708925", "text": "def publish_event(dask_pub: distributed.Pub, event: BaseTaskEvent) -> None:\n with log_catch(logger, reraise=False):\n dask_pub.put(event.json())", "title": "" }, { "docid": "e795b6d0aa25f87af6a80425e6301ca2", "score": "0.57088953", "text": "def on_channel_open(self, channel):\n app_log.info('Channel opened')\n self._channel = channel\n self.add_on_channel_close_callback()\n self.setup_exchange(self.exchange, self.exchange_type)", "title": "" }, { "docid": "1f6139e77dab7153ba49336d16dd92a0", "score": "0.57003355", "text": 
"def produce_event(self, topic, event):\n try:\n self.producer.produce(\n topic=topic,\n value=json.dumps(event, default=self.default_json_encoder),\n callback=lambda err, msg, obj=event: self.callback_function(err, msg, obj),\n )\n self.producer.poll(1) # Callback function\n except ValueError as error:\n logger.error(error)", "title": "" }, { "docid": "e6ca547a1d780c498dff76d8dbaea318", "score": "0.5678286", "text": "def on(self, event, callback):\n if not self.connected:\n raise ConnectionClosed()\n\n if event not in self._callbacks:\n self._callbacks[event] = []\n channel, kwargs = decode_event(event)\n msg = {'event': 'subscribe', 'channel': channel}\n msg.update(kwargs)\n\n logger.info('Subscribing: {}', event)\n self._send(msg)\n\n self._callbacks[event].append(callback)\n logger.info('Add callback: \"{}\"', event)", "title": "" }, { "docid": "806995e838b5d3f63aac68159d8fd749", "score": "0.5657277", "text": "def publish(self, message):\n pass", "title": "" }, { "docid": "11982a6ebf9e09d2ef403206cbffc0db", "score": "0.5650312", "text": "async def on_channel_create(self, channel):\n if channel.is_private:\n return\n\n role = await self.get_role(channel.server)\n if not role:\n return\n\n await self.setup_channel(channel, role)", "title": "" }, { "docid": "e88f7e18540b067c0722c5fa652d1e2e", "score": "0.5632813", "text": "def __call__(self, handler=None, channel=None):\n if handler is None:\n def handler_with_channel(handler):\n return self.__call__(handler, channel)\n return handler_with_channel\n if channel:\n if not self.supports_channels:\n raise EventError(\"The %s event does not support channels so \"\n \"the handler `%s` could not be registered\" %\n self.name, handler.__name__)\n channel = re.compile(channel)\n self.handlers.append((handler, channel))", "title": "" }, { "docid": "8d5f1c3000c1886d29b9f268d3e49d58", "score": "0.56314766", "text": "async def publish_custom_event_events( # pylint: disable=inconsistent-return-statements\n self, topic_hostname: str, events: List[JSON], *, content_type: str = \"application/json\", **kwargs: Any\n ) -> None:", "title": "" }, { "docid": "1b8a6a48b6d45dd829328bdf71af9380", "score": "0.5629961", "text": "async def event_message(ctx):\n\n # make sure the bot ignores itself and the streamer\n if ctx.author.name.lower() == BOT_NICK.lower():\n return\n\n await bot.handle_commands(ctx)\n\n # await ctx.channel.send(ctx.content)\n\n if 'hello' in ctx.content.lower():\n await ctx.channel.send(f\"Hi, @{ctx.author.name}!\")", "title": "" }, { "docid": "808ec874946029084b1cb288b092c10b", "score": "0.56248283", "text": "def publish(payload, info):\n sessionid = sessionid_from_headers(info.context.channels_scope[\"headers\"])\n if payload[\"author_sessionid\"] == sessionid:\n return None\n\n return OnNewMessage(message=payload[\"message\"])", "title": "" }, { "docid": "ed4e2feaff4737a63810d357aaf70a3f", "score": "0.56216234", "text": "async def channel(self, ctx, channel: discord.TextChannel):\n await self.config.guild(ctx.guild).set_raw(\"channel\", value=channel.id)\n embed =Embed.create(\n self, ctx, title=\"Successful <:success:777167188816560168>\",\n description=f\"{channel.mention} will now receive notifications from users to notify the staff.\"\n )\n await ctx.send(embed=embed)", "title": "" }, { "docid": "83165ee1df661419e0a51f2382d17282", "score": "0.56185186", "text": "def on_pubmsg(self, connection, event, timestamp=None):\r\n\r\n text = self._get_event_text(event)\r\n channel = self._get_event_channel(event)\r\n nick = 
self._get_event_nick(event)\r\n\r\n if timestamp is None:\r\n timestamp = time()\r\n\r\n cmd = False\r\n command, args = self._get_command(text)\r\n\r\n if command:\r\n cmd = self.bot.irc_command(channel, nick, command, args, timestamp)\r\n\r\n if not cmd:\r\n self.bot.chat_message(channel, nick, text, timestamp)", "title": "" }, { "docid": "9e1c76f5e03fbf67f882b2c7eec9a8a3", "score": "0.5604509", "text": "def on_action(self, sender, channel, message):\n pass", "title": "" }, { "docid": "94d0f20cbdcc2daaccc6c64e1958d455", "score": "0.56012654", "text": "def publish(self, message):\n self.__ensure_started()\n\n message_def = self.messages.get_by_cls(type(message))\n context = OutgoingContext(message, message_def)\n self.outgoing_pipeline.execute(context)", "title": "" }, { "docid": "347171e6fc440b9ee0498d3cc3c45671", "score": "0.5581217", "text": "def _send(self):\n \n self.check_attr('channels')\n \n return self.create_event(Names, [','.join(self.channels)])", "title": "" }, { "docid": "7afc07bd9371768bfd329fc8d5e56817", "score": "0.5575236", "text": "def on_channel_open(self, channel):\n self._channel = channel\n self._channel.add_on_close_callback(self.on_channel_close)\n self.declare_exchange()", "title": "" }, { "docid": "73944f1d56033bd62bf51944394b3271", "score": "0.5555147", "text": "def handle_event(self, user, xcommand, channel):\n if xcommand and channel:\n print \"Received command: \" + xcommand + \" in channel: \" + channel +\\\n \" from user: \" + user\n response = self.command.handle_command(user, xcommand, channel)\n self.bot.slack_client.api_call(\"chat.postMessage\", channel=channel,\\\n text=response, as_user=True)", "title": "" }, { "docid": "6010ebb60bb27fdca307d192f2ef55ab", "score": "0.55208623", "text": "def open_channel(self):\n app_log.info('Creating a new channel')\n self._connection.channel(on_open_callback=self.on_channel_open)", "title": "" }, { "docid": "9dbaeafe2ab94a952d2646393c6c88d6", "score": "0.55133903", "text": "def _send(self, message):\n discovery_service = self.discovery_service\n if message.headers[\"message_type\"] == \"notification\":\n source = message.headers[\"source\"]\n ep = entry_point.EntryPointFactory().create(source)\n exchange = discovery_service.get_local_publisher(ep.service)\n elif message.headers[\"message_type\"] in (\"response\", \"error\"):\n rk = message.headers[\"destination\"]\n ep = entry_point.EntryPointFactory().create(rk)\n exchange = discovery_service.get_remote(ep.service)\n else:\n dst = message.headers[\"destination\"]\n ep = entry_point.EntryPointFactory().create(dst)\n exchange = discovery_service.get_remote(ep.service)\n routing_key = ep.to_routing_key()\n self._driver.publish_message(exchange, routing_key, message)", "title": "" }, { "docid": "611b96a1c1d435bbbd7a367fc993af7c", "score": "0.5510074", "text": "def Publisher(object):\n def __init__(self, channel_key):\n self.channel_key = str(channel_key)\n\n def publish(**kwargs):\n publish(self.channel_key, **kwargs)", "title": "" }, { "docid": "bd62faeb93659fe42e1ea2f833aa9856", "score": "0.55084723", "text": "def add_event(self, event):\n if not self._closed:\n self._event_queue.put(event)", "title": "" }, { "docid": "252030fabcca3125c63a3e5b919497aa", "score": "0.550363", "text": "async def publish(\n self,\n room: str,\n event: Events,\n data: Dict[str, str],\n plugin_url: str = \"messaging.zuri.chat\",\n ) -> Dict[str, Any]:\n data_publish = {\n \"status\": 200,\n \"event\": event,\n \"plugin_url\": plugin_url,\n \"data\": data,\n }\n\n command = {\n 
\"method\": \"publish\",\n \"params\": {\n \"channel\": room,\n \"data\": data_publish,\n },\n }\n try:\n response = await self._send_command(command)\n except requests.RequestException:\n return {\"status\": 400, \"message\": \"Invalid Request\"}\n else:\n if response and response.get(\"status_code\") == 200:\n return data_publish\n return {\"status\": 424, \"message\": \"centrifugo failed\"}", "title": "" }, { "docid": "0211758ea4b86b9559ed28bf3b615be5", "score": "0.54949546", "text": "def _send_event_to_connection(self, connection: EventWebsocketProtocol, event: BaseEvent) -> None:\n if not connection.can_receive_event(event.id):\n return\n\n assert self._latest_event_id is not None, '_latest_event_id must be set.'\n\n response = EventResponse(event=event, latest_event_id=self._latest_event_id)\n\n connection.send_event_response(response)", "title": "" }, { "docid": "f1d008c965f9a0d4bb8f5e4e9ef0da51", "score": "0.5479013", "text": "def handler(event, _context):\n # host, ssl based on env\n url = urlparse.urlparse(os.environ.get('MQTT_URL'))\n\n # check for secure scheme\n tls = None\n if url.scheme == 'mqtts':\n tls = {'ca_certs': './cacert.pem', 'tls_version': PROTOCOL_TLSv1}\n\n default_parameters = {\n 'username': url.username,\n 'password': url.password,\n 'topic': 'test_topic',\n 'payload': 'test_payload',\n 'retain': 'false',\n 'qos': 2\n }\n\n parameters = default_parameters.copy()\n parameters.update(json.loads(event['body']))\n\n auth = {'username': parameters['username'], 'password': parameters['password']}\n\n publish.single(\n topic=parameters['topic'],\n payload=parameters['payload'],\n qos=parameters['qos'],\n retain=parse_str_as_boolean(parameters['retain']),\n hostname=url.hostname,\n port=url.port,\n auth=auth,\n tls=tls\n )\n\n response = {\n \"statusCode\": 200,\n \"body\": json.dumps({'message':'delivered'})\n }\n\n return response", "title": "" }, { "docid": "819d73836d152e35b7ca45301a864c1a", "score": "0.54769045", "text": "def emit(self, channel, message, forward=False):\n validateType('message', Message, message)\n message.channel = channel\n message.forward = forward\n # message.source = self\n\n if self.validate(message):\n self.log_debug(\"Sending message: %s on channel: %s\" % (message, channel))\n\n for child in self.children:\n child.insert(message)\n\n # yields to event loop\n self.tick()", "title": "" }, { "docid": "73f6bdc1cd9254f246d6db7d1e3d6d5c", "score": "0.547602", "text": "def publish(self, event='message', data=None, delay=None):\n if delay is None:\n with self.lock:\n for target in self.subscriptions[event]:\n target(event, data)\n else:\n def task():\n self.publish(event, data)\n threading.Timer(delay, task).start()", "title": "" }, { "docid": "ed31c8765358eba325d808accabbe2ef", "score": "0.546317", "text": "def redis_publish(mgr, params):\n if params.get('channel') and params.get('message'):\n serv_redis.redis.publish(params['channel'], params['message'])", "title": "" }, { "docid": "395e76347b7a8c1b51d88279958e6bd9", "score": "0.5463066", "text": "def open(self, channel):\n self.channel = channel", "title": "" }, { "docid": "d900f87a145db903c4fd0eab09f8bfbe", "score": "0.54580224", "text": "def send(self, channel, data):\n self.net.push_packet(\"PLAY>Plugin Message\",\n {\"channel\": channel, \"data\": data})", "title": "" }, { "docid": "24d2f27054ceba2e4bdf7658731d01f8", "score": "0.54521906", "text": "def post_event(event: Event) -> None:\n post(event)", "title": "" }, { "docid": "012533981a6b9cf98bddb97592055778", "score": "0.5439084", 
"text": "def publish_message(self, message):\n if message:\n for channel in self._publishers:\n self._connection.publish(channel, message)\n if self._expire > 0:\n self._connection.set(channel, message, ex=self._expire)", "title": "" }, { "docid": "5facfd0bf9680f07678d67ee295672d9", "score": "0.5437993", "text": "def joined(self, channel):\n print channel", "title": "" }, { "docid": "0b2f151d5d37b4c2eb7bd327b1b16a6f", "score": "0.54372245", "text": "def on_channel_open(self, channel):\n LOGGER.info('Channel opened')\n self._channel = channel\n self.add_on_channel_close_callback()\n self.process_callbacks('on_channel_open')", "title": "" }, { "docid": "d1d3a80e7af095317239917bed5a5c58", "score": "0.5436543", "text": "def create_channel(self, channel_name, channel_id, user_id) -> None:", "title": "" }, { "docid": "e86bac3c7198df17ea2f089ab7245546", "score": "0.5434909", "text": "async def event(self, ctx: Context) -> None:\n assert ctx.server\n assert ctx.channel_settings\n if not ctx.message.attachments:\n await safe_send_channel(\n ctx.message, s(\"event_no_data\", reply=ctx.message.author.mention)\n )\n await safe_react_error(ctx.message)\n return\n if not ctx.params:\n await safe_send_channel(\n ctx.message, s(\"event_no_params\", reply=ctx.message.author.mention)\n )\n await safe_react_error(ctx.message)\n return\n\n opts = parse_opts(ctx.params, default_size=ctx.channel_settings.default_size)\n params, tag_names, opt_msg, system = (\n opts[\"params\"],\n opts[\"tags\"],\n opts[\"message\"],\n opts[\"system\"],\n )\n size = len(params)\n attachment = ctx.message.attachments[0]\n\n if len(tag_names) > 5:\n await safe_send_channel(\n ctx.message, s(\"tags_too_many\", reply=ctx.message.author.mention)\n )\n await safe_react_error(ctx.message)\n return\n if opt_msg and len(opt_msg) >= 255:\n await safe_send_channel(\n ctx.message,\n s(\n \"game_message_too_long\",\n reply=ctx.message.author.mention,\n ),\n )\n await safe_react_error(ctx.message)\n return\n if not (1 < size <= 4):\n await safe_send_channel(\n ctx.message,\n s(\n \"event_bad_play_count\",\n reply=ctx.message.author.mention,\n ),\n )\n await safe_react_error(ctx.message)\n return\n if not attachment.filename.lower().endswith(\".csv\"):\n await safe_send_channel(\n ctx.message, s(\"event_not_csv\", reply=ctx.message.author.mention)\n )\n await safe_react_error(ctx.message)\n return\n\n tags = Tag.create_many(ctx.session, tag_names)\n\n bdata = await ctx.message.attachments[0].read()\n try:\n sdata = self.decode_data(bdata)\n except UnicodeDecodeError:\n await safe_send_channel(\n ctx.message,\n s(\n \"event_not_utf\",\n reply=ctx.message.author.mention,\n ),\n )\n await safe_react_error(ctx.message)\n return\n\n event_error: Optional[str] = None\n warnings = set()\n event = Event()\n try:\n reader = csv.reader(StringIO(sdata))\n header = [column.lower().strip() for column in next(reader)]\n params = [param.lower().strip() for param in params]\n\n if any(param not in header for param in params):\n await safe_send_channel(\n ctx.message,\n s(\n \"event_no_header\",\n reply=ctx.message.author.mention,\n ),\n )\n await safe_react_error(ctx.message)\n return\n\n columns = [header.index(param) for param in params]\n\n ctx.session.add(event)\n ctx.session.commit()\n\n players_in_this_event: Set[str] = set()\n\n finder = create_member_finder(ctx.message)\n\n for i, row in enumerate(reader):\n csv_row_data = [row[column].strip() for column in columns]\n players_s = \", \".join([f'\"{value}\"' for value in csv_row_data])\n player_names = 
[\n re.sub(\"#.*$\", \"\", value.lower()).lstrip(\"@\")\n for value in csv_row_data\n ]\n\n for player_name in player_names:\n if not finder.find(player_name):\n warning = s(\"event_missing_player\", row=i + 1, players=players_s)\n await safe_send_channel(ctx.message, warning)\n continue\n\n player_discord_users: List[discord.Member] = []\n for csv_data, player_name in zip(csv_row_data, player_names):\n if player_name in players_in_this_event:\n await safe_send_channel(\n ctx.message,\n s(\n \"event_duplicate_user\",\n row=i + 1,\n name=csv_data,\n players=players_s,\n ),\n )\n await safe_react_error(ctx.message)\n return\n player_discord_user = finder.find(player_name)\n if player_discord_user:\n players_in_this_event.add(player_name)\n player_discord_users.append(player_discord_user)\n else:\n warnings.add(\n s(\n \"event_missing_user\",\n row=i + 1,\n name=csv_data,\n players=players_s,\n )\n )\n\n if len(player_discord_users) != size:\n continue\n\n player_users = [\n self.ensure_user_exists(ctx.session, player_discord_user)\n for player_discord_user in player_discord_users\n ]\n\n for player_discord_user, player_user in zip(\n player_discord_users, player_users\n ):\n if player_user.waiting:\n game_to_update = player_user.game\n player_user.game_id = None # type: ignore\n await self.try_to_update_game(ctx, game_to_update)\n trunc_name: str = player_discord_user.name[0:50]\n player_user.cached_name = trunc_name # type: ignore\n ctx.session.commit()\n\n now = datetime.utcnow()\n expires_at = now + timedelta(minutes=ctx.server.expire)\n game = Game(\n created_at=now,\n expires_at=expires_at,\n guild_xid=ctx.message.channel.guild.id,\n size=size,\n updated_at=now,\n status=\"ready\",\n system=system,\n message=opt_msg,\n users=player_users,\n event=event,\n tags=tags,\n )\n ctx.session.add(game)\n ctx.session.commit()\n except Exception as e:\n event_error = str(e)\n\n if event_error:\n await safe_send_channel(\n ctx.message,\n s(\"event_error\", reply=ctx.message.author.mention, error=event_error),\n )\n await safe_react_error(ctx.message)\n return\n\n def by_row(s: str) -> int:\n m = re.match(\"^.*row ([0-9]+).*$\", s)\n # TODO: Hopefully no one adds a strings.yaml warning\n # that doesn't fit this exact format!\n assert m is not None\n return int(m[1])\n\n warnings_s = \"\\n\".join(sorted(warnings, key=by_row))\n if warnings_s:\n for page in paginate(warnings_s):\n await safe_send_channel(ctx.message, page)\n\n if not event.games:\n ctx.session.delete(event)\n ctx.session.commit()\n await safe_send_channel(\n ctx.message, s(\"event_empty\", reply=ctx.message.author.mention)\n )\n await safe_react_error(ctx.message)\n return\n\n ctx.session.commit()\n count = len([game for game in event.games])\n await safe_send_channel(\n ctx.message,\n s(\n \"event_created\",\n reply=ctx.message.author.mention,\n prefix=ctx.prefix,\n event_id=event.id,\n count=count,\n ),\n )\n await safe_react_ok(ctx.message)", "title": "" }, { "docid": "01406f5e57b0c3b9353a705484604749", "score": "0.5431031", "text": "def __publish(self, channel):\n self._logger.info(\"Generating raw PV values\")\n while (True):\n # Generate random power value\n value = random.uniform(0, 9000)\n # Publish value to the queue\n try:\n channel.basic_publish(\n exchange='',\n routing_key='raw',\n body=f\"{value:.2f}\",\n mandatory=True,\n properties=pika.BasicProperties(\n delivery_mode=2, # make message persistent\n )\n )\n self._logger.info(f\"Sent {value:.2f}\")\n # wait some seconds to not flood the queue\n time.sleep(2)\n except 
pika.exceptions.UnroutableError:\n self._logger.warn('Message was returned')\n continue\n except KeyboardInterrupt as ex:\n self._logger.info(\"Operation stopped by user\")\n break", "title": "" }, { "docid": "73e3438f4dc1aa995287b0e6fa8809af", "score": "0.54253995", "text": "def publish_events(self, events):\n raise ceilometer.NotImplementedError", "title": "" }, { "docid": "680d4fe16612025f30bca7b713e2268e", "score": "0.5424714", "text": "def __emit_channel_msg_on_bus(self, msg: ChannelMessage):\n self._emit_protocol_msg_on_bus('Channel ID: {}'.format(msg.channel_id), msg)", "title": "" }, { "docid": "8afb2058dcf04841dc280975aca59bc6", "score": "0.54179215", "text": "def send_message_event(self):\r\n pass", "title": "" }, { "docid": "abca27748ea09487b98cac8742ae51b2", "score": "0.54167527", "text": "async def publish_events( # pylint: disable=inconsistent-return-statements\n self,\n topic_hostname: str,\n events: List[_models.EventGridEvent],\n *,\n content_type: str = \"application/json\",\n **kwargs: Any\n ) -> None:", "title": "" }, { "docid": "82db6d416c40eec06042bc7803dd7583", "score": "0.5416035", "text": "def api_post_channel():\n\treturn api_post(request.json, object_type=\"channel\")", "title": "" }, { "docid": "48511c04e7e692ea8d93c93e1a35f772", "score": "0.54094094", "text": "def queue_event(self, event):\n self.event_queue.put(event)", "title": "" }, { "docid": "b5d67e7a04202ab3d72625383e907ceb", "score": "0.54074836", "text": "def schedule_event(self, event):\n self.event_list.put(event)", "title": "" }, { "docid": "bea9a992aee0a395063b8e1f92f9f367", "score": "0.540114", "text": "def publish(self, msg: str) -> None:\n ...", "title": "" }, { "docid": "2bda23032697836dbf2546b44025e93d", "score": "0.5386202", "text": "def open_channel(self):\n self.lg(\"Creating a new channel\", 6)\n self.m_connection.channel(on_open_callback=self.on_channel_open)", "title": "" }, { "docid": "0bc1881b74a19d366fdcbe2aa3b45989", "score": "0.5383988", "text": "def _post_event(self, event, **kwargs):\n self.machine.events.post(event, **kwargs)", "title": "" }, { "docid": "6c05220bfb0032631e92790fa710c935", "score": "0.53712857", "text": "def emit(self, event, *args, **kwargs):\n self._emit(event, self, *args, **kwargs)", "title": "" }, { "docid": "394728125a8982c84ba33e57f3430371", "score": "0.5370906", "text": "def message(self, message, **kwargs):\n self.send_event('message', message=message, **kwargs) # message!", "title": "" }, { "docid": "e1f51cd5b91af94265b952a68cf6438e", "score": "0.5369799", "text": "def event(self, category: str, id: str, data: Any):\n log.debug(f\"pushing event - id:{id} category:{category} data:{data}\")\n self.push({'category': category, 'id': id, 'data': data})", "title": "" }, { "docid": "119a92c338cf82c16b49002913e0aba8", "score": "0.5362002", "text": "async def send_msg_to_channel(self, channel_id: str, msg: str):", "title": "" }, { "docid": "dad1d33624a339e9f6d9937be77515ab", "score": "0.5361305", "text": "def event(self, broker, event):\n pass", "title": "" }, { "docid": "bff45178f78d1a5431a7c8cf5c5886ef", "score": "0.534883", "text": "def publish(self, message) -> None:\n self._q.put(message)", "title": "" }, { "docid": "50b7915828414e264302e70921a3a35c", "score": "0.53468084", "text": "def open_channel(self):\n LOGGER.info('Creating a new channel.')\n
self._connection.channel(on_open_callback=self.on_channel_open)", "title": "" }, { "docid": "cf55405139c42b5244321789642a33af", "score": "0.534461", "text": "def message(self, user, channel, message):\n pass", "title": "" } ]
56515db8662221a8a65b82e7a7d4560f
Mock an Aeotec thermostat node.
[ { "docid": "11777d6e6bccc97f7762bdcb5eed140d", "score": "0.65429515", "text": "def aeotec_radiator_thermostat_fixture(client, aeotec_radiator_thermostat_state):\n node = Node(client, aeotec_radiator_thermostat_state)\n client.driver.controller.nodes[node.node_id] = node\n return node", "title": "" } ]
[ { "docid": "aaecca4a15eea6143cbb16b138d506c4", "score": "0.6052555", "text": "def setUp(self):\n vals = {\n \"name\": \"Ecobee\",\n \"program\": {\n \"climates\": [\n {\"name\": \"Climate1\", \"climateRef\": \"c1\"},\n {\"name\": \"Climate2\", \"climateRef\": \"c2\"},\n ],\n \"currentClimateRef\": \"c1\",\n },\n \"runtime\": {\n \"actualTemperature\": 300,\n \"actualHumidity\": 15,\n \"desiredHeat\": 400,\n \"desiredCool\": 200,\n \"desiredFanMode\": \"on\",\n },\n \"settings\": {\n \"hvacMode\": \"auto\",\n \"heatStages\": 1,\n \"coolStages\": 1,\n \"fanMinOnTime\": 10,\n \"heatCoolMinDelta\": 50,\n \"holdAction\": \"nextTransition\",\n },\n \"equipmentStatus\": \"fan\",\n \"events\": [\n {\n \"name\": \"Event1\",\n \"running\": True,\n \"type\": \"hold\",\n \"holdClimateRef\": \"away\",\n \"endDate\": \"2017-01-01 10:00:00\",\n \"startDate\": \"2017-02-02 11:00:00\",\n }\n ],\n }\n\n self.ecobee = mock.Mock()\n self.ecobee.__getitem__ = mock.Mock(side_effect=vals.__getitem__)\n self.ecobee.__setitem__ = mock.Mock(side_effect=vals.__setitem__)\n\n self.data = mock.Mock()\n self.data.ecobee.get_thermostat.return_value = self.ecobee\n self.thermostat = ecobee.Thermostat(self.data, 1)", "title": "" }, { "docid": "1ed85a7cd74a93e7ff3b401730614a7d", "score": "0.5971485", "text": "def test_get_node(self):\n pass", "title": "" }, { "docid": "8575d98cf29762828723a99126b01023", "score": "0.5894163", "text": "def test_nfxnode_init(self):\n nobject = MagicMock(spec=NfxNode)\n self.assertIsInstance(nobject, NfxNode)", "title": "" }, { "docid": "737d9ab3dcd225abef47c5ba9e25c7f8", "score": "0.5881777", "text": "def nortek_thermostat_fixture(client, nortek_thermostat_state):\n node = Node(client, copy.deepcopy(nortek_thermostat_state))\n client.driver.controller.nodes[node.node_id] = node\n return node", "title": "" }, { "docid": "d1f6a7969a6e3eeff02d51a0e47343b0", "score": "0.5874753", "text": "def zha_device():\n dev = mock.MagicMock()\n dev.manufacturer = MANUFACTURER\n dev.model = MODEL\n return dev", "title": "" }, { "docid": "c5721f31eb948131e065b436f8c89d32", "score": "0.585254", "text": "def test_thermostat(client):\n \n rv = client.post('/thermostat', data=dict(\n status=1,\n settemp=42,\n ), follow_redirects=True)\n assert b'<h1 class=\"current-temperature\">42' in rv.data", "title": "" }, { "docid": "2ebe505be8140aeaf732410cd751fa68", "score": "0.58307475", "text": "def cna(mac):\n nic = mock.MagicMock()\n nic.mac = mac\n nic.vswitch_uri = 'fake_href'\n return nic", "title": "" }, { "docid": "0d7a2268a67e1f709f771834ae79d2dd", "score": "0.5753022", "text": "def climate_airzone_aidoo_control_hvac_unit_fixture(\n client, climate_airzone_aidoo_control_hvac_unit_state\n):\n node = Node(client, copy.deepcopy(climate_airzone_aidoo_control_hvac_unit_state))\n client.driver.controller.nodes[node.node_id] = node\n return node", "title": "" }, { "docid": "06b0dc32c9b2bfc0fa0e33cd5634d290", "score": "0.57523584", "text": "def test_handle_nodes(self):\n node = Node(PUBLIC_KEY, PRIVATE_KEY, self.event_loop, self.connector,\n self.reply_port)\n node.trigger_task = MagicMock()\n msg_dict = {\n 'uuid': str(uuid.uuid4()),\n 'recipient': PUBLIC_KEY,\n 'sender': PUBLIC_KEY,\n 'reply_port': 1908,\n 'version': self.version,\n 'nodes': [[PUBLIC_KEY, self.version,\n 'http://192.168.0.1:1908/'], ]\n }\n seal = get_seal(msg_dict, PRIVATE_KEY)\n msg_dict['seal'] = seal\n msg_dict['message'] = 'nodes'\n message = from_dict(msg_dict)\n node.handle_nodes(message)\n node.trigger_task.assert_called_once_with(message)", 
"title": "" }, { "docid": "60a629243c4380f1145a615fb199a689", "score": "0.5724476", "text": "def test_reboot_node(self):\n node = Node(name='node')\n node.node_sal.client.raw = MagicMock()\n node.reboot()\n\n node.node_sal.reboot.assert_called_with()", "title": "" }, { "docid": "2c7e067e427e40c84207694112acd109", "score": "0.5702731", "text": "def aeon_smart_switch_6_fixture(client, aeon_smart_switch_6_state):\n node = Node(client, aeon_smart_switch_6_state)\n client.driver.controller.nodes[node.node_id] = node\n return node", "title": "" }, { "docid": "90ac1e95f992604882bd2f92876a1975", "score": "0.57002544", "text": "def test_is_smart_sleep_node():\n const = get_const(\"1.4\")\n sensor_id = 1\n\n sensor = Sensor(sensor_id)\n sensor.add_child_sensor(0, const.Presentation.S_LIGHT_LEVEL)\n\n assert not sensor.is_smart_sleep_node\n\n sensor.new_state[sensor_id] = {}\n\n assert sensor.is_smart_sleep_node", "title": "" }, { "docid": "bf4d1bfe69145a9a663d3d5f755136ab", "score": "0.56195694", "text": "def test_node_info(self):\n\n class MockElasticNodes:\n \"\"\"\n Mock of Elasticsearch NodesClient\n \"\"\"\n\n def info(self, node_id=None, flat_settings=None):\n \"\"\"\n Mock of info method\n \"\"\"\n return [{\"test\": \"key\"}]\n\n class MockElastic:\n \"\"\"\n Mock of Elasticsearch client\n \"\"\"\n\n nodes = MockElasticNodes()\n\n with patch.object(\n elasticsearch, \"_get_instance\", MagicMock(return_value=MockElastic())\n ):\n self.assertListEqual(elasticsearch.node_info(), [{\"test\": \"key\"}])", "title": "" }, { "docid": "96e7b1b979b826aff30e57f6b90c202a", "score": "0.5606814", "text": "def fan_generic_fixture(client, fan_generic_state):\n node = Node(client, copy.deepcopy(fan_generic_state))\n client.driver.controller.nodes[node.node_id] = node\n return node", "title": "" }, { "docid": "7887e5d61aeefc5125722804b72a4712", "score": "0.5577049", "text": "def test_node(self):\n value = Node(Mock())\n init_value = self.context.node\n\n self.context.node = value\n\n assert init_value is None\n assert self.context.node == value\n assert self.context['node'] == value", "title": "" }, { "docid": "7a5d9c6dc5e9256a2856c0f8e7fbbcd4", "score": "0.556231", "text": "def simulate_tower(tower_j):\n tower = towers.simple_tower.load(tower_j)\n tower_s = tower.serialize()\n keys = list(tower.blocks.keys())[1:]\n with tower_scene.TowerPhysics(tower_s) as scene:\n trace = scene.get_trace(120, keys)\n return trace", "title": "" }, { "docid": "57c3ba1462a3ce2f88b7da68a89f9578", "score": "0.55549103", "text": "async def test_anna_climate_sensor_entities(\n hass: HomeAssistant, mock_smile_anna: MagicMock, init_integration: MockConfigEntry\n) -> None:\n state = hass.states.get(\"sensor.opentherm_outdoor_air_temperature\")\n assert state\n assert float(state.state) == 3.0", "title": "" }, { "docid": "8aceffa44487e3ab3ce421d47cb3e6ce", "score": "0.5524825", "text": "def test_node_sal(self):\n vm = Vm('vm', data=self.valid_data)\n node_sal_return = 'node_sal'\n patch('js9.j.clients.zero_os.sal.get_node', MagicMock(return_value=node_sal_return)).start()\n node_sal = vm.node_sal\n\n assert node_sal == node_sal_return\n j.clients.zero_os.sal.get_node.assert_called_with(NODE_CLIENT)", "title": "" }, { "docid": "1b41fbb2fea88f81587fe0fed89cf225", "score": "0.55097455", "text": "def test_thermocycler_set_target_block_temperature(\n decoy: Decoy,\n transport: ChildThreadTransport,\n subject: SyncClient,\n) -> None:\n request = commands.thermocycler.SetTargetBlockTemperatureCreate(\n 
params=commands.thermocycler.SetTargetBlockTemperatureParams(\n moduleId=\"module-id\",\n celsius=45.6,\n blockMaxVolumeUl=12.3,\n holdTimeSeconds=123.4,\n )\n )\n response = commands.thermocycler.SetTargetBlockTemperatureResult(\n targetBlockTemperature=45.6\n )\n decoy.when(transport.execute_command(request=request)).then_return(response)\n result = subject.thermocycler_set_target_block_temperature(\n module_id=\"module-id\",\n celsius=45.6,\n block_max_volume=12.3,\n hold_time_seconds=123.4,\n )\n\n assert result == response", "title": "" }, { "docid": "2e1a9ad0290f3c4e8c53c42975dc9c7a", "score": "0.54932654", "text": "def test_set_hvac_mode(self):\n self.data.reset_mock()\n self.thermostat.set_hvac_mode(\"heat_cool\")\n self.data.ecobee.set_hvac_mode.assert_has_calls([mock.call(1, \"auto\")])\n self.data.reset_mock()\n self.thermostat.set_hvac_mode(\"heat\")\n self.data.ecobee.set_hvac_mode.assert_has_calls([mock.call(1, \"heat\")])", "title": "" }, { "docid": "2f799e8d97b3c6798677ba5117fc754b", "score": "0.548459", "text": "def test_02_get_teapot(self):\n\n the_heart = Heartbeat()\n the_heart.get_teapot()", "title": "" }, { "docid": "81ba41e8e27d392be9a0a3cd050bedb4", "score": "0.548245", "text": "async def test_sensors(hass: HomeAssistant) -> None:\n mock_entry = _mock_config_entry()\n\n with patch(\"aurorapy.client.AuroraSerialClient.connect\", return_value=None), patch(\n \"aurorapy.client.AuroraSerialClient.measure\",\n side_effect=_simulated_returns,\n ), patch(\n \"aurorapy.client.AuroraSerialClient.serial_number\",\n return_value=\"9876543\",\n ), patch(\n \"aurorapy.client.AuroraSerialClient.version\",\n return_value=\"9.8.7.6\",\n ), patch(\n \"aurorapy.client.AuroraSerialClient.pn\",\n return_value=\"A.B.C\",\n ), patch(\n \"aurorapy.client.AuroraSerialClient.firmware\",\n return_value=\"1.234\",\n ), patch(\n \"aurorapy.client.AuroraSerialClient.cumulated_energy\",\n side_effect=_simulated_returns,\n ):\n mock_entry.add_to_hass(hass)\n await hass.config_entries.async_setup(mock_entry.entry_id)\n await hass.async_block_till_done()\n\n power = hass.states.get(\"sensor.mydevicename_power_output\")\n assert power\n assert power.state == \"45.7\"\n\n temperature = hass.states.get(\"sensor.mydevicename_temperature\")\n assert temperature\n assert temperature.state == \"9.9\"\n\n energy = hass.states.get(\"sensor.mydevicename_total_energy\")\n assert energy\n assert energy.state == \"12.35\"", "title": "" }, { "docid": "b8af316378206e2d9e0789a327348b3b", "score": "0.54782593", "text": "def test_node_info_failure(self):\n\n class MockElasticNodes:\n \"\"\"\n Mock of Elasticsearch NodesClient\n \"\"\"\n\n def info(self, node_id=None, flat_settings=None):\n \"\"\"\n Mock of info method\n \"\"\"\n raise TransportError(\"custom error\", 123)\n\n class MockElastic:\n \"\"\"\n Mock of Elasticsearch client\n \"\"\"\n\n nodes = MockElasticNodes()\n\n with patch.object(\n elasticsearch, \"_get_instance\", MagicMock(return_value=MockElastic())\n ):\n self.assertRaises(CommandExecutionError, elasticsearch.node_info)", "title": "" }, { "docid": "c9cffd6a9f197e1d2924a793018767f1", "score": "0.5466874", "text": "def test_hvac_mode2(self):\n assert self.thermostat.hvac_mode == \"heat_cool\"\n self.ecobee[\"settings\"][\"hvacMode\"] = \"heat\"\n assert self.thermostat.hvac_mode == \"heat\"", "title": "" }, { "docid": "f1857d261f8bdd6f01312f5e7a5a8854", "score": "0.5454761", "text": "def test_target_temperature_low(self):\n assert 40 == self.thermostat.target_temperature_low\n 
self.ecobee[\"runtime\"][\"desiredHeat\"] = 502\n assert 50.2 == self.thermostat.target_temperature_low", "title": "" }, { "docid": "86057d4acc5b300baf94baa31da58693", "score": "0.5442183", "text": "def test_handleEvents_DoorOpening_Event(self):\n\n\n self.myNode = Node('ENTRANCE',self.myHub,'','http://192.168.0.167','ENV')\n\n emailsBefore = numberOfEmailsInbox('[email protected]','miller12')\n\n self.myNode.addSensor(IPDoorSwitch('SW-1',0,self.myNode,'LOW'))\n self.myNode.scanSensors('LOW') # scan once all sensors\n self.myNode.getHub().getEventQ().handleEvents()\n\n self.assertEqual(self.myNode.getHub().getEventQ().getEventQLength(),0)\n\n emailsAfter = numberOfEmailsInbox('[email protected]','miller12')\n\n self.assertGreater(emailsAfter,emailsBefore)", "title": "" }, { "docid": "833814a6655a2b2008ca8408aff4dfe8", "score": "0.5435297", "text": "def test_tos_fix():\n assert Tos is OceanFixGrid", "title": "" }, { "docid": "7b3fd94855d4cd13b4682f06b124baf6", "score": "0.5419877", "text": "async def test_lawn_mower_default(hass: HomeAssistant) -> None:\n lawn_mower = MockLawnMowerEntity()\n lawn_mower.hass = hass\n\n assert lawn_mower.state is None", "title": "" }, { "docid": "e1a721955e8c54bf6e3612fd451ef461", "score": "0.54157954", "text": "def test_monitor_node(self):\n node = Node(name='node')\n node.install = MagicMock()\n node._start_all_containers = MagicMock()\n node._start_all_vms = MagicMock()\n node.state.set('actions', 'install', 'ok')\n node.node_sal.is_running = MagicMock(return_value=True)\n node.node_sal.uptime = MagicMock(return_value=40.0)\n node._monitor()\n\n assert not node._start_all_containers.called\n assert not node._start_all_vms.called\n assert not node.install.called", "title": "" }, { "docid": "324f5dd7dc78a7f79463e1c3e73ef382", "score": "0.5400927", "text": "def test_get_node(self, client):\n # get with node with ID == 1\n node = client.api.get_node(LAB_PATH, '1')\n assert node['type'] is not None", "title": "" }, { "docid": "d695a002164b3b6aac7fe4f36163f20c", "score": "0.53779376", "text": "async def test_with_thermocycler(\n setup_klass: Type[simulator_setup.SimulatorSetup],\n) -> None:\n setup = setup_klass(\n attached_modules={\n \"thermocycler\": [\n simulator_setup.ModuleCall(\n \"set_temperature\",\n kwargs={\n \"temperature\": 3,\n \"hold_time_seconds\": 1,\n \"hold_time_minutes\": 2,\n \"volume\": 5,\n },\n )\n ]\n }\n )\n simulator = await simulator_setup.create_simulator(setup)\n\n assert type(simulator.attached_modules[0]) == Thermocycler\n assert simulator.attached_modules[0].live_data == {\n \"data\": {\n \"currentCycleIndex\": None,\n \"currentStepIndex\": None,\n \"currentTemp\": 3,\n \"holdTime\": 0,\n \"lid\": \"open\",\n \"lidTarget\": None,\n \"lidTemp\": 23,\n \"lidTempStatus\": \"idle\",\n \"rampRate\": None,\n \"targetTemp\": 3,\n \"totalCycleCount\": None,\n \"totalStepCount\": None,\n },\n \"status\": \"holding at target\",\n }", "title": "" }, { "docid": "f125b54f731b417e11ebd7fef6370102", "score": "0.53572214", "text": "def test_Xiaomi_MHO_C303(self):", "title": "" }, { "docid": "8f3493e93572bcd35aaf0e7b0a1b098f", "score": "0.5348399", "text": "def test_set_fan_min_on_time(self):\n self.data.reset_mock()\n self.thermostat.set_fan_min_on_time(15)\n self.data.ecobee.set_fan_min_on_time.assert_has_calls([mock.call(1, 15)])\n self.data.reset_mock()\n self.thermostat.set_fan_min_on_time(20)\n self.data.ecobee.set_fan_min_on_time.assert_has_calls([mock.call(1, 20)])", "title": "" }, { "docid": "df7927f494978d07116eb7daf45ab0a6", "score": 
"0.532617", "text": "def test_hvac_mode(self):\n assert self.thermostat.hvac_mode == \"heat_cool\"\n self.ecobee[\"settings\"][\"hvacMode\"] = \"heat\"\n assert self.thermostat.hvac_mode == \"heat\"\n self.ecobee[\"settings\"][\"hvacMode\"] = \"cool\"\n assert self.thermostat.hvac_mode == \"cool\"\n self.ecobee[\"settings\"][\"hvacMode\"] = \"auxHeatOnly\"\n assert self.thermostat.hvac_mode == \"heat\"\n self.ecobee[\"settings\"][\"hvacMode\"] = \"off\"\n assert self.thermostat.hvac_mode == \"off\"", "title": "" }, { "docid": "6272dd35c8a6b0c32cd3abd34cca47ee", "score": "0.53018737", "text": "def test_at_anything_but_node(at):\n mg = landlab.RasterModelGrid((5, 3))\n mg.add_empty(\"topographic__elevation\", at=at)\n with pytest.raises(NotImplementedError):\n landlab.plot.imshowhs_grid(mg, \"topographic__elevation\", at=at)", "title": "" }, { "docid": "10519c91fbf7101112ed4dfb10b635ad", "score": "0.52956605", "text": "def test__configure_veth(self):\n # Access protected _configure_veth\n # pylint: disable=W0212\n newnet._configure_veth(\n 'test1234', '192.168.0.100', '192.168.254.254'\n )\n\n treadmill.netdev.link_set_up.assert_has_calls(\n [\n mock.call('lo'),\n mock.call('eth0'),\n ]\n )\n treadmill.netdev.dev_conf_arp_ignore_set.assert_called_with('eth0', 3)\n treadmill.netdev.addr_add.assert_called_with(\n '192.168.0.100/32', 'eth0', addr_scope='link'\n )\n treadmill.netdev.route_add.assert_has_calls(\n [\n mock.call(\n '192.168.254.254',\n devname='eth0',\n route_scope='link'\n ),\n mock.call(\n 'default',\n via='192.168.254.254',\n src='192.168.0.100',\n )\n ]\n )\n self.assertTrue(treadmill.iptables.initialize_container.called)", "title": "" }, { "docid": "295aff52a39fb5d563f23360401996de", "score": "0.5294111", "text": "def test_api_none():\n eth_api = EthereumApi(**ETHEREUM_TESTNET_CONFIG)\n assert eth_api.api is not None, \"The api property is None.\"", "title": "" }, { "docid": "c4f38b07ce10b90acff44259c605e25d", "score": "0.5284805", "text": "def test_target_temperature_high(self):\n assert 20 == self.thermostat.target_temperature_high\n self.ecobee[\"runtime\"][\"desiredCool\"] = 103\n assert 10.3 == self.thermostat.target_temperature_high", "title": "" }, { "docid": "303c8aee09e692abaade5d9ddf6b0d21", "score": "0.5263437", "text": "def __test_laser(self):", "title": "" }, { "docid": "ce70380e91a9bdc9fa841a16245418e7", "score": "0.5233513", "text": "def test_thermocycler_wait_for_block_temperature(\n decoy: Decoy,\n transport: ChildThreadTransport,\n subject: SyncClient,\n) -> None:\n request = commands.thermocycler.WaitForBlockTemperatureCreate(\n params=commands.thermocycler.WaitForBlockTemperatureParams(moduleId=\"module-id\")\n )\n response = commands.thermocycler.WaitForBlockTemperatureResult()\n decoy.when(transport.execute_command(request=request)).then_return(response)\n result = subject.thermocycler_wait_for_block_temperature(module_id=\"module-id\")\n\n assert result == response", "title": "" }, { "docid": "158f795e40816bb1a0a14b347c583b06", "score": "0.5226427", "text": "async def test_anna_as_smt_climate_sensor_entities(\n hass: HomeAssistant, mock_smile_anna: MagicMock, init_integration: MockConfigEntry\n) -> None:\n state = hass.states.get(\"sensor.opentherm_outdoor_air_temperature\")\n assert state\n assert float(state.state) == 3.0\n\n state = hass.states.get(\"sensor.opentherm_water_temperature\")\n assert state\n assert float(state.state) == 29.1\n\n state = hass.states.get(\"sensor.opentherm_dhw_temperature\")\n assert state\n assert float(state.state) == 
46.3\n\n state = hass.states.get(\"sensor.anna_illuminance\")\n assert state\n assert float(state.state) == 86.0", "title": "" }, { "docid": "15056a22247fea76a46dab2578b77e44", "score": "0.5219661", "text": "def aeotec_nano_shutter_cover_fixture(client, aeotec_nano_shutter_state):\n node = Node(client, copy.deepcopy(aeotec_nano_shutter_state))\n client.driver.controller.nodes[node.node_id] = node\n return node", "title": "" }, { "docid": "a885ece70b2e09b18c269cf157d97a81", "score": "0.52195865", "text": "def test_observatory():\n assert xrt.observatory == \"Hinode\"", "title": "" }, { "docid": "19af0fbc59af82fd2112f870849e62c8", "score": "0.5212078", "text": "def test_02_OneDevice0(self):\n l_xml = XmlConfigTools.find_xml_section(self.m_pyhouse_obj, 'HouseDivision/EntertainmentSection/OnkyoSection')\n l_xml = l_xml.findall('Device')[0]\n l_device = EntertainmentDeviceInformation()\n # print(PrettyFormatAny.form(l_xml, 'C1-02-A - Onkyo XML'))\n l_ret = entertainmentXML().read_entertainment_device(l_xml, l_device)\n # print(PrettyFormatAny.form(l_ret, 'C1-02-B - Onkyo Device'))\n self.assertEqual(l_ret.Name, TESTING_ONKYO_DEVICE_NAME_0)\n self.assertEqual(str(l_ret.Active), TESTING_ONKYO_DEVICE_ACTIVE_0)\n self.assertEqual(str(l_ret.Key), TESTING_ONKYO_DEVICE_KEY_0)\n self.assertEqual(str(l_ret.UUID), TESTING_ONKYO_DEVICE_UUID_0)\n self.assertEqual(l_ret.Comment, TESTING_ONKYO_DEVICE_COMMENT_0)\n self.assertEqual(l_ret.CommandSet, TESTING_ONKYO_DEVICE_COMMAND_SET_0)\n self.assertEqual(l_ret.Host, TESTING_ONKYO_DEVICE_HOST_0)\n self.assertEqual(convert.long_to_str(l_ret.IPv4), TESTING_ONKYO_DEVICE_IPV4_0)\n self.assertEqual(convert.long_to_str(l_ret.IPv6).lower(), TESTING_ONKYO_DEVICE_IPV6_0.lower())\n self.assertEqual(l_ret.Model, TESTING_ONKYO_DEVICE_MODEL_0)\n self.assertEqual(str(l_ret.Port), TESTING_ONKYO_DEVICE_PORT_0)\n self.assertEqual(l_ret.RoomName, TESTING_ONKYO_DEVICE_ROOM_NAME_0)\n self.assertEqual(l_ret.RoomUUID, TESTING_ONKYO_DEVICE_ROOM_UUID_0)\n self.assertEqual(l_ret.Type, TESTING_ONKYO_DEVICE_TYPE_0)\n self.assertEqual(str(l_ret.Volume), TESTING_ONKYO_DEVICE_VOLUME_0)", "title": "" }, { "docid": "b736b81009f1ad4bc79de443d7be861a", "score": "0.5209216", "text": "async def test_adam_climate_sensor_entity_2(\n hass: HomeAssistant, mock_smile_adam_4: MagicMock, init_integration: MockConfigEntry\n) -> None:\n state = hass.states.get(\"sensor.woonkamer_humidity\")\n assert state\n assert float(state.state) == 56.2", "title": "" }, { "docid": "4bf02c3f3bae6dfb42a74caf8f0db84a", "score": "0.5186921", "text": "def test_alive_trees(self):\n apple_orchard = AppleOrchard()\n self.assertEqual(apple_orchard.alive_trees(), 0)\n apple_orchard.plant_tree()\n self.assertEqual(apple_orchard.alive_trees(), 1)", "title": "" }, { "docid": "c533dbd59d24310abddf691b2afae226", "score": "0.5184059", "text": "def test_plant_tree(self):\n apple_orchard = AppleOrchard()\n self.assertEqual(apple_orchard.trees, [])\n apple_orchard.plant_tree()\n self.assertEqual(type(apple_orchard.trees[0]), AppleTree)", "title": "" }, { "docid": "758d6794b226b37657c088e805c296c4", "score": "0.5180979", "text": "def test_Xiaomi_MHO_C401(self):", "title": "" }, { "docid": "00cd4cb9dfed13b0a1fda27413025482", "score": "0.5176161", "text": "def switch_zooz_zen72_fixture(client, switch_zooz_zen72_state):\n node = Node(client, copy.deepcopy(switch_zooz_zen72_state))\n client.driver.controller.nodes[node.node_id] = node\n return node", "title": "" }, { "docid": "c6d7ad9d9d787c050bccd9127b0a4f6e", "score": "0.5169507", "text": 
"def test_node_is_root():\n node = graph.TreeNode()\n node.generation = 1\n assert node.is_root()", "title": "" }, { "docid": "4a251eb56b4cc4629c94b59494ab3121", "score": "0.51655644", "text": "def test_send_to_random_node():\n B.test_random_node = send_to_random_node(A.foo)\n\n b = B()\n a = b.test_random_node()\n\n assert b.b_called\n assert a.a_called", "title": "" }, { "docid": "e27110738ef6dbb4552bda9748dc1a0d", "score": "0.5162147", "text": "def test_get_device_mapping_mtii():\n node = MockNode(manufacturer_id=\"013c\", product_type=\"0002\", product_id=\"0002\")\n value = MockValue(data=0, node=node, index=0)\n assert workaround.get_device_mapping(value) == \"trigger_no_off_event\"", "title": "" }, { "docid": "68c94e4f9031cdc25888cc2a9f1afe28", "score": "0.51447654", "text": "def test_node_instantiation(bst_node):\n assert bst_node.val == 1\n assert bst_node.left is None\n assert bst_node.right is None", "title": "" }, { "docid": "4962b6958f578c822285bcc13439e938", "score": "0.5138534", "text": "def fan_honeywell_39358_fixture(client, fan_honeywell_39358_state):\n node = Node(client, copy.deepcopy(fan_honeywell_39358_state))\n client.driver.controller.nodes[node.node_id] = node\n return node", "title": "" }, { "docid": "56c1446aaf60ebb2bcdbad6ff21048b4", "score": "0.5137574", "text": "def test_set_fan_mode_on(self):\n self.data.reset_mock()\n self.thermostat.set_fan_mode(\"on\")\n self.data.ecobee.set_fan_mode.assert_has_calls(\n [mock.call(1, \"on\", 20, 40, \"nextTransition\")]\n )", "title": "" }, { "docid": "cc348ed8e816cc063b3364b1212ae2b8", "score": "0.51237094", "text": "def get_zone() -> MagicMock:\n zone = MagicMock()\n\n zone.power = False\n return zone", "title": "" }, { "docid": "63e6547876fe3bcdb9a3bf2c49c95d56", "score": "0.5117479", "text": "def app(mocker):\n\n class MockTelnetModule(Module):\n @provider\n @singleton\n def provide_telnet(self) -> telnetlib.Telnet:\n telnet = telnetlib.Telnet()\n mocker.patch.multiple(telnet, write=mocker.DEFAULT, read_until=mocker.DEFAULT)\n telnet.write.return_value = None\n mocker.patch.object(telnet, 'read_until')\n telnet.read_until.return_value = '\\r\\nrate_finder 1, 139\\r\\nend#'\n\n return telnet\n\n _app = create_app(TestConfig, binds=[MockTelnetModule])\n ctx = _app.test_request_context()\n ctx.push()\n\n yield _app\n\n ctx.pop()", "title": "" }, { "docid": "d094eed1f93fc3d0081ba6956380582d", "score": "0.5116257", "text": "async def test_tai8570_sensors(\n hass: HomeAssistant,\n config_entry: ConfigEntry,\n owproxy: MagicMock,\n device_id: str,\n entity_registry: er.EntityRegistry,\n caplog: pytest.LogCaptureFixture,\n) -> None:\n mock_devices = deepcopy(MOCK_OWPROXY_DEVICES)\n mock_device = mock_devices[device_id]\n mock_device[ATTR_INJECT_READS].append(OwnetError)\n mock_device[ATTR_INJECT_READS].append(OwnetError)\n\n with _patch_dict(MOCK_OWPROXY_DEVICES, mock_devices):\n setup_owproxy_mock_devices(owproxy, Platform.SENSOR, [device_id])\n\n with caplog.at_level(logging.DEBUG):\n await hass.config_entries.async_setup(config_entry.entry_id)\n await hass.async_block_till_done()\n\n assert entity_registry.entities.get(\"sensor.12_111111111111_temperature\") is None\n assert \"unreachable sensor /12.111111111111/TAI8570/temperature\" in caplog.text\n\n assert entity_registry.entities.get(\"sensor.12_111111111111_pressure\") is None\n assert \"unreachable sensor /12.111111111111/TAI8570/pressure\" in caplog.text", "title": "" }, { "docid": "0488f1d929fc99fda4126331f9fe9112", "score": "0.51017624", "text": "def 
_testing_tree():\n branch1 = TreeBranch(1, 0.5, TreeLeaf([1, 2.5, 3]), TreeLeaf([2, 3, 1]))\n return TreeBranch(0, 3, TreeLeaf([-3, -5, 3]), branch1)", "title": "" }, { "docid": "6f5397e4c4cdf715a54213a9a9828659", "score": "0.5099457", "text": "async def test_electrical_measurement_init(\n hass: HomeAssistant,\n zigpy_device_mock,\n zha_device_joined,\n) -> None:\n\n cluster_id = homeautomation.ElectricalMeasurement.cluster_id\n zigpy_device = zigpy_device_mock(\n {\n 1: {\n SIG_EP_INPUT: [cluster_id, general.Basic.cluster_id],\n SIG_EP_OUTPUT: [],\n SIG_EP_TYPE: zigpy.profiles.zha.DeviceType.ON_OFF_SWITCH,\n }\n }\n )\n cluster = zigpy_device.endpoints[1].in_clusters[cluster_id]\n zha_device = await zha_device_joined(zigpy_device)\n entity_id = find_entity_id(\n Platform.SENSOR, zha_device, hass, qualifier=\"active_power\"\n )\n\n # allow traffic to flow through the gateway and devices\n await async_enable_traffic(hass, [zha_device])\n\n # test that the sensor now have a state of unknown\n assert hass.states.get(entity_id).state == STATE_UNKNOWN\n\n await send_attributes_report(hass, cluster, {0: 1, 1291: 100, 10: 1000})\n assert int(hass.states.get(entity_id).state) == 100\n\n cluster_handler = zha_device._endpoints[1].all_cluster_handlers[\"1:0x0b04\"]\n assert cluster_handler.ac_power_divisor == 1\n assert cluster_handler.ac_power_multiplier == 1\n\n # update power divisor\n await send_attributes_report(hass, cluster, {0: 1, 1291: 20, 0x0403: 5, 10: 1000})\n assert cluster_handler.ac_power_divisor == 5\n assert cluster_handler.ac_power_multiplier == 1\n assert hass.states.get(entity_id).state == \"4.0\"\n\n await send_attributes_report(hass, cluster, {0: 1, 1291: 30, 0x0605: 10, 10: 1000})\n assert cluster_handler.ac_power_divisor == 10\n assert cluster_handler.ac_power_multiplier == 1\n assert hass.states.get(entity_id).state == \"3.0\"\n\n # update power multiplier\n await send_attributes_report(hass, cluster, {0: 1, 1291: 20, 0x0402: 6, 10: 1000})\n assert cluster_handler.ac_power_divisor == 10\n assert cluster_handler.ac_power_multiplier == 6\n assert hass.states.get(entity_id).state == \"12.0\"\n\n await send_attributes_report(hass, cluster, {0: 1, 1291: 30, 0x0604: 20, 10: 1000})\n assert cluster_handler.ac_power_divisor == 10\n assert cluster_handler.ac_power_multiplier == 20\n assert hass.states.get(entity_id).state == \"60.0\"", "title": "" }, { "docid": "27c77f1a40c04fd238de9f7764f2fb12", "score": "0.50930524", "text": "def test_get_root(self):\n pass", "title": "" }, { "docid": "ed2231612723cd44ece48bf2a3f14c67", "score": "0.50882787", "text": "def test_tree_manager_config():\n TreeManagerConfiguration()", "title": "" }, { "docid": "9e08ebe9b9117558edaa412524de62b3", "score": "0.5079762", "text": "def test_handleEvents_TempEnvThreshold_Event(self):\n\n\n self.myNode = Node('KITCHEN',self.myHub,'','http://192.168.0.119','ENV')\n\n\n emailsBefore = numberOfEmailsInbox('[email protected]','miller12')\n\n self.myNode.addSensor(IPTempSensor('TMP-1',[0,1],self.myNode,'LOW'))\n self.myNode.scanSensors('LOW') # scan once all sensors\n self.myNode.getHub().getEventQ().handleEvents()\n\n self.assertEqual(self.myNode.getHub().getEventQ().getEventQLength(),0)\n\n emailsAfter = numberOfEmailsInbox('[email protected]','miller12')\n\n self.assertGreater(emailsAfter,emailsBefore)\n\n time.sleep(4)", "title": "" }, { "docid": "558e99786efd1de6db5b9e14ebceae68", "score": "0.5075707", "text": "def test_Xiaomi_HHCCPOT002(self):", "title": "" }, { "docid": "4ff76fb16cd91e4acace687dd5ace8a9", 
"score": "0.5070587", "text": "def test_call_phylogenetic(self):\n c = AlphaDiversityCalc(metric=PD_whole_tree,\n is_phylogenetic=True)\n self.assertEqual(c(data_path=self.otu_table1_fp, tree_path=self.tree1, \\\n taxon_names=self.otu_table1.ObservationIds, \n sample_names=self.otu_table1.SampleIds), \n [13, 17, 0])", "title": "" }, { "docid": "425f1f591d964ea2f8032996bbe07b36", "score": "0.5068987", "text": "async def test_onboarding_core_no_rpi_power(\n hass: HomeAssistant,\n hass_storage: dict[str, Any],\n hass_client: ClientSessionGenerator,\n aioclient_mock: AiohttpClientMocker,\n no_rpi,\n mock_default_integrations,\n) -> None:\n mock_storage(hass_storage, {\"done\": [const.STEP_USER]})\n\n assert await async_setup_component(hass, \"onboarding\", {})\n await hass.async_block_till_done()\n\n client = await hass_client()\n\n resp = await client.post(\"/api/onboarding/core_config\")\n\n assert resp.status == 200\n\n await hass.async_block_till_done()\n\n rpi_power_state = hass.states.get(\"binary_sensor.rpi_power_status\")\n assert not rpi_power_state", "title": "" }, { "docid": "83f2cf6b527cfabe9342577dcde5c863", "score": "0.5066437", "text": "def test_kubelet_node(self):\n self._test_metricset('node', 1, self.get_kubelet_hosts())", "title": "" }, { "docid": "f908fef6efa2162c5e5b2ef1f7d1ebd8", "score": "0.5065536", "text": "async def test_sensor(hass: HomeAssistant) -> None:\n\n await setup_mock_myenergi_config_entry(hass)\n\n entity_state = hass.states.get(TEST_HUB_SENSOR_POWER_GRID_ENTITY_ID)\n assert entity_state\n assert entity_state.state == \"4429\"", "title": "" }, { "docid": "01cfaafab5f7756bf681b1624e57f3a7", "score": "0.5063894", "text": "def test_mc():\n t = TrendService()\n assert t is not None", "title": "" }, { "docid": "65ad9c2196b816a011f698486888fb01", "score": "0.50622076", "text": "def test_single_node_tree():\n binary_tree = BinaryTree(Node_BT('True'))\n actual = binary_tree.root.value\n expected = 'True'\n assert actual == expected", "title": "" }, { "docid": "0a240c5b5ecadcfa800e78a894432877", "score": "0.5058398", "text": "def test_enable_disable_obm(obmd_cfg):\n obmd_uri = 'http://localhost' + obmd_cfg['ListenAddr'] + '/node/node-99'\n\n # register a node with obmd:\n requests.put(\n obmd_uri,\n auth=('admin', obmd_cfg['AdminToken']),\n data=json.dumps({\n \"type\": \"ipmi\",\n \"info\": {\n \"addr\": \"10.0.0.4\",\n \"user\": \"ipmuser\",\n \"pass\": \"ipmipass\",\n },\n }))\n\n # and then with hil:\n api.node_register(\n node='node-99',\n obm={\n \"type\": 'http://schema.massopencloud.org/haas/v0/obm/mock',\n \"host\": \"ipmihost\",\n \"user\": \"root\",\n \"password\": \"tapeworm\",\n },\n obmd={\n 'uri': obmd_uri,\n 'admin_token': obmd_cfg['AdminToken'],\n },\n )\n\n # Then create a project, and attach the node.\n api.project_create('anvil-nextgen')\n api.project_connect_node('anvil-nextgen', 'node-99')\n\n # now the test proper:\n\n # First, enable the obm\n api.node_enable_disable_obm('node-99', enabled=True)\n\n # Obm is enabled; we shouldn't be able to detach the node:\n with pytest.raises(errors.BlockedError):\n api.project_detach_node('anvil-nextgen', 'node-99')\n\n # ...so disable it first:\n api.node_enable_disable_obm('node-99', enabled=False)\n\n # ...and then it should work:\n api.project_detach_node('anvil-nextgen', 'node-99')", "title": "" }, { "docid": "377ad75d586b31a82d3df83190a7cffe", "score": "0.50547487", "text": "def test_get_node_by_name(self, client):\n # get with node with name == TEST_NODE\n node = 
client.api.get_node_by_name(LAB_PATH, TEST_NODE)\n assert node['name'] == TEST_NODE", "title": "" }, { "docid": "e6ba77d22d392409e6f12a6e774fd507", "score": "0.50451577", "text": "def test_is_aux_heat_on(self):\n assert not self.thermostat.is_aux_heat\n self.ecobee[\"equipmentStatus\"] = \"fan, auxHeat\"\n assert self.thermostat.is_aux_heat", "title": "" }, { "docid": "2f2e07717808a73622d6d2e4683c5175", "score": "0.50426793", "text": "def mock(ctx: Any) -> None:\n # Create a mock device\n from edge.mocks import ( # noqa: E501, pylint: disable=import-outside-toplevel\n MockDigitalInputDevice,\n )\n\n power_status_device = MockDigitalInputDevice(_POWER_PIN)\n # Run the 'real' software\n with EdgeApp(power_status_device, ctx.obj[\"config\"]):\n # Allow the user to toggle the power status\n while True:\n logging.debug(\"listening for input\")\n char = click.getchar()\n if char == \"t\":\n # Toggle power status\n power_status_device.toggle()\n elif char == \"h\":\n # Set power status high\n power_status_device.high()\n elif char == \"l\":\n # Set power status low\n power_status_device.low()", "title": "" }, { "docid": "d84649e055573d194bfa7d7c3fb06668", "score": "0.5031938", "text": "def test_send_to_all_nodes():\n pass", "title": "" }, { "docid": "6c6bd850c14bce5f45caf1e1d250bdcd", "score": "0.5031499", "text": "def test_01_BaseLight(self):", "title": "" }, { "docid": "05dcbe8124b2ae77a1b83306f733a318", "score": "0.50308794", "text": "def test_mock_depth(self):\n instance = testdata.mock(foo=1)\n self.assertEqual(1, instance.bar.che.foo)", "title": "" }, { "docid": "679de95803d01006fcd7201778052681", "score": "0.5021269", "text": "def test_me(self):\n pass", "title": "" }, { "docid": "4c8c3f22c0d6001c8920a4dfbc83b8ff", "score": "0.5017815", "text": "def test_node(self):\n self.plan()\n pycram.orm.base.MetaData().description = \"Unittest\"\n pycram.task.task_tree.root.insert(self.session, )\n\n node_results = self.session.query(pycram.orm.task.TaskTreeNode).all()\n self.assertEqual(len(node_results), len(pycram.task.task_tree.root))\n\n code_results = self.session.query(pycram.orm.task.Code).all()\n self.assertEqual(len(code_results), len(pycram.task.task_tree.root))\n\n position_results = self.session.query(pycram.orm.base.Position).all()\n self.assertEqual(8, len(position_results))\n\n quaternion_results = self.session.query(pycram.orm.base.Quaternion).all()\n self.assertEqual(8, len(quaternion_results))\n\n park_arms_results = self.session.query(pycram.orm.action_designator.ParkArmsAction).all()\n self.assertEqual(0, len(park_arms_results))\n\n navigate_results = self.session.query(pycram.orm.action_designator.NavigateAction).all()\n self.assertEqual(1, len(navigate_results))\n\n action_results = self.session.query(pycram.orm.action_designator.Action).all()\n self.assertEqual(4, len(action_results))", "title": "" }, { "docid": "c223be2903aa11cabf1c4af4f833ca73", "score": "0.50172895", "text": "def test_switch_register(self):\n switchinfo = {\n \"type\": \"http://schema.massopencloud.org/haas/v0/switches/mock\",\n \"username\": \"name\",\n \"password\": \"asdasd\",\n \"hostname\": \"example.com\"}\n subtype = \"http://schema.massopencloud.org/haas/v0/switches/mock\"\n assert C.switch.register('mytestswitch', subtype, switchinfo) is None", "title": "" }, { "docid": "0816e2daf7c4e08cab85cd68cff705e0", "score": "0.50168735", "text": "def nortek_thermostat_added_event_fixture(client):\n event_data = json.loads(load_fixture(\"zwave_js/nortek_thermostat_added_event.json\"))\n event = Event(\"node 
added\", event_data)\n return event", "title": "" }, { "docid": "af9ae37e9e920fe40ae95cfb47fbdeac", "score": "0.5014725", "text": "def test_simple_robot(self):\n robot = ytr(test_simple_robot)\n\n # Make sure all required fields are set\n self.assertTrue(robot.IsInitialized())\n\n self.assertEquals(0, robot.id, \"Robot ID not correctly set.\")\n\n root = robot.body.root\n self.assertEquals(\"Core\", root.id, \"Root ID not correctly set. (%s)\" % root.id)\n\n self.assertEquals(2, len(root.child), \"Root should have two children.\")\n\n sub1_conn = root.child[0]\n sub2_conn = root.child[1]\n\n # Check connection sources / destinations\n self.assertEquals(0, sub1_conn.src)\n self.assertEquals(1, sub2_conn.src)\n self.assertEquals(1, sub1_conn.dst)\n self.assertEquals(0, sub2_conn.dst)\n\n sub1 = sub1_conn.part\n sub2 = sub2_conn.part\n\n # Check types\n self.assertEquals(\"2Params\", sub1.type)\n self.assertEquals(\"2Params\", sub2.type)\n\n # Check parameter lists\n sub1params = [p.value for p in sub1.param]\n sub2params = [p.value for p in sub2.param]\n\n self.assertEquals([-1, 15], sub1params)\n self.assertEquals([10, 20], sub2params)\n\n # Check the brain\n brain = robot.brain\n\n # 1 + 2 + 2 output, 2 + 2 + 2 input, 3 hidden\n self.assertEquals(14, len(brain.neuron))\n self.assertEquals(len(brain.connection), 2)\n\n conn0 = brain.connection[0]\n self.assertEquals(\"Sub1-out-1\", conn0.src)\n self.assertEquals(\"Sub1-out-1\", conn0.dst)\n self.assertEquals(2, conn0.weight)\n\n conn1 = brain.connection[1]\n self.assertEquals(\"Sub2-in-1\", conn1.src)\n self.assertEquals(\"Sub1-out-1\", conn1.dst)\n self.assertEquals(0, conn1.weight)\n\n hidden1 = [a for a in brain.neuron if a.id == \"Hidden1\"][0]\n hidden1params = [p.value for p in hidden1.param]\n self.assertEquals([0.1, 0.2, 0.3], hidden1params)\n self.assertEquals(\"Oscillator\", hidden1.type)\n self.assertEquals(\"Sub1\", hidden1.partId)\n\n hidden2 = [a for a in brain.neuron if a.id == \"Hidden2\"][0]\n hidden2params = [p.value for p in hidden2.param]\n self.assertEquals([0, 0, 0], hidden2params)\n self.assertEquals(\"Oscillator\", hidden2.type)\n\n hidden3 = [a for a in brain.neuron if a.id == \"Hidden3\"][0]\n self.assertEquals(\"Simple\", hidden3.type)\n\n sub1 = [a for a in brain.neuron if a.id == \"Sub1-out-1\"][0]\n self.assertTrue(sub1.HasField(\"partId\"), \"Sub1 output neuron should have part ID.\")\n self.assertEquals(\"Sub1\", sub1.partId, \"Sub1 output neuron should have `Sub1` part ID.\")\n sub1params = [p.value for p in sub1.param]\n self.assertEquals([0, 10, 0], sub1params)", "title": "" }, { "docid": "043faf4e8478b86a578509d59b27961e", "score": "0.50128365", "text": "async def test_temp_uom(\n hass: HomeAssistant,\n uom,\n raw_temp,\n expected,\n restore,\n hass_ms,\n core_rs,\n zigpy_device_mock,\n zha_device_restored,\n) -> None:\n\n entity_id = \"sensor.fake1026_fakemodel1026_004f3202_temperature\"\n if restore:\n core_rs(entity_id, uom, state=(expected - 2))\n await async_mock_load_restore_state_from_storage(hass)\n\n hass = await hass_ms(\n CONF_UNIT_SYSTEM_METRIC\n if uom == UnitOfTemperature.CELSIUS\n else CONF_UNIT_SYSTEM_IMPERIAL\n )\n\n zigpy_device = zigpy_device_mock(\n {\n 1: {\n SIG_EP_INPUT: [\n measurement.TemperatureMeasurement.cluster_id,\n general.Basic.cluster_id,\n ],\n SIG_EP_OUTPUT: [],\n SIG_EP_TYPE: zigpy.profiles.zha.DeviceType.ON_OFF_SWITCH,\n }\n }\n )\n cluster = zigpy_device.endpoints[1].temperature\n zha_device = await zha_device_restored(zigpy_device)\n entity_id = 
find_entity_id(Platform.SENSOR, zha_device, hass)\n\n if not restore:\n await async_enable_traffic(hass, [zha_device], enabled=False)\n assert hass.states.get(entity_id).state == STATE_UNAVAILABLE\n\n # allow traffic to flow through the gateway and devices\n await async_enable_traffic(hass, [zha_device])\n\n # test that the sensors now have a state of unknown\n if not restore:\n assert hass.states.get(entity_id).state == STATE_UNKNOWN\n\n await send_attribute_report(hass, cluster, 0, raw_temp)\n await hass.async_block_till_done()\n state = hass.states.get(entity_id)\n assert state is not None\n assert round(float(state.state)) == expected\n assert state.attributes[ATTR_UNIT_OF_MEASUREMENT] == uom", "title": "" }, { "docid": "cfd79d00c7ce745814e78f345f4933d8", "score": "0.5012798", "text": "def mock_awair_device(\n client: AwairClient, device: Optional[Dict[str, Any]] = None,\n) -> AwairDevice:\n if not device:\n device = MOCK_GEN1_DEVICE_ATTRS\n\n return AwairDevice(client=client, attributes=device)", "title": "" }, { "docid": "7f75b42a8fdb782893ee40676f505463", "score": "0.5012328", "text": "def test_1(self):\n n1 = Node(5)\n self.assertEqual(n1.value(), 5)", "title": "" }, { "docid": "cf1bfc6cdf4c978f47e1a855f6dbafcf", "score": "0.50108856", "text": "async def test_sensors(hass: HomeAssistant) -> None:\n\n with patch(\n \"pynina.baseApi.BaseAPI._makeRequest\",\n wraps=mocked_request_function,\n ):\n conf_entry: MockConfigEntry = MockConfigEntry(\n domain=DOMAIN, title=\"NINA\", data=ENTRY_DATA\n )\n\n entity_registry: er = er.async_get(hass)\n conf_entry.add_to_hass(hass)\n\n await hass.config_entries.async_setup(conf_entry.entry_id)\n await hass.async_block_till_done()\n\n assert conf_entry.state == ConfigEntryState.LOADED\n\n state_w1 = hass.states.get(\"binary_sensor.warning_aach_stadt_1\")\n entry_w1 = entity_registry.async_get(\"binary_sensor.warning_aach_stadt_1\")\n\n assert state_w1.state == STATE_ON\n assert state_w1.attributes.get(ATTR_HEADLINE) == \"Ausfall Notruf 112\"\n assert (\n state_w1.attributes.get(ATTR_DESCRIPTION)\n == \"Es treten Sturmböen mit Geschwindigkeiten zwischen 70 km/h (20m/s, 38kn, Bft 8) und 85 km/h (24m/s, 47kn, Bft 9) aus westlicher Richtung auf. 
In Schauernähe sowie in exponierten Lagen muss mit schweren Sturmböen bis 90 km/h (25m/s, 48kn, Bft 10) gerechnet werden.\"\n )\n assert state_w1.attributes.get(ATTR_SENDER) == \"Deutscher Wetterdienst\"\n assert state_w1.attributes.get(ATTR_SEVERITY) == \"Minor\"\n assert state_w1.attributes.get(ATTR_RECOMMENDED_ACTIONS) == \"\"\n assert (\n state_w1.attributes.get(ATTR_AFFECTED_AREAS)\n == \"Gemeinde Oberreichenbach, Gemeinde Neuweiler, Stadt Nagold, Stadt Neubulach, Gemeinde Schömberg, Gemeinde Simmersfeld, Gemeinde Simmozheim, Gemeinde Rohrdorf, Gemeinde Ostelsheim, Gemeinde Ebhausen, Gemeinde Egenhausen, Gemeinde Dobel, Stadt Bad Liebenzell, Stadt Solingen, Stadt Haiterbach, Stadt Bad Herrenalb, Gemeinde Höfen an der Enz, Gemeinde Gechingen, Gemeinde Enzklösterle, Gemeinde Gutach (Schwarzwaldbahn) und 3392 weitere.\"\n )\n assert state_w1.attributes.get(ATTR_ID) == \"mow.DE-NW-BN-SE030-20201014-30-000\"\n assert state_w1.attributes.get(ATTR_SENT) == \"2021-10-11T05:20:00+01:00\"\n assert state_w1.attributes.get(ATTR_START) == \"2021-11-01T05:20:00+01:00\"\n assert state_w1.attributes.get(ATTR_EXPIRES) == \"3021-11-22T05:19:00+01:00\"\n\n assert entry_w1.unique_id == \"083350000000-1\"\n assert state_w1.attributes.get(\"device_class\") == BinarySensorDeviceClass.SAFETY\n\n state_w2 = hass.states.get(\"binary_sensor.warning_aach_stadt_2\")\n entry_w2 = entity_registry.async_get(\"binary_sensor.warning_aach_stadt_2\")\n\n assert state_w2.state == STATE_OFF\n assert state_w2.attributes.get(ATTR_HEADLINE) is None\n assert state_w2.attributes.get(ATTR_DESCRIPTION) is None\n assert state_w2.attributes.get(ATTR_SENDER) is None\n assert state_w2.attributes.get(ATTR_SEVERITY) is None\n assert state_w2.attributes.get(ATTR_RECOMMENDED_ACTIONS) is None\n assert state_w2.attributes.get(ATTR_AFFECTED_AREAS) is None\n assert state_w2.attributes.get(ATTR_ID) is None\n assert state_w2.attributes.get(ATTR_SENT) is None\n assert state_w2.attributes.get(ATTR_START) is None\n assert state_w2.attributes.get(ATTR_EXPIRES) is None\n\n assert entry_w2.unique_id == \"083350000000-2\"\n assert state_w2.attributes.get(\"device_class\") == BinarySensorDeviceClass.SAFETY\n\n state_w3 = hass.states.get(\"binary_sensor.warning_aach_stadt_3\")\n entry_w3 = entity_registry.async_get(\"binary_sensor.warning_aach_stadt_3\")\n\n assert state_w3.state == STATE_OFF\n assert state_w3.attributes.get(ATTR_HEADLINE) is None\n assert state_w3.attributes.get(ATTR_DESCRIPTION) is None\n assert state_w3.attributes.get(ATTR_SENDER) is None\n assert state_w3.attributes.get(ATTR_SEVERITY) is None\n assert state_w3.attributes.get(ATTR_RECOMMENDED_ACTIONS) is None\n assert state_w3.attributes.get(ATTR_AFFECTED_AREAS) is None\n assert state_w3.attributes.get(ATTR_ID) is None\n assert state_w3.attributes.get(ATTR_SENT) is None\n assert state_w3.attributes.get(ATTR_START) is None\n assert state_w3.attributes.get(ATTR_EXPIRES) is None\n\n assert entry_w3.unique_id == \"083350000000-3\"\n assert state_w3.attributes.get(\"device_class\") == BinarySensorDeviceClass.SAFETY\n\n state_w4 = hass.states.get(\"binary_sensor.warning_aach_stadt_4\")\n entry_w4 = entity_registry.async_get(\"binary_sensor.warning_aach_stadt_4\")\n\n assert state_w4.state == STATE_OFF\n assert state_w4.attributes.get(ATTR_HEADLINE) is None\n assert state_w4.attributes.get(ATTR_DESCRIPTION) is None\n assert state_w4.attributes.get(ATTR_SENDER) is None\n assert state_w4.attributes.get(ATTR_SEVERITY) is None\n assert state_w4.attributes.get(ATTR_RECOMMENDED_ACTIONS) is 
None\n assert state_w4.attributes.get(ATTR_AFFECTED_AREAS) is None\n assert state_w4.attributes.get(ATTR_ID) is None\n assert state_w4.attributes.get(ATTR_SENT) is None\n assert state_w4.attributes.get(ATTR_START) is None\n assert state_w4.attributes.get(ATTR_EXPIRES) is None\n\n assert entry_w4.unique_id == \"083350000000-4\"\n assert state_w4.attributes.get(\"device_class\") == BinarySensorDeviceClass.SAFETY\n\n state_w5 = hass.states.get(\"binary_sensor.warning_aach_stadt_5\")\n entry_w5 = entity_registry.async_get(\"binary_sensor.warning_aach_stadt_5\")\n\n assert state_w5.state == STATE_OFF\n assert state_w5.attributes.get(ATTR_HEADLINE) is None\n assert state_w5.attributes.get(ATTR_DESCRIPTION) is None\n assert state_w5.attributes.get(ATTR_SENDER) is None\n assert state_w5.attributes.get(ATTR_SEVERITY) is None\n assert state_w5.attributes.get(ATTR_RECOMMENDED_ACTIONS) is None\n assert state_w5.attributes.get(ATTR_AFFECTED_AREAS) is None\n assert state_w5.attributes.get(ATTR_ID) is None\n assert state_w5.attributes.get(ATTR_SENT) is None\n assert state_w5.attributes.get(ATTR_START) is None\n assert state_w5.attributes.get(ATTR_EXPIRES) is None\n\n assert entry_w5.unique_id == \"083350000000-5\"\n assert state_w5.attributes.get(\"device_class\") == BinarySensorDeviceClass.SAFETY", "title": "" }, { "docid": "4058e0d63af8a1deb3d7999656daeaa0", "score": "0.50072974", "text": "def test_root2(self):\n print \"test root2\"\n jobs = self.myTests.createRootHaddTest()\n assertDiracSucceeds( jobs, self )\n thisJob = jobs['Value']\n res = self.myTests.runJobLocally(thisJob, \"Root\")\n assertDiracSucceeds( res, self )", "title": "" }, { "docid": "6b790f9ad8d2d73a0d73c55237d53876", "score": "0.5002783", "text": "def test_servo_interface_implementation():\n MockServoDriver()", "title": "" }, { "docid": "867d0e8fa8e334946d4ae36a762b1e5a", "score": "0.49977773", "text": "async def test_websocket_api(hass, generic_data, hass_ws_client):\n await setup_ozw(hass, fixture=generic_data)\n client = await hass_ws_client(hass)\n\n # Test network status\n await client.send_json({ID: 5, TYPE: \"ozw/network_status\"})\n msg = await client.receive_json()\n result = msg[\"result\"]\n\n assert result[\"state\"] == \"driverAllNodesQueried\"\n assert result[OZW_INSTANCE] == 1\n\n # Test node status\n await client.send_json({ID: 6, TYPE: \"ozw/node_status\", NODE_ID: 32})\n msg = await client.receive_json()\n result = msg[\"result\"]\n\n assert result[OZW_INSTANCE] == 1\n assert result[NODE_ID] == 32\n assert result[\"node_query_stage\"] == \"Complete\"\n assert result[\"is_zwave_plus\"]\n assert result[\"is_awake\"]\n assert not result[\"is_failed\"]\n assert result[\"node_baud_rate\"] == 100000\n assert result[\"is_beaming\"]\n assert not result[\"is_flirs\"]\n assert result[\"is_routing\"]\n assert not result[\"is_securityv1\"]\n assert result[\"node_basic_string\"] == \"Routing Slave\"\n assert result[\"node_generic_string\"] == \"Binary Switch\"\n assert result[\"node_specific_string\"] == \"Binary Power Switch\"\n assert result[\"neighbors\"] == [1, 33, 36, 37, 39]\n\n # Test node statistics\n await client.send_json({ID: 7, TYPE: \"ozw/node_statistics\", NODE_ID: 39})\n msg = await client.receive_json()\n result = msg[\"result\"]\n\n assert result[OZW_INSTANCE] == 1\n assert result[NODE_ID] == 39\n assert result[\"send_count\"] == 57\n assert result[\"sent_failed\"] == 0\n assert result[\"retries\"] == 1\n assert result[\"last_request_rtt\"] == 26\n assert result[\"last_response_rtt\"] == 38\n assert 
result[\"average_request_rtt\"] == 29\n assert result[\"average_response_rtt\"] == 37\n assert result[\"received_packets\"] == 3594\n assert result[\"received_dup_packets\"] == 12\n assert result[\"received_unsolicited\"] == 3546", "title": "" }, { "docid": "e406baebb6e7a969121fda955139a338", "score": "0.4991776", "text": "def test_extensive_v1_electron(self):\n electron_tool = ROOT.egammaMVACalib(ROOT.egammaMVACalib.egELECTRON, True, \"egammaMVACalib/v1\")\n f = ROOT.TFile.Open(\"electron_test.root\")\n tree = f.Get(\"electron_test\")\n self.do_test_file(electron_tool, tree)", "title": "" }, { "docid": "9b83f281f581195cba22cc5e936b1a73", "score": "0.49899042", "text": "def test_init(self):\n abi_str = (\n '[{\"constant\":true,\"inputs\":[],\"name\":\"minBet\",\"outputs\":[{\"na'\n 'me\":\"\",\"type\":\"uint256\"}],\"payable\":false,\"stateMutability\":\"'\n 'view\",\"type\":\"function\"}]'\n )\n with patch_get_abi(abi_str):\n etheroll = Etheroll()\n assert etheroll.contract is not None", "title": "" }, { "docid": "355095c646b60887b6938851aa0a838a", "score": "0.49882734", "text": "def test_02_Onkyo(self):\n l_xml = XmlConfigTools.find_xml_section(self.m_pyhouse_obj, 'HouseDivision/EntertainmentSection/OnkyoSection')\n # print(PrettyFormatAny.form(l_xml, 'C3-02-A - Onkyo XML'))\n l_ret = entertainmentXML().read_entertainment_subsection(self.m_pyhouse_obj, l_xml)\n # print(PrettyFormatAny.form(l_ret, 'C3-02-B - Onkyo Plugin'))\n # print(PrettyFormatAny.form(l_ret.Devices, 'C3-02-C - Onkyo Devices'))\n self.assertEqual(l_ret.Active, TESTING_ONKYO_ACTIVE)\n self.assertEqual(l_ret.Name, 'onkyo')\n self.assertEqual(l_ret.Devices[0].Name, TESTING_ONKYO_DEVICE_NAME_0)\n self.assertEqual(str(l_ret.Devices[0].Active), TESTING_ONKYO_DEVICE_ACTIVE_0)\n self.assertEqual(str(l_ret.Devices[0].Key), TESTING_ONKYO_DEVICE_KEY_0)\n self.assertEqual(l_ret.Devices[0].Comment, TESTING_ONKYO_DEVICE_COMMENT_0)", "title": "" }, { "docid": "e8ab6c93758958a45e1d7e777daa7b13", "score": "0.49866056", "text": "def test_device_state_attributes(self):\n self.ecobee[\"equipmentStatus\"] = \"heatPump2\"\n assert {\n \"fan\": \"off\",\n \"climate_mode\": \"Climate1\",\n \"fan_min_on_time\": 10,\n \"equipment_running\": \"heatPump2\",\n } == self.thermostat.device_state_attributes\n\n self.ecobee[\"equipmentStatus\"] = \"auxHeat2\"\n assert {\n \"fan\": \"off\",\n \"climate_mode\": \"Climate1\",\n \"fan_min_on_time\": 10,\n \"equipment_running\": \"auxHeat2\",\n } == self.thermostat.device_state_attributes\n self.ecobee[\"equipmentStatus\"] = \"compCool1\"\n assert {\n \"fan\": \"off\",\n \"climate_mode\": \"Climate1\",\n \"fan_min_on_time\": 10,\n \"equipment_running\": \"compCool1\",\n } == self.thermostat.device_state_attributes\n self.ecobee[\"equipmentStatus\"] = \"\"\n assert {\n \"fan\": \"off\",\n \"climate_mode\": \"Climate1\",\n \"fan_min_on_time\": 10,\n \"equipment_running\": \"\",\n } == self.thermostat.device_state_attributes\n\n self.ecobee[\"equipmentStatus\"] = \"Unknown\"\n assert {\n \"fan\": \"off\",\n \"climate_mode\": \"Climate1\",\n \"fan_min_on_time\": 10,\n \"equipment_running\": \"Unknown\",\n } == self.thermostat.device_state_attributes\n\n self.ecobee[\"program\"][\"currentClimateRef\"] = \"c2\"\n assert {\n \"fan\": \"off\",\n \"climate_mode\": \"Climate2\",\n \"fan_min_on_time\": 10,\n \"equipment_running\": \"Unknown\",\n } == self.thermostat.device_state_attributes", "title": "" }, { "docid": "7a73262f3d67e0fe2eea9d26099f32f0", "score": "0.4983609", "text": "def test_elevation():\n pass", 
"title": "" }, { "docid": "a5243ceeea70a3c1b3306dac6631848d", "score": "0.49834675", "text": "def test_timeout_reuse(self):\n self.mock_operations.get_node_state.side_effect = [\"maintenance\"] * 50\n self.assertRaises(UpcloudTimeoutException, self.destroyer.destroy_node, 1)\n\n self.mock_operations.get_node_state.side_effect = [\"maintenance\", None]\n self.assertTrue(self.destroyer.destroy_node(1))", "title": "" }, { "docid": "888e274543ff85d5608811c5b2958793", "score": "0.49829534", "text": "def test_Xiaomi_CGH1(self):", "title": "" }, { "docid": "c557517a10050c6b35419f2657e223d3", "score": "0.4981638", "text": "def test_set_fan_mode_auto(self):\n self.data.reset_mock()\n self.thermostat.set_fan_mode(\"auto\")\n self.data.ecobee.set_fan_mode.assert_has_calls(\n [mock.call(1, \"auto\", 20, 40, \"nextTransition\")]\n )", "title": "" }, { "docid": "4adb6aa39be7b8daa022475403e5ae12", "score": "0.49793854", "text": "def setUp(self):\n super().setUp()\n self.start_radius()\n self.start_chewie()", "title": "" }, { "docid": "915199872a7ca375de404aba771ed34d", "score": "0.4975546", "text": "async def test_sensor_unknown_error(hass: HomeAssistant) -> None:\n mock_entry = _mock_config_entry()\n\n with patch(\"aurorapy.client.AuroraSerialClient.connect\", return_value=None), patch(\n \"aurorapy.client.AuroraSerialClient.measure\",\n side_effect=AuroraError(\"another error\"),\n ):\n mock_entry.add_to_hass(hass)\n await hass.config_entries.async_setup(mock_entry.entry_id)\n await hass.async_block_till_done()\n power = hass.states.get(\"sensor.mydevicename_power_output\")\n assert power is None", "title": "" }, { "docid": "e443a34916be502e32e0065978927964", "score": "0.49709854", "text": "def test_one_node(self):\n Tabs().nodes.click()\n time.sleep(1)\n Nodes().add_nodes.click()\n time.sleep(1)\n Nodes().nodes_discovered[0].checkbox.click()\n RolesPanel().controller.click()\n Nodes().apply_changes.click()\n time.sleep(1)\n Tabs().networks.click()\n time.sleep(1)\n with Networks() as n:\n n.verify_networks.click()\n self.assertIn(\n 'At least two nodes are required',\n n.verification_alert.text,\n 'Alert text contains \"At least two nodes are required\"')", "title": "" } ]
2afaae3a124adf69099d3fb92181a88b
Returns the name of the testing location
[ { "docid": "8c978d7cc1bd01dc292af9c9e44c8ca6", "score": "0.6202428", "text": "def get_testsite(input):\n\tbase = get_filename(input)\n\treturn base.split('_',1)[0]", "title": "" } ]
[ { "docid": "392a74788ff7c29dc56cf193234207ac", "score": "0.69871753", "text": "def Location(self) -> str:", "title": "" }, { "docid": "2ce99033809f7a04352d082dd91f767f", "score": "0.6938344", "text": "def get_test_name(self):\n\n frames = inspect.getouterframes(inspect.currentframe())\n for frame in frames:\n if re.match('test_.*', os.path.basename(frame[1])):\n return os.path.basename(frame[1])[:-3]\n\n return self.shishito_support.get_opt('project_name')", "title": "" }, { "docid": "2ce99033809f7a04352d082dd91f767f", "score": "0.6938344", "text": "def get_test_name(self):\n\n frames = inspect.getouterframes(inspect.currentframe())\n for frame in frames:\n if re.match('test_.*', os.path.basename(frame[1])):\n return os.path.basename(frame[1])[:-3]\n\n return self.shishito_support.get_opt('project_name')", "title": "" }, { "docid": "d442564e3e8981f01269231b3de4076c", "score": "0.6805232", "text": "def get_current_test_name():\n # PYTEST_CURRENT_TEST value will be of syntax \"FILE_NAME::FUNC_NAME (STAGE)\"\n full_name = os.getenv(\"PYTEST_CURRENT_TEST\", \"\").split(\" \")[0]\n return full_name.split(\"::\")[-1]", "title": "" }, { "docid": "5e6a1873bd163d67da6bb11a164ae31e", "score": "0.68026394", "text": "def getNamelocation(self):\n\t\treturn self.actualisland.getName()", "title": "" }, { "docid": "69248d7ee4c286d829ae9a02d2655b49", "score": "0.67172486", "text": "def get_location(self): # real signature unknown; restored from __doc__\n return \"\"", "title": "" }, { "docid": "f434d3358fb9f7101163ed5d02ca3deb", "score": "0.66663855", "text": "def local_unit_name(self):\n return self.configuration_class().local_unit_name", "title": "" }, { "docid": "07648a4b8d8593dbf4b35a8b99b45d9f", "score": "0.66544574", "text": "def location_name(self):\n return self.wilderness.mapprovider.get_location_name(self.coordinates)", "title": "" }, { "docid": "5fc02afd463aefb3c859ee80b6832996", "score": "0.6645051", "text": "def location(self) -> str:\n return pulumi.get(self, \"location\")", "title": "" }, { "docid": "5fc02afd463aefb3c859ee80b6832996", "score": "0.6645051", "text": "def location(self) -> str:\n return pulumi.get(self, \"location\")", "title": "" }, { "docid": "5fc02afd463aefb3c859ee80b6832996", "score": "0.6645051", "text": "def location(self) -> str:\n return pulumi.get(self, \"location\")", "title": "" }, { "docid": "5fc02afd463aefb3c859ee80b6832996", "score": "0.6645051", "text": "def location(self) -> str:\n return pulumi.get(self, \"location\")", "title": "" }, { "docid": "5fc02afd463aefb3c859ee80b6832996", "score": "0.6645051", "text": "def location(self) -> str:\n return pulumi.get(self, \"location\")", "title": "" }, { "docid": "5fc02afd463aefb3c859ee80b6832996", "score": "0.6645051", "text": "def location(self) -> str:\n return pulumi.get(self, \"location\")", "title": "" }, { "docid": "5fc02afd463aefb3c859ee80b6832996", "score": "0.6645051", "text": "def location(self) -> str:\n return pulumi.get(self, \"location\")", "title": "" }, { "docid": "5fc02afd463aefb3c859ee80b6832996", "score": "0.6645051", "text": "def location(self) -> str:\n return pulumi.get(self, \"location\")", "title": "" }, { "docid": "5fc02afd463aefb3c859ee80b6832996", "score": "0.6645051", "text": "def location(self) -> str:\n return pulumi.get(self, \"location\")", "title": "" }, { "docid": "5fc02afd463aefb3c859ee80b6832996", "score": "0.6645051", "text": "def location(self) -> str:\n return pulumi.get(self, \"location\")", "title": "" }, { "docid": "5fc02afd463aefb3c859ee80b6832996", "score": "0.6645051", 
"text": "def location(self) -> str:\n return pulumi.get(self, \"location\")", "title": "" }, { "docid": "5fc02afd463aefb3c859ee80b6832996", "score": "0.6645051", "text": "def location(self) -> str:\n return pulumi.get(self, \"location\")", "title": "" }, { "docid": "5fc02afd463aefb3c859ee80b6832996", "score": "0.6645051", "text": "def location(self) -> str:\n return pulumi.get(self, \"location\")", "title": "" }, { "docid": "5fc02afd463aefb3c859ee80b6832996", "score": "0.6645051", "text": "def location(self) -> str:\n return pulumi.get(self, \"location\")", "title": "" }, { "docid": "5fc02afd463aefb3c859ee80b6832996", "score": "0.6645051", "text": "def location(self) -> str:\n return pulumi.get(self, \"location\")", "title": "" }, { "docid": "5fc02afd463aefb3c859ee80b6832996", "score": "0.6645051", "text": "def location(self) -> str:\n return pulumi.get(self, \"location\")", "title": "" }, { "docid": "5fc02afd463aefb3c859ee80b6832996", "score": "0.6645051", "text": "def location(self) -> str:\n return pulumi.get(self, \"location\")", "title": "" }, { "docid": "5fc02afd463aefb3c859ee80b6832996", "score": "0.6645051", "text": "def location(self) -> str:\n return pulumi.get(self, \"location\")", "title": "" }, { "docid": "5fc02afd463aefb3c859ee80b6832996", "score": "0.6645051", "text": "def location(self) -> str:\n return pulumi.get(self, \"location\")", "title": "" }, { "docid": "5fc02afd463aefb3c859ee80b6832996", "score": "0.6645051", "text": "def location(self) -> str:\n return pulumi.get(self, \"location\")", "title": "" }, { "docid": "5fc02afd463aefb3c859ee80b6832996", "score": "0.6645051", "text": "def location(self) -> str:\n return pulumi.get(self, \"location\")", "title": "" }, { "docid": "5fc02afd463aefb3c859ee80b6832996", "score": "0.6645051", "text": "def location(self) -> str:\n return pulumi.get(self, \"location\")", "title": "" }, { "docid": "5fc02afd463aefb3c859ee80b6832996", "score": "0.6645051", "text": "def location(self) -> str:\n return pulumi.get(self, \"location\")", "title": "" }, { "docid": "5fc02afd463aefb3c859ee80b6832996", "score": "0.6645051", "text": "def location(self) -> str:\n return pulumi.get(self, \"location\")", "title": "" }, { "docid": "5fc02afd463aefb3c859ee80b6832996", "score": "0.6645051", "text": "def location(self) -> str:\n return pulumi.get(self, \"location\")", "title": "" }, { "docid": "5fc02afd463aefb3c859ee80b6832996", "score": "0.6645051", "text": "def location(self) -> str:\n return pulumi.get(self, \"location\")", "title": "" }, { "docid": "5fc02afd463aefb3c859ee80b6832996", "score": "0.6645051", "text": "def location(self) -> str:\n return pulumi.get(self, \"location\")", "title": "" }, { "docid": "93c40e1b2c326d12cadce9214f9c51dd", "score": "0.6616004", "text": "def getName(self, location):\n\n return re.match(\"(.*)/(.*?)\\.py\\?\", location).groups()[1]", "title": "" }, { "docid": "aa671538587643bea43e6434582e4e26", "score": "0.656021", "text": "def test_path(name='.'):\n return TEST_ROOT_DIR / name", "title": "" }, { "docid": "de4d989dfe5508d01b4101aaa0bec2b0", "score": "0.64972556", "text": "def location(self) -> str:\n return self.__location", "title": "" }, { "docid": "de4d989dfe5508d01b4101aaa0bec2b0", "score": "0.64972556", "text": "def location(self) -> str:\n return self.__location", "title": "" }, { "docid": "de4d989dfe5508d01b4101aaa0bec2b0", "score": "0.64972556", "text": "def location(self) -> str:\n return self.__location", "title": "" }, { "docid": "de4d989dfe5508d01b4101aaa0bec2b0", "score": "0.64972556", "text": "def 
location(self) -> str:\n return self.__location", "title": "" }, { "docid": "f1898dc36ea9fc1c1d970785a9a53ce7", "score": "0.6475585", "text": "def get_current_test_id():\n # PYTEST_CURRENT_TEST value will be of syntax \"FILE_NAME::FUNC_NAME (STAGE)\"\n full_name = os.getenv(\"PYTEST_CURRENT_TEST\", \"\").split(\" \")[0]\n return full_name", "title": "" }, { "docid": "e969a50239bec657823d304cf87ada21", "score": "0.64651257", "text": "def location_path(self) -> str:\n return f'locations/{self.location}'", "title": "" }, { "docid": "d0483e5569821f721d76144a9e4abbc8", "score": "0.64629036", "text": "def TestName(self):\n\t\treturn self._testMethodName", "title": "" }, { "docid": "91ef896bfb19593a0daefd17d3b9df9b", "score": "0.64581656", "text": "def location(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"location\")", "title": "" }, { "docid": "91ef896bfb19593a0daefd17d3b9df9b", "score": "0.64581656", "text": "def location(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"location\")", "title": "" }, { "docid": "91ef896bfb19593a0daefd17d3b9df9b", "score": "0.64581656", "text": "def location(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"location\")", "title": "" }, { "docid": "91ef896bfb19593a0daefd17d3b9df9b", "score": "0.64581656", "text": "def location(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"location\")", "title": "" }, { "docid": "91ef896bfb19593a0daefd17d3b9df9b", "score": "0.64581656", "text": "def location(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"location\")", "title": "" }, { "docid": "91ef896bfb19593a0daefd17d3b9df9b", "score": "0.64581656", "text": "def location(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"location\")", "title": "" }, { "docid": "91ef896bfb19593a0daefd17d3b9df9b", "score": "0.64581656", "text": "def location(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"location\")", "title": "" }, { "docid": "91ef896bfb19593a0daefd17d3b9df9b", "score": "0.64581656", "text": "def location(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"location\")", "title": "" }, { "docid": "91ef896bfb19593a0daefd17d3b9df9b", "score": "0.64581656", "text": "def location(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"location\")", "title": "" }, { "docid": "91ef896bfb19593a0daefd17d3b9df9b", "score": "0.64581656", "text": "def location(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"location\")", "title": "" }, { "docid": "91ef896bfb19593a0daefd17d3b9df9b", "score": "0.64581656", "text": "def location(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"location\")", "title": "" }, { "docid": "91ef896bfb19593a0daefd17d3b9df9b", "score": "0.64581656", "text": "def location(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"location\")", "title": "" }, { "docid": "91ef896bfb19593a0daefd17d3b9df9b", "score": "0.64581656", "text": "def location(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"location\")", "title": "" }, { "docid": "91ef896bfb19593a0daefd17d3b9df9b", "score": "0.64581656", "text": "def location(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"location\")", "title": "" }, { "docid": "91ef896bfb19593a0daefd17d3b9df9b", "score": "0.64581656", "text": "def location(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"location\")", "title": "" }, { "docid": "91ef896bfb19593a0daefd17d3b9df9b", "score": "0.64581656", "text": "def location(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"location\")", "title": "" }, { "docid": 
"91ef896bfb19593a0daefd17d3b9df9b", "score": "0.64581656", "text": "def location(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"location\")", "title": "" }, { "docid": "91ef896bfb19593a0daefd17d3b9df9b", "score": "0.64581656", "text": "def location(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"location\")", "title": "" }, { "docid": "91ef896bfb19593a0daefd17d3b9df9b", "score": "0.64581656", "text": "def location(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"location\")", "title": "" }, { "docid": "91ef896bfb19593a0daefd17d3b9df9b", "score": "0.64581656", "text": "def location(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"location\")", "title": "" }, { "docid": "91ef896bfb19593a0daefd17d3b9df9b", "score": "0.64581656", "text": "def location(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"location\")", "title": "" }, { "docid": "91ef896bfb19593a0daefd17d3b9df9b", "score": "0.64581656", "text": "def location(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"location\")", "title": "" }, { "docid": "91ef896bfb19593a0daefd17d3b9df9b", "score": "0.64581656", "text": "def location(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"location\")", "title": "" }, { "docid": "91ef896bfb19593a0daefd17d3b9df9b", "score": "0.64581656", "text": "def location(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"location\")", "title": "" }, { "docid": "1a3997f57abaf4879a3df470876816ee", "score": "0.644422", "text": "def location(self) -> str:\n return 'prod' if self.is_prod else 'dev'", "title": "" }, { "docid": "148478f657bf089f650047ad0b8b96ae", "score": "0.6433636", "text": "def location(self) -> str:\n return self.metadata.location", "title": "" }, { "docid": "828728a8448dd578246ea4403d5d195b", "score": "0.6405767", "text": "def full_bed_location():\n return \"tests/test_data/full_bed.bed\"", "title": "" }, { "docid": "d517a93ef2718d9580ae1bafb585910f", "score": "0.6379595", "text": "def location(self) -> str:\n return self._location", "title": "" }, { "docid": "3175e9bc4082f8ffe60ac1870c93d665", "score": "0.637443", "text": "def getName(self, location):\n\n # return os.path.split(os.path.splitext(location)[0])[1]\n head,ext = g.os_path_splitext(location)\n path,name = g.os_path_split(head)\n return name", "title": "" }, { "docid": "e6b9b061d30a5d1781335a3fb8ca1f5b", "score": "0.6340447", "text": "def test_location(self):", "title": "" }, { "docid": "74d2fcd46120d134695f20da0a683255", "score": "0.62979275", "text": "def statistical_test_name(self) -> str:\n raise NotImplementedError", "title": "" }, { "docid": "3c4f2a846b611ad4d152b50cd9831647", "score": "0.6261979", "text": "def location(self) -> Optional[str]:\n return pulumi.get(self, \"location\")", "title": "" }, { "docid": "3c4f2a846b611ad4d152b50cd9831647", "score": "0.6261979", "text": "def location(self) -> Optional[str]:\n return pulumi.get(self, \"location\")", "title": "" }, { "docid": "3c4f2a846b611ad4d152b50cd9831647", "score": "0.6261979", "text": "def location(self) -> Optional[str]:\n return pulumi.get(self, \"location\")", "title": "" }, { "docid": "3c4f2a846b611ad4d152b50cd9831647", "score": "0.6261979", "text": "def location(self) -> Optional[str]:\n return pulumi.get(self, \"location\")", "title": "" }, { "docid": "3c4f2a846b611ad4d152b50cd9831647", "score": "0.6261979", "text": "def location(self) -> Optional[str]:\n return pulumi.get(self, \"location\")", "title": "" }, { "docid": "2c12ac870d4f4f62c2181a6992837655", "score": "0.6217114", "text": "def 
get_name(self):\n return \"Test.%s\" % self.specifier.name", "title": "" }, { "docid": "a83c4e3eabf7679d8aa709a6511f5d27", "score": "0.6213318", "text": "def location(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"location\")", "title": "" }, { "docid": "a83c4e3eabf7679d8aa709a6511f5d27", "score": "0.6213318", "text": "def location(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"location\")", "title": "" }, { "docid": "e3dacd21240a7de20e8d4e582aa44bca", "score": "0.61724037", "text": "def ktest_path(self):\r\n\t\treturn self.__pathstub + \".ktest\"", "title": "" }, { "docid": "3cab6f013f25f2dd11b9443cd137416f", "score": "0.61345506", "text": "def get_filename(cls):\n return get_folder_from_cloud_test_repo([*cls.dir_path])", "title": "" }, { "docid": "7391ee6e108620b22ec7d5a1c4690f78", "score": "0.6120584", "text": "def testmethodName(self):\n # The olde way was a bit longish. Something like\n # testTestmyclassnameTestfoldercontainssomething().\n # old = 'test%s%s' % (self.getParent().getCleanName().capitalize(),\n # self.getCleanName().capitalize())\n # The new version starts with 'test_' with the unmodified name\n # after it.\n name = 'test_%s' % self.getCleanName()\n return name", "title": "" }, { "docid": "164850399b3163aaea9d235fcb1d501b", "score": "0.611023", "text": "def __getTestDirectory(self):\r\n\r\n return os.path.join(self.baseDirectory, self.__baseConfiguration.testDirectoryName)", "title": "" }, { "docid": "246a1421d6d9ff5de9557f6ee141f996", "score": "0.6110013", "text": "def Ort(self):\n return self.getMylocation()", "title": "" }, { "docid": "5c4b511156ec59e21acef91fb1c471ba", "score": "0.6041481", "text": "def v_location(self):\n return self._full_name[:-len(self._name) - 1]", "title": "" }, { "docid": "e6e64be9d4e773f592c7dd2f6cc5f0d8", "score": "0.6037785", "text": "def location(self):\n \n p = os.path.abspath(__file__)\n pathSP = os.path.split(p)\n return pathSP", "title": "" }, { "docid": "888d682303767fa332f9f31ad3b472fb", "score": "0.6029801", "text": "def access_location(self) -> str:\n return self._access_location", "title": "" }, { "docid": "62e73f9908243c298f6b57db51f9c94c", "score": "0.5992729", "text": "def get_location_name(self, coordinates):\n return \"The wilderness\"", "title": "" }, { "docid": "46ec6a51139018a8beeb8de3903fb247", "score": "0.5984122", "text": "def printable_location(self):\n return '\"{0}\" ({1})'.format(\n concise_path(self.base_dir), self.pyver)", "title": "" }, { "docid": "c6c418cecfd8d13264835e678200db53", "score": "0.59770477", "text": "def location(self) -> str:\n if self.__expanded_launch_file_path is None:\n # get_launch_description() has not been called yet\n return ' + '.join([str(sub) for sub in self.__launch_file_path])\n return self.__expanded_launch_file_path", "title": "" }, { "docid": "48b51b902ab40e0b7ef88ec5b755740e", "score": "0.5974541", "text": "def get_local_name(self):\n return self.host_name()", "title": "" }, { "docid": "9a1d88888e9873d9ac833d0f2967bfb6", "score": "0.5972528", "text": "def get_test_filing_search_path() -> str:\n this_directory = os.path.dirname(os.path.realpath(__file__))\n test_filepath = os.path.join(\n this_directory, \"test_search_pages\", f\"example_case_query_result.html\"\n )\n return test_filepath", "title": "" }, { "docid": "195fd7abf2e382ea8ee07e1fd87903a2", "score": "0.59617954", "text": "def default_test_results_location():\n return os.path.join(repo_root(), \"test-results\")", "title": "" }, { "docid": "3df0e3a1b971e3b69127f21200643dd1", "score": 
"0.5931502", "text": "def location(self) -> str:\n locations_response = self.fetch(\n method=self.service.projects().locations().list,\n **{'name': self.project_path}\n )\n locations = \\\n list([location['locationId']\n for location in locations_response['locations']])\n\n return locations[0]", "title": "" }, { "docid": "ffaed53603eba720731947ed6f0785d7", "score": "0.5929669", "text": "def store_location(self) -> str:\n return self._store_location", "title": "" }, { "docid": "e54736380712ac7f727e8e2eb7068bc6", "score": "0.59056294", "text": "def _get_name(self) -> \"std::string\" :\n return _core.Workspace__get_name(self)", "title": "" } ]
718fcf74032b304b53a0dd38a29c6648
The initial horizontal position for the scan
[ { "docid": "5acc9a5763edda2194a0bd529ebf4a51", "score": "0.5978934", "text": "def startx(self):\n return self._x", "title": "" } ]
[ { "docid": "3d525e22c3fb671adc656eebba1455b5", "score": "0.6845583", "text": "def find_starting_location(self):\n\n self.current_row = 0\n self.current_col = self.path[0].index(\"|\")\n self.current_direction = \"down\"", "title": "" }, { "docid": "753d0c10d7916d178823db83a8c064c4", "score": "0.67242074", "text": "def x_begin(self, param='x_filtered'):\n self.check_x_param(param)\n x = getattr(self, param)\n return int(max(0, x - self.width // 2))", "title": "" }, { "docid": "5cf27c47a2b81fcf609a6eb3b6de15f9", "score": "0.66187227", "text": "def heading_min(self):\n return self._scan_range[0]", "title": "" }, { "docid": "371386f90e3f29a273299deeeb7b47ce", "score": "0.6476608", "text": "def start_pos(self):", "title": "" }, { "docid": "c452d5d460d3acf121cac3442131829a", "score": "0.64608634", "text": "def start_col(self):\n return 0", "title": "" }, { "docid": "13c5bb59c423e3292187e13baa4ab6a9", "score": "0.6379881", "text": "def getHorizontal(self) -> float:\n ...", "title": "" }, { "docid": "21a1a375b84155885466a1d8d835bc57", "score": "0.6312407", "text": "def _get_initial_left_pos(row, col):\n # If row+col smaller than last index in col list, return (row=0, col=row+col).\n if row + col <= data.LAST_IDX_COL:\n return 0, row + col\n # Else, return (row=row+col-col last index, col=col last index).\n else:\n return (row + col) - data.LAST_IDX_COL, data.LAST_IDX_COL", "title": "" }, { "docid": "cf475dede21babd58fb1369c3357ad0c", "score": "0.62476707", "text": "def calculate_horizontal(self):\n self.info.astrometry.calculate_horizontal()", "title": "" }, { "docid": "fdb4a948b5bb7c7b354014d7f7daf253", "score": "0.6089307", "text": "def horizontal(self):\n if self.astrometry is None:\n return None\n return self.astrometry.horizontal", "title": "" }, { "docid": "a0fef26a668583fabc268142ae7fee8e", "score": "0.6076157", "text": "def current_position(self) -> int:\n\t\treturn self.__start", "title": "" }, { "docid": "767de60a01076fa500ff696737b799c0", "score": "0.60548323", "text": "def initial_offset(self):\n if self.prev_fit is None:\n return 0\n else:\n return -self.prev_fit[1]['logL']", "title": "" }, { "docid": "895737e88cba361c5084b4e7195ce0a0", "score": "0.6040609", "text": "def scan_start_value(self):\n self.operator._set_scan_start(self.scan_start_spinbox.value())\n self.scan_start_spinbox.setValue(self.operator.properties['scan']['start'])\n self.scan_step_spinbox.setValue(self.operator.properties['scan']['step'])\n set_spinbox_stepsize(self.scan_start_spinbox)\n self.plot1.setXRange(self.operator.properties['scan']['start'], self.operator.properties['scan']['stop'])", "title": "" }, { "docid": "d3aca51014d76c39cbc909c272e60074", "score": "0.6040053", "text": "def get_start_pos(self):\n i_pos, j_pos = -1, -1\n\n is_init_pos_found = False\n while not is_init_pos_found:\n i_pos = np.random.randint(low=0, high=self._grid.height)\n j_pos = np.random.randint(low=0, high=self._grid.width)\n\n if not self._grid.is_state_goal((i_pos, j_pos)):\n is_init_pos_found = True\n\n assert (i_pos != -1) and (j_pos != -1), 'Error while looking for an init position.'\n return i_pos, j_pos", "title": "" }, { "docid": "c1af1eab3f0a247e880efa6ee01e1ee7", "score": "0.6034387", "text": "def get_horizontal_position(self, locator: Union[WebElement, str]) -> int:\n return self.find_element(locator).location[\"x\"]", "title": "" }, { "docid": "1b0fedb68e09ef205f4e5314c25ccc16", "score": "0.6027346", "text": "def get_origin_x(self) -> float:\n return (self.get_length() + Fretboard.WIDTH_MARGIN) if self.lefty else 
(Fretboard.WIDTH_MARGIN + (2**(1/24) -1)*self.get_length())", "title": "" }, { "docid": "ed341a5e3bc9a618f317bb46c2311d9c", "score": "0.6024036", "text": "def position(self):\r\n\r\n return self.scanner.position()", "title": "" }, { "docid": "6411cd99a96f34544e5495df2bd8cd78", "score": "0.6008129", "text": "def scan_positions(self):\n return np.arange(self.num_scans)", "title": "" }, { "docid": "c8596b244c1ee408e0dcf821c285706e", "score": "0.59793615", "text": "def horizontal_center(self) -> float:\n return self.pos.x + self.size.x * 0.5", "title": "" }, { "docid": "5fdca5b25c4c567b23109f76a124420d", "score": "0.5971564", "text": "def CursorLeft(self) -> int:", "title": "" }, { "docid": "0814b9073521762bcf551059dbf8c682", "score": "0.59653", "text": "def xoffset(self):\n return self._xoffset", "title": "" }, { "docid": "51947557ef9ff8a365a719f8edcd9eca", "score": "0.5917166", "text": "def horizontal_roi_moved(self):\n region = self.horizontal_profile.getArraySlice(self.live_image, self.ui.image_view.imageItem)\n\n x0 = region[0][0].start\n x1 = region[0][0].stop\n y0 = region[0][1].start\n y1 = region[0][1].stop\n\n width = np.abs(x1-x0)\n height = np.abs(y1-y0)\n\n self.roi['horizontal']['x0'] = x0\n self.roi['horizontal']['y0'] = y0\n self.roi['horizontal']['length'] = width\n self.roi['horizontal']['width'] = height\n\n self.update_selected_file_profile_plots(is_horizontal=True)\n self.calculate_and_display_current_peak(is_horizontal=True)", "title": "" }, { "docid": "8f0e4c4ee2a47812fedf27c8b24e9a84", "score": "0.5902078", "text": "def get_startposition(self):\n return self.start_position", "title": "" }, { "docid": "4083f0c24e46b3615312b4e4b190cd11", "score": "0.5900054", "text": "def start_row(self):\n # You can manually add an offset to this value in the attributes #\n offset = 0 if self.start_offset is None else self.start_offset\n # Get the first cell that matches the title #\n for i, row in self.full_sheet.iterrows():\n if row[0] == self.title: return i+1+offset\n self.raise_exception(\"Could not find the start row of the table.\")", "title": "" }, { "docid": "118b69605a81dd5b0efe4e3d81bffd57", "score": "0.5884891", "text": "def get_start_point(self):\n return self.x0, self.y0", "title": "" }, { "docid": "6d750ebfdd4a018f231383e3cb2647e4", "score": "0.5870266", "text": "def horizontal_position(self, c, position = None):\n dev = self.selectedDevice(c)\n if position is None:\n resp = yield dev.query('HOR:MAI:POS?')\n else:\n yield dev.write(('HOR:MAI:POS '+str(position)))\n resp = yield dev.query('HOR:MAI:POS?')\n position = float(resp)\n returnValue(position)", "title": "" }, { "docid": "4a5b4fcea82c7b961b3a378f7aa2427c", "score": "0.58604157", "text": "def get_zero_pos(self):\n for row in range(self.get_height()):\n for col in range(self.get_width()): \n if self.get_number(row,col) == 0:\n current_position = row, col\n self._zero_pos = current_position\n return self._zero_pos", "title": "" }, { "docid": "40ee4d89672b13258ba9f2bf163dadbc", "score": "0.5849588", "text": "def position(self):\n \n return self.scanner.position()", "title": "" }, { "docid": "3aebd0e16fcf8de41dd02d7d414795cf", "score": "0.58317953", "text": "def _horizontal_points(self,p,loc):\n\t\tpoints = 0\n\t\tfor i in range(int(int((loc-1)/3)*self.gb.size)+1,int(int((loc-1)/3)*self.gb.size)+int(self.gb.size)+1):\n\t\t\tif loc == i: continue\n\t\t\tif str(p) == str(self.board[i-1]):\n\t\t\t\tpoints+=1\n\t\t\telse:\n\t\t\t\tprint str(self.board[i-1])\n\t\treturn points", "title": "" }, { "docid": 
"92e47dab178fb0722f37043e3fdcc665", "score": "0.5829246", "text": "def detector_center_line(self):\n return 0", "title": "" }, { "docid": "92e47dab178fb0722f37043e3fdcc665", "score": "0.5829246", "text": "def detector_center_line(self):\n return 0", "title": "" }, { "docid": "68c47613548c9851b5a0112aa4e7b0a7", "score": "0.5829162", "text": "def getPosHeading(self) :\n\t\treturn (self.avatar.getX(), self.avatar.getY(), \\\n\t\t\t(self.avatar.getHpr()[0])%360)", "title": "" }, { "docid": "409671b9994f95c8cf2152964b1f8567", "score": "0.5824367", "text": "def horizontalScan(self):\r\n\t\tif self.iVer != 1.5:\r\n\t\t\tself.iVer = 1.5\r\n\t\t\tself.verPosition(self.iVer)\r\n\t\tservoPIN = 22\r\n\t\tGPIO.setmode(GPIO.BCM)\r\n\t\tGPIO.setwarnings(False)\r\n\t\tGPIO.setup(servoPIN, GPIO.OUT)\r\n\t\tp = GPIO.PWM(servoPIN, 50)\r\n\t\tp.start(self.i)\r\n\t\t\r\n\t\tif self.horzFlag == True:\r\n\t\t\tself.i += .2\r\n\t\t\tp.ChangeDutyCycle(self.i)\r\n\t\t\ttime.sleep(.1)\r\n\t\t\tif self.i > 11.0:\r\n\t\t\t\tself.horzFlag = False\r\n\t\telse:\r\n\t\t\tself.i -= .2\r\n\t\t\tp.ChangeDutyCycle(self.i)\r\n\t\t\ttime.sleep(.1)\r\n\t\t\tif self.i < 2.0:\r\n\t\t\t\tself.horzFlag = True\r\n\r\n\t\tp.stop()\r\n\t\tGPIO.cleanup()", "title": "" }, { "docid": "72c5a7179f907c83bf77a31e8d3fcd99", "score": "0.5822467", "text": "def start_row(self):\n return self._start_row", "title": "" }, { "docid": "72c5a7179f907c83bf77a31e8d3fcd99", "score": "0.5822467", "text": "def start_row(self):\n return self._start_row", "title": "" }, { "docid": "72c5a7179f907c83bf77a31e8d3fcd99", "score": "0.5822467", "text": "def start_row(self):\n return self._start_row", "title": "" }, { "docid": "3bbe067baef85a94678281a55d5547a7", "score": "0.581159", "text": "def detector_center_line(self):\n return -1", "title": "" }, { "docid": "190d8b24ffff079a66e459a72299ab8c", "score": "0.5794418", "text": "def read_rover_starting_position(self):\n\t\tline = self.file.readline()\n\t\tvalues = line.split()\n\n\t\tx = int(values[0])\n\t\ty = int(values[1])\n\t\tdirection = values[2]\n\n\t\treturn (x, y, direction)", "title": "" }, { "docid": "c7b517168721977e157e019a76dc7a23", "score": "0.579433", "text": "def CursorTop(self) -> int:", "title": "" }, { "docid": "59855d1de9227e8ed1dd60174e44fdff", "score": "0.57929665", "text": "def get_start(current_width, previous_width, previous_step):\n circle = np.pi * 2 # 1 radian = 6.28\n if previous_width is None:\n previous_width = 0\n stepper = circle * 0.05 # okay honestly don't really know why this needs to be 0.05\n return previous_step + stepper * (current_width * 10 + previous_width * 10)", "title": "" }, { "docid": "522757bdda0636b662f3c650bbeae5a8", "score": "0.57861257", "text": "def _possible_starting_positions(self):\n if self.game.turn is Player.WHITE:\n positions = (self.game.board.board == 2) | (\n self.game.board.board == 1) | (self.game.board.board == 1.7)\n else:\n positions = (self.game.board.board == -2.5) | (\n self.game.board.board == -2)\n return np.transpose(np.where(positions))", "title": "" }, { "docid": "13ea68b5d84c6256e67f69829783e536", "score": "0.5785346", "text": "def horz_offset(self):\n return self._element.horz_offset", "title": "" }, { "docid": "1a3857293f755aad96cf0bcdd4bb6c8e", "score": "0.577457", "text": "def get_spin_start(self): # FIXME: make more robust\r\n for i, p in enumerate(self.cmd):\r\n if p != 0:\r\n return i", "title": "" }, { "docid": "b190eaabca9ba2e828fb9208cd879c3b", "score": "0.5754707", "text": "def linear_start(self):\n return self.offset_end", 
"title": "" }, { "docid": "712a5ac91acdea06855c9aa811145633", "score": "0.57280135", "text": "def _getInitialFrame(self):\n TODO\n return 0", "title": "" }, { "docid": "42a41f5447575b66fbf511456dc8340c", "score": "0.571473", "text": "def xmin(self):\n return self.center[0] - self.Size[0]/2./self.scale", "title": "" }, { "docid": "6b4ac35e37f58e17b231f4d755be2d41", "score": "0.57129914", "text": "def left(self):\n self.heading = (self.heading + 1) % 4", "title": "" }, { "docid": "76dad1756f52c7545efbd1877d3acb1c", "score": "0.57107407", "text": "def start(self):\n return self.range.start", "title": "" }, { "docid": "76a93e6a8e577739e06c5704b3863cdc", "score": "0.5703572", "text": "def _get_horizontalCounters(self):\n if 'H' in self.font:\n self._horizontalCounters = self['H'].horizontalCounters\n # If not counters found in the 'H' this way, or more than 1, then second guess beaming the 'H' on 0,25 height\n if not self._horizontalCounters or len(self._horizontalCounters) > 1:\n self._stems, self._horizontalCounters = self['H'].getBeamStemCounters(self['H'].maxY/4) # Cache both.\n return self._horizontalCounters", "title": "" }, { "docid": "96c8a0764d0f57373c7e602310d9f023", "score": "0.56994015", "text": "def offset_start(self):\n return self.header_end", "title": "" }, { "docid": "cb47d46f2da3224010927f4fc45310d6", "score": "0.5691412", "text": "def _get_initial_right_pos(row, col):\n # If row smaller than col, return (row=0, col=col-row).\n if row < col:\n return 0, col - row\n # Else, return (row=row-col, col=0).\n else:\n return row - col, 0", "title": "" }, { "docid": "0e9b07915c31e2cbdb46ba1fbc280645", "score": "0.5687212", "text": "def getStartState(self):\n \"*** YOUR CODE HERE ***\"\n return (self.startingPosition, 0)", "title": "" }, { "docid": "ae2e5c78f70a1f603f30238f756d775d", "score": "0.5684538", "text": "def start_row(self, value):\n self._start_row = max(0, value)", "title": "" }, { "docid": "fd82df9f5803617240df2428e1cdf7e9", "score": "0.56796545", "text": "def __horizontal_callback(self, value):\n self.__columns = int(value)\n self.__x_coordinate_start.configure(to=int(value) - 1)", "title": "" }, { "docid": "b8748d56324625c694f862b66528988f", "score": "0.56770337", "text": "def indexInitialValue(self) -> float:\n return self.__indexInitialValue", "title": "" }, { "docid": "673ca719c1b2e7a46919bed0555be97d", "score": "0.5671544", "text": "def start_col(self):\n return self._start_col", "title": "" }, { "docid": "673ca719c1b2e7a46919bed0555be97d", "score": "0.5671544", "text": "def start_col(self):\n return self._start_col", "title": "" }, { "docid": "509c69fc35a49bb96518c86aaf80b273", "score": "0.5665112", "text": "def set_start_point(self,x_start=0,y_start=0,h_start=0):\n \n self.present_x = x_start\n self.present_y = y_start\n self.present_h = h_start\n self.fixed = True", "title": "" }, { "docid": "e476e4272444db0d47651986acf5305d", "score": "0.5662812", "text": "def get_top_left(self):\n return self.transform.position - self.get_size() / 2", "title": "" }, { "docid": "461837c46227b2b8935de459fa4c7d48", "score": "0.56265736", "text": "def xpos(self):\r\n return self.settings.vi['xpos'] or 0", "title": "" }, { "docid": "4309ce4b8f952c662485b16b43af9826", "score": "0.56229234", "text": "def getLeft(self):\n tiles_array = self.robot.getBaseLineVertRange(length = self.left_sensors_range)\n results = []\n \n for row in tiles_array:\n count = 0\n for tile in row:\n if self.map.getTile(tile) == 1 or self.map.getTile(tile) == -1:\n break\n else:\n count += 1\n 
results.append(count)\n \n return results", "title": "" }, { "docid": "e2918b933eb7552f3c187330bbbb9e37", "score": "0.562271", "text": "def GetInitialCellRange( self ):\n result = None\n core = self.dmgr.GetCore()\n if core is not None:\n maxx = core.npinx\n maxy = core.npiny\n if self.channelMode:\n maxx += 1\n\tmaxy += 1\n\n result = ( 0, 0, maxx, maxy, maxx, maxy )\n\n return result", "title": "" }, { "docid": "e717498950bc6ff63e62c5ec8db7269d", "score": "0.5617522", "text": "def beginning_index(self):\n if self.language_id == LanguageId.PYTHON:\n lines = self.__text.splitlines()\n line_index = 0\n for line in lines:\n match = KVLANG_TAG_BEGIN.search(line)\n if match:\n return line_index\n line_index += 1\n return 0", "title": "" }, { "docid": "c0ab13baeddd3f198677c0f70e57b9a1", "score": "0.5616735", "text": "def center(self):\n return self.start + self.dt / 2", "title": "" }, { "docid": "f4296679a6be6ceb651f2c63ab8bb16f", "score": "0.56028533", "text": "def horiz_position(self, c, position=None):\n return self.selectedDevice(c).horiz_position(position)", "title": "" }, { "docid": "a306477805cacda684b293b6df99dd40", "score": "0.56027853", "text": "def offsetx(self):\n return self.bounds.mid_x - self.origin[0]", "title": "" }, { "docid": "ce0d5a243262caeb068fadea7236b777", "score": "0.5598953", "text": "def init_position(self):\n return np.array([self.phi(0.), self.lam(0.), self.h(0.)])", "title": "" }, { "docid": "05fd64c9d3c37457886ac21b2a8b2ede", "score": "0.55928576", "text": "def grab_x_index(self, x_loc_chess):\n if x_loc_chess > 8 or x_loc_chess < 1:\n raise ValueError(\"Invalid x coordinate: {0}\".format(x_loc_chess))\n if self.orientation:\n # For when white is on bottom\n return x_loc_chess-1\n else:\n # Black on bottom\n return 8-x_loc_chess", "title": "" }, { "docid": "3472410169ef4f24bb22ce3ac63d352b", "score": "0.558266", "text": "def find_starting_positions(track):\n return find_positions(track, 'S')", "title": "" }, { "docid": "5bb5f340cf951359dbef742dd64ac5ee", "score": "0.5577735", "text": "def usedPosition(self):\n return (constant_table_width - constant_padding - constant_card_width,\n constant_padding)", "title": "" }, { "docid": "df0b9656a3dc896926aab6c5621308a4", "score": "0.55700755", "text": "def guess_start(self):\n return", "title": "" }, { "docid": "a9db7a6ead5571eed2bc3029de644345", "score": "0.5565311", "text": "def initial_point(self):\n return self._initial_point", "title": "" }, { "docid": "e5738d0638fa1ad52a31da0d3772c570", "score": "0.5565029", "text": "def setup_initial_row(self):\n if self.random_state_start:\n return np.random.randint(self.k, size=self.width)\n else:\n split = int(self.width / 2)\n if self.width % 2 == 0:\n return np.array([0] * (split - 1) + [1] + [0] * split)\n else:\n return np.array([0] * split + [1] + [0] * split)", "title": "" }, { "docid": "20ab6b2d8c8aa919f2523e4e9444206f", "score": "0.5564723", "text": "def get_x_position (self):\n pass", "title": "" }, { "docid": "d3a57a0277fce817ebae051c1c2e2efc", "score": "0.5564066", "text": "def start(self):\r\n\t\tif self.document is None:\r\n\t\t\treturn self._orig_start\r\n\r\n\t\t# static computation of start\r\n\t\tif not self.document.is_dirty:\r\n\t\t\treturn self._orig_start\r\n\r\n\t\t# dynamic computation of start, really slow!\r\n\t\tdef compute_start(h):\r\n\t\t\tif h:\r\n\t\t\t\treturn len(h) + compute_start(h.previous_heading)\r\n\t\t\treturn len(self.document.meta_information) if \\\r\n\t\t\t\tself.document.meta_information else 0\r\n\t\treturn 
compute_start(self.previous_heading)", "title": "" }, { "docid": "454b79a0cb4ca3f8ae400ba52f641dae", "score": "0.55586934", "text": "def get_start_pos(self):\n\n return pygame.math.Vector2(self.start_pos)", "title": "" }, { "docid": "4a41d2d0fc7687c579cd88cb607390ad", "score": "0.55512", "text": "def startarrowhead(self):\n return self[\"startarrowhead\"]", "title": "" }, { "docid": "e333fa22a6b374f4cf8b0879450b693b", "score": "0.5551019", "text": "def start(self) -> Point[Scalar]:\n return self._start", "title": "" }, { "docid": "c87148f515d74b9d921d8f500f05929c", "score": "0.55403835", "text": "def get_min_position(self):\n return self.min_us", "title": "" }, { "docid": "9e1aaadc22a7fc9de49b02d4280175cd", "score": "0.55394185", "text": "def get_start_index(self): \n try: \n cell_corr = self.wks.find(self.last_tag)\n\n except gspread.exceptions.CellNotFound as ex: \n logger.error(f'Could not find cell tagged {self.last_tag} in Corretores: {ex}')\n\n return cell_corr.row", "title": "" }, { "docid": "a99074689e7e60febd1fc9fe8e032b3e", "score": "0.5537019", "text": "def Offset(self) -> int:", "title": "" }, { "docid": "e66962a1f0c8e7b50bf4975cbbb8584f", "score": "0.55365604", "text": "def get_start_point(self):\n\n return self.x_start, self.y_start, self.h_start \n #return self._adjusted_geometries[-1].get_end_point", "title": "" }, { "docid": "6251f16d9fe9e38ae373e74d85a07418", "score": "0.5535871", "text": "def xcompass(self):\n compass = 90\n if self.xoffset < 0:\n compass = 270\n\n return compass", "title": "" }, { "docid": "fa8acd4402fe77fccfd14b999456f0e2", "score": "0.55332506", "text": "def get_pos(self):\n self.run_dummy()", "title": "" }, { "docid": "ec03a8c6aac28795506145232fa23d09", "score": "0.5529706", "text": "def vertPos(self):\n return 0.", "title": "" }, { "docid": "9dc521f01f29cdb837ce8e50623d0c27", "score": "0.5493599", "text": "def rest_head_loc(self):\n self.head_loc = (self.Gs * cos(self.Cs),\n self.Gs * sin(self.Cs))", "title": "" }, { "docid": "3c4a3275a315c90940f8203c48bdcd4f", "score": "0.54929733", "text": "def get_horizontal_interval(self):\n command = \"INSP? 
\\\"HORIZ_INTERVAL\\\"\"\n readback = self.visa_if.query(command)\n readback = readback.split('\"')[1]\n readback = readback.replace(\"HORIZ_INTERVAL\",\"\")\n readback = readback.replace(\":\",\"\")\n return float(readback)", "title": "" }, { "docid": "47f61cc4654bad939fd77131464898e1", "score": "0.5485427", "text": "def getAutoComputeHomePosition(self):\r\n return _osgGA.CameraManipulator_getAutoComputeHomePosition(self)", "title": "" }, { "docid": "f3b7e4d66b76f7c0341635f080fdf1e5", "score": "0.54792225", "text": "def _get_prediction_start_index(self, anchor):\n if anchor is None or anchor == \"start\":\n iloc = 0\n elif anchor == \"end\":\n iloc = self.nobs\n else:\n iloc, _, _ = self.model._get_index_loc(anchor)\n if isinstance(iloc, slice):\n iloc = iloc.start\n iloc += 1 # anchor is one before start of prediction/simulation\n\n if iloc < 0:\n iloc = self.nobs + iloc\n if iloc > self.nobs:\n raise ValueError(\"Cannot anchor simulation outside of the sample.\")\n return iloc", "title": "" }, { "docid": "c23aa6160543eea38834f6fc51b64971", "score": "0.547619", "text": "def horizontal(self, value):\n if self.astrometry is None:\n return\n self.astrometry.horizontal = value", "title": "" }, { "docid": "a034a9a778d04c87a433a45f507a9636", "score": "0.5467698", "text": "def midleft(self) -> Tuple[float]:\n return self._x, self._y + self._h / 2.0", "title": "" }, { "docid": "190d3188722ebdb684fb6135cadd6c27", "score": "0.54613084", "text": "def _home(self):\n if (0, 0) != (self.vitem_idx, self.vitem_shift):\n self.vitem_idx = 0\n self.vitem_shift = 0", "title": "" }, { "docid": "a27a338f2a4d7f665395a70cad60a62e", "score": "0.54603636", "text": "def _get_pos(self):\n\t\treturn (self.rect.midbottom[0]-12)/24, (self.rect.midbottom[1]-16)/16", "title": "" }, { "docid": "00112441673293689cfe7c85898e73b9", "score": "0.545648", "text": "def _get_row(self, start):\n\n # Binary Search\n mn = 0\n mx = len(self.line_map) - 1\n if mx == -1 or start <= self.line_map[mn]:\n return mn + 1\n\n if start > self.line_map[-1]:\n return mx + 2\n\n while mx - mn != 1:\n idx = mn + ((mx - mn) >> 1)\n if start > self.line_map[idx]:\n mn = idx\n else:\n mx = idx\n\n return mx + 1", "title": "" }, { "docid": "380c97f1a37fcb99be6be8c02578950d", "score": "0.54433066", "text": "def mid(self):\n self.value = 0", "title": "" }, { "docid": "f3fee411b29fa378924b1de22b24f7c3", "score": "0.5438119", "text": "def origin_idxmin(self):\n return int(dataarray_idxmin(self)['origin_idx'])", "title": "" }, { "docid": "9d3acdde8666f030a807339ffa389cdd", "score": "0.5437813", "text": "def getFwdSlashStartPos(self, slashnum:int):\n if slashnum > (self.height-1):\n startx = slashnum-(self.height-1)\n starty = (self.height-1)\n if slashnum <= (self.height-1):\n startx = 0\n starty = slashnum\n \n return XYCoord(startx, starty)", "title": "" }, { "docid": "2f90d567ce6c053d2481eb1f2177fc3f", "score": "0.5434953", "text": "def getHomeOffset(self):\n params = MOT_HomingParameters()\n self.dll.ISC_GetHomingParamsBlock(self.serialNo, params)\n params.direction = 2 # go foward, hardcode it cause I'm tired of it moving.\n return self._deviceToReal(params.offsetDistance)", "title": "" }, { "docid": "5c7a0eec1cd0885171f871dc174484ee", "score": "0.5429529", "text": "def left(self):\n # change the indexes by one col to the right and draw new position to the grid\n self.coordinates = [(row, col - 1) for (row, col) in self.coordinates]", "title": "" }, { "docid": "9a8bf1a2e33941d0f4fcfc521de1dc1c", "score": "0.5429155", "text": "def 
set_nominal_pixel_positions(self):\n self.data.position = (\n self.info.detector_array.pixel_offsets[self.data.spaxel])", "title": "" }, { "docid": "3c2083e4cf646ec4d0ea48a1311557a4", "score": "0.5427902", "text": "def getHeading() -> float:\n return 0", "title": "" } ]
f3b432acd6c90f973684fbe8a79ae157
Get all possible cardset ids
[ { "docid": "662acda943d795974cf313da5aefd815", "score": "0.55051076", "text": "def get_setids(file_path: str) -> list:\n setids = []\n with open(file_path) as f:\n lines = f.readlines()\n for line in lines:\n setids.append(line.strip())\n return setids", "title": "" } ]
[ { "docid": "c971932543eade088f65a4f8fe01c226", "score": "0.7555017", "text": "def get_all_cardset():\n setids = get_setids(default_setids_path)\n for setid in setids:\n save_cardset(setid)", "title": "" }, { "docid": "91fe7c508e52bddd80006d26d0258794", "score": "0.6444697", "text": "def get_set_list(self):\r\n return self._SETIDS.keys()", "title": "" }, { "docid": "a741f74ce92a41eab7c21742f2ac3034", "score": "0.61599535", "text": "def get_sets(self, identifier):\r\n # This provider does not use sets.\r\n return []", "title": "" }, { "docid": "90663d2240292395f4aca2e65dd13b4c", "score": "0.6117517", "text": "def find_sets(self, cards_per_set):\n def is_set(cards):\n \"\"\"\n A set consists of n cards in which each feature or value\n associated with a dimension is EITHER the same on each card OR is\n different on each card.\n\n :param cards: collection of n cards\n :return: whether the cards are considered a set\n \"\"\"\n for d in range(self.dimensions):\n dimension_values = [c[d] for c in cards]\n\n if sum(1 for v in dimension_values if v is cards[0][d]) == len(cards):\n # all values in dimension are same\n continue\n\n if len(dimension_values) is len(set(dimension_values)):\n # all values in dimension are different\n continue\n\n return False\n return True\n\n if len(set(self.cards)) is not len(self.cards):\n raise Exception('Invalid input, cards differ in dimension count')\n\n sets = [c for c in combinations(self.cards, cards_per_set) if is_set(c)]\n sets_by_index = list()\n for s in sets:\n sets_by_index.append([self.cards.index(c) for c in s])\n return sets_by_index", "title": "" }, { "docid": "d4c46e919ad123b9657ddb4995e64508", "score": "0.60521495", "text": "def get_ids():\n\n # No IDs.\n if not hasattr(cdp, 'spectrum_ids'):\n return []\n\n # Return the IDs.\n return cdp.spectrum_ids", "title": "" }, { "docid": "ec77560eb358a9d2c704cb573c7ddd3c", "score": "0.6031586", "text": "def get_content_id_set() -> dict:\n return requests.get(OFFICIAL_CONTENT_ID_SET_PATH).json()", "title": "" }, { "docid": "1ae8f286065c3d5fabaefcc35c730d27", "score": "0.6016389", "text": "def get_ids(self):\n pass", "title": "" }, { "docid": "2fe5f71303c8c53da472495d32e5a083", "score": "0.59921455", "text": "def getIds():\n connect()\n c.execute('SELECT id FROM standings;')\n rows = c.fetchall()\n ids = []\n for row in rows:\n ids.append(row[0])\n return ids\n db.close", "title": "" }, { "docid": "48702bd22d3e434554caa0d26dff5c36", "score": "0.59816754", "text": "def checkid(self):\r\n print(\"Here are the ids of all Pad objects: {}\".format(self._ids))\r\n return self._ids", "title": "" }, { "docid": "dec35a99aad80de3594d9056a0314199", "score": "0.59169143", "text": "def fetchIds(self):\n\t\ttry:\n\t\t\treturn list(map(lambda resource: resource['resource']['id'], requests.get(self.url+'?_count=500').json()['entry']))\n\t\texcept:\n\t\t\tprint('Given API does not match FHIR schema. 
To see your endpoint ids edit fetchIds() function.')", "title": "" }, { "docid": "7666026e5ae3f4b90f0b5df9eefdcad2", "score": "0.5870845", "text": "def generate_container_ids():\n return CONTAINER_IDS", "title": "" }, { "docid": "a215453a6d1f3d848719014592a8463f", "score": "0.58688176", "text": "def allcinema_id(self):\n return self._allcinema_id", "title": "" }, { "docid": "b6ade6caf7083885dc539acd04ad43d1", "score": "0.5842509", "text": "def get_question_ids(conn):\n cur = conn.cursor()\n cur.execute(\"SELECT id FROM qac\")\n ids = [item[0] for item in cur.fetchall()]\n return ids", "title": "" }, { "docid": "072252a0056826b86222842a2a84a84b", "score": "0.5832944", "text": "def iter_card_sets(self) -> bool:\n for card_set in self.__root_package.all_card_sets():\n yield card_set", "title": "" }, { "docid": "f3ac911832a47084f0f0f3633c60cb8e", "score": "0.58253986", "text": "def build_configuration_set_ids(self):\n return self._build_configuration_set_ids", "title": "" }, { "docid": "218746950ffdb9f1139a7ce55b28d49a", "score": "0.5804887", "text": "def ids(self) -> Set[ItemId]:", "title": "" }, { "docid": "f1931e4889b7bd3899936324829d05b1", "score": "0.5773256", "text": "def ids(self):\n # Get a list of the datasets that this channel has\n return [ds.id_ for ds in self.datasets]", "title": "" }, { "docid": "944de24ade17125974b3740811be1485", "score": "0.5767388", "text": "def generate_all_pairs(set_of_cards):\n pairs_of_cards = []\n for pair in itertools.combinations(set_of_cards, 2):\n pairs_of_cards.append(pair)\n return pairs_of_cards", "title": "" }, { "docid": "c6c1a4aa4d4742b1ffc8c06427b0fd72", "score": "0.57107913", "text": "def set_list_of_bitsets(bit_df):\n global list_of_bitsets\n list_of_bitsets = bit_df['VarId'].unique()", "title": "" }, { "docid": "6a8b7016c7cde54e638698ce30de4f62", "score": "0.570224", "text": "def make_id_set(feature_list):\n return set((x['id'] for x in feature_list))", "title": "" }, { "docid": "bbca2ff522401a96d705138496adade9", "score": "0.56943476", "text": "def createIdList(dataset):\n idlist = []\n for item in dataset:\n idlist.append(int(item[0]))\n return idlist", "title": "" }, { "docid": "1343080127c6adc75125ee1fdd6df02e", "score": "0.5683568", "text": "def object_ids(self) -> List[int]:\n return [int(x) for x in self._extract_set(\"id\")]", "title": "" }, { "docid": "1526c9ec76ff71d1663db564006bec9c", "score": "0.5680549", "text": "def _generate_set_hand(self, card_list):\n for card in card_list:\n self.cards.append(card)\n\n self._generate_card_data_list()", "title": "" }, { "docid": "7e2761b190c9eacea922efb6484c9", "score": "0.5653454", "text": "def get_id_list() -> list:\n suffix_list = [\"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"_neg\", \"PASS\", \"PASS_neg\"]\n tense_list = [\"present{}\", \"conditional{}\", \"imperative{}\", \"imperative{}_neg\",\n \"perfect{}\", \"past{}\", \"pluperfect{}\", \"conditional_perfect{}\",\n \"quotative{}\", \"quotative_perfect{}\", \"jussive{}\", \"jussive_perfect{}\"]\n\n def add_conjugations_ids(_tense: str):\n for s in suffix_list:\n result.append(_tense.format(s))\n\n result = []\n for tense in tense_list:\n add_conjugations_ids(tense)\n\n result.append(\"usage-info\")\n result.append(\"ma_infinitive\")\n result.append(\"da_infinitive\")\n result.append(\"participle_past_active\")\n result.append(\"participle_past_passive\")\n result.append(\"quotative\")\n result.append(\"quotative_perfect\")\n result.append(\"jussive\")\n result.append(\"jussive_perfect\")\n result.append(\"des_form\")\n 
result.append(\"ma_inessive\")\n result.append(\"ma_elative\")\n result.append(\"ma_translative\")\n result.append(\"ma_abessive\")\n result.append(\"participle_present_active\")\n result.append(\"participle_present_passive\")\n return result", "title": "" }, { "docid": "7be8b049b5c947ada928793679b47623", "score": "0.5645947", "text": "def ids(self):\n return self._ids", "title": "" }, { "docid": "87bb5ff80b805727ac0cf15775f247fc", "score": "0.56342447", "text": "def count_sets(cards):\n if len(cards) != 12:\n raise ValueError(\"there are not exactly 12 cards\")\n if len(set(cards)) != 12:\n raise ValueError(\"the cards are not all unique\")\n for card in cards:\n if len(card) != 4:\n raise ValueError(\"one or more cards does not have exactly 4 digits\")\n for i in set(''.join(cards)):\n if int(i) not in (0, 1, 2):\n raise ValueError(\"one or more cards has a character other than 0, 1, or 2\")\n count = 0\n for combo in combinations(cards, 3):\n if is_set(combo[0], combo[1], combo[2]):\n count += 1\n return count", "title": "" }, { "docid": "469a14010efc6711e987bbd1d73f4dc4", "score": "0.56337255", "text": "def getSynsetIds(self, lemma, lang='EN'):\n response = requests.get(f\"https://babelnet.io/v5/getSynsetIds?lemma={lemma}&searchLang={lang}&key={self.key}\")\n return response.json()", "title": "" }, { "docid": "1f0fd5bd3477733020be983c8a261412", "score": "0.5607452", "text": "def codeSetIdentifiers():", "title": "" }, { "docid": "8381c99de11f242db0b61b61e241b76d", "score": "0.55854505", "text": "def list_imdbs():\n return list(__sets.keys())", "title": "" }, { "docid": "5221c6efaa98e54a319ff65306ad8d8a", "score": "0.55848783", "text": "def card_numbers(self):\n return [c.card for c in self.cards]", "title": "" }, { "docid": "b79e2c89372868ab8a21122965e6605d", "score": "0.5583825", "text": "def list_imdbs():\n return __sets.keys()", "title": "" }, { "docid": "b79e2c89372868ab8a21122965e6605d", "score": "0.5583825", "text": "def list_imdbs():\n return __sets.keys()", "title": "" }, { "docid": "1d131d14bd0f7ae0d8656026ceb607db", "score": "0.5580719", "text": "def incl_lctn_ids():\n return list(lctn_to_qid_dict().values())", "title": "" }, { "docid": "f443d4f713406a1c8b30b1c64d5174dc", "score": "0.5550369", "text": "def getArrayIDs(vis):\n if (not os.path.exists(vis)):\n print(\"Could not find measurement set\")\n return\n mytb = createCasaTool(tbtool)\n mytb.open(vis)\n arrayid = list(np.unique(mytb.getcol('ARRAY_ID')))\n mytb.close()\n return(arrayid)", "title": "" }, { "docid": "e2d381c21dbb50ef66313d95b75ed891", "score": "0.5548152", "text": "def _generate_card_data_list(self):\n\n # IDEA: cast the lists into sets for evaluation.\n self.value_list = set([card.value for card in self.cards])\n self.suit_list = set([card.suit for card in self.cards])", "title": "" }, { "docid": "a98b4e53ec8f597e521a6b693520dd49", "score": "0.5542943", "text": "def get_ids(self) -> List[str]:\n ids = set()\n\n if self.options[\"ae_id\"]:\n ids.update(self.options[\"ae_id\"])\n\n if self.options[\"ae_ids_file\"]:\n with open(self.options[\"ae_ids_file\"]) as ae_ids_file:\n ids.update((ae_id.strip() for ae_id in ae_ids_file.readlines()))\n\n return sorted(ids)", "title": "" }, { "docid": "d4de35271a8350cd3c27fe09c7fe27f9", "score": "0.5532444", "text": "def getTestIds(self, theme: str) -> list:\n testIdDict = {'gml': [\"EID61070ae8-13cb-4303-a340-72c8b877b00a\",\"EID09820daf-62b2-4fa3-a95f-56a0d2b7c4d8\",\"EID499937ea-0590-42d2-bd7a-1cafff35ecdb\",\"EID63f586f0-080c-493b-8ca2-9919427440cc\"],\n 'plu': 
[\"EIDeefb2267-a0ca-40b4-87ee-a286ff6dd97f\",\"EID9251e31c-1318-4f52-afe5-900eb16f5647\",\"EIDa4bf4091-b26d-4e13-ab94-4d26ea10a625\",\"EIDda4c0f98-f97a-44ad-9366-cef577cf809a\"]}\n \n return testIdDict[theme]", "title": "" }, { "docid": "4c86fee600e99163f8d79dee9a260168", "score": "0.55179787", "text": "def get_ids(self) -> List[str]:\n ids = set()\n\n if self.options[\"gpl_id\"]:\n ids.update(self.options[\"gpl_id\"])\n\n if self.options[\"gpl_ids_file\"]:\n with open(self.options[\"gpl_ids_file\"]) as gpl_ids_file:\n ids.update((gpl_id.strip() for gpl_id in gpl_ids_file.readlines()))\n\n return sorted(ids)", "title": "" }, { "docid": "ac8f223b1ea665344a402df169734f33", "score": "0.54772544", "text": "def fetch_cards(set_url):\n resp = requests.get(set_url)\n html = lxml.html.fromstring(resp.content)\n cards = html.cssselect('ul.index li')\n\n # Get card names and image urls\n return [(\n c.attrib['id'],\n c.cssselect('img')[0].attrib['data-original']\n ) for c in cards]", "title": "" }, { "docid": "60a5b49c9e875fda00175e5e41a68a5a", "score": "0.54759395", "text": "def get_all_customer_ids():\n\n # your code", "title": "" }, { "docid": "8dcae2350897b70668311e6f78056b40", "score": "0.5461868", "text": "def getAllChipIDs(device, gtx, mask=0xff000000, debug=False):\n chipID0s = readAllVFATs(device, gtx, \"ChipID0\", mask, debug)\n chipID1s = readAllVFATs(device, gtx, \"ChipID1\", mask, debug)\n ##make unknown chips report 0xdead\n return dict(map(lambda slotID: (slotID, (((chipID1s[slotID])&0xff)<<8)|(chipID0s[slotID]&0xff)\n #if (((chipID1s[slotID]>>16)&0xffff) != ((0x050<<4)+(slotID))) else 0xdead),\n if (((chipID1s[slotID]>>16)&0xffff) == 0x0000) else 0xdead),\n range(0,24)))", "title": "" }, { "docid": "35786eb5784c58d497cbef29ac6b3666", "score": "0.5459643", "text": "def generate_clinic_ids():\n pass", "title": "" }, { "docid": "366cb8fbd67de841fd7410bf6efcc98c", "score": "0.54566157", "text": "def get_species_ids(self):\n return [s.id for s in self.species]", "title": "" }, { "docid": "b4452427a70c0e16975691b95b1f6313", "score": "0.5450289", "text": "def _get_ids_from_hostname(self, hostname):\n results = self.list_hardware(hostname=hostname, mask=\"id\")\n return [result['id'] for result in results]", "title": "" }, { "docid": "9523a785b0174276ebc03b5a0ef97295", "score": "0.54420626", "text": "def get_all_sets(self):\n if self.fullset:\n return [self.fullset] + self.incset_list\n else:\n return self.incset_list", "title": "" }, { "docid": "0786eed6719594397e371461e113605a", "score": "0.54410845", "text": "def profile_ids():", "title": "" }, { "docid": "2d0bcd68dd7904d6b39c444205d884dc", "score": "0.54282206", "text": "def get_commune_ids(self):\r\n\t\treturn [rec.id_com for rec in self]", "title": "" }, { "docid": "45ff6abce875d54e93d7ea0a968e2342", "score": "0.5427406", "text": "def allCodes(codeSet):", "title": "" }, { "docid": "45ff6abce875d54e93d7ea0a968e2342", "score": "0.5427406", "text": "def allCodes(codeSet):", "title": "" }, { "docid": "35f09f821d98b49cf3af349a39a41d24", "score": "0.54105866", "text": "def get_all_simus_id(dbpath):\n ids = []\n with tables.openFile(dbpath) as db:\n for g in get_first_level_groups(db.root):\n ids.append((g._v_attrs['time'], g._v_attrs['uuid']))\n return ids", "title": "" }, { "docid": "9f2e7daaea3fcf3e5868dde2f396e151", "score": "0.54038924", "text": "def get_set(set_code: str) -> List[Dict[str, Any]]:\n LOGGER.info(f\"Downloading set {set_code} information\")\n set_api_json: Dict[str, Any] = download(SCRYFALL_API_SETS + set_code)\n if 
set_api_json[\"object\"] == \"error\":\n if not set_api_json[\"details\"].startswith(\"No Magic set found\"):\n LOGGER.warning(f\"Set api download failed for {set_code}: {set_api_json}\")\n return []\n\n # All cards in the set structure\n scryfall_cards: List[Dict[str, Any]] = []\n\n # Download both normal card and variations\n for cards_api_url in [\n set_api_json.get(\"search_uri\"),\n SCRYFALL_VARIATIONS.format(set_code),\n ]:\n # For each page, append all the data, go to next page\n page_downloaded: int = 1\n while cards_api_url:\n LOGGER.info(\n f\"Downloading page {page_downloaded} of card data for {set_code}\"\n )\n page_downloaded += 1\n\n cards_api_json: Dict[str, Any] = download(cards_api_url)\n if cards_api_json[\"object\"] == \"error\":\n if not cards_api_json[\"details\"].startswith(\"Your query didn’t match\"):\n LOGGER.warning(f\"Error downloading {set_code}: {cards_api_json}\")\n break\n\n # Append all cards on this page\n for card_obj in cards_api_json[\"data\"]:\n scryfall_cards.append(card_obj)\n\n # Go to the next page, if it exists\n if not cards_api_json.get(\"has_more\"):\n break\n\n cards_api_url = cards_api_json.get(\"next_page\")\n\n # Return sorted by card name, and by card number if the same name is found\n return sorted(\n scryfall_cards, key=lambda card: (card[\"name\"], card[\"collector_number\"])\n )", "title": "" }, { "docid": "9b139a751d3e486d600ae1fd27eede1f", "score": "0.5391465", "text": "def get_all_testcases(self, testset_id):", "title": "" }, { "docid": "88a678ba5237c3f669983712b198b027", "score": "0.5375534", "text": "def _get_option_ids(self):\n qry = self.options.values_list('option_group__id', 'value').order_by('option_group')\n ret = [make_option_unique_id(*v) for v in qry]\n return sorted_tuple(ret)", "title": "" }, { "docid": "b4d61a77628b73b8fa3220ed1c2462aa", "score": "0.53590775", "text": "def _get_plugin_id_set(plugin_info_list):\n return {plugin_info.id for plugin_info in plugin_info_list}", "title": "" }, { "docid": "460d8b99b54337e8eb4d84f83ecc8f7b", "score": "0.5354307", "text": "def get_group_ids():\n pattern = re.compile(r'\\d{6,}')\n group_ids = set()\n group_id = raw_input(\"Enter group id(s):\\n\")\n while group_id not in ['', 'done', 'exit']:\n current_ids = pattern.findall(group_id) # Find all group ids entered\n map(lambda x: group_ids.add((x, 0)), current_ids) # add them to set. 
0 is to indicate to parse everything\n group_id = raw_input(\"Enter group id(s): \")\n \n return group_ids", "title": "" }, { "docid": "d2465d4ae061153bcd35220fe939c6b2", "score": "0.53469145", "text": "def ids(self):\n return self[\"ids\"]", "title": "" }, { "docid": "ff9bd5c8091cb184ab893675c287e052", "score": "0.5344109", "text": "def sunetids(self):\n\n ids = (self.sunetid1, self.sunetid2, self.sunetid3,\n self.sunetid4, self.sunetid5, self.sunetid6)\n return [s for s in ids if s]", "title": "" }, { "docid": "5fc3753b9f57765545f84c83f81bb065", "score": "0.53337735", "text": "def get_all_atIDs( self ):\n all_entries = self.get_all()\n all_atom_ids = [res[\"atID\"] for res in all_entries]\n return all_atom_ids", "title": "" }, { "docid": "a5b41bbd442634bf78377f95fb46078e", "score": "0.5328403", "text": "def _get_derived_set(index: Index, id_: UUID) -> Set[Dataset]:\n derived_set = {cast(Dataset, index.datasets.get(id_))}\n to_process = {id_}\n while to_process:\n derived = index.datasets.get_derived(to_process.pop())\n to_process.update(d.id for d in derived)\n derived_set.update(derived)\n return derived_set", "title": "" }, { "docid": "21b642d315e07331a45956f8e37a35d5", "score": "0.5327553", "text": "def all_cards_creation(self):\r\n all_cards = []\r\n for face_card in self.face_cards:\r\n for suit in self.suits:\r\n all_cards.append((suit, face_card))\r\n return all_cards", "title": "" }, { "docid": "6dad4a62d7769289250d780055ee7cea", "score": "0.5325888", "text": "def get_all_customer_ids_from_table(table):\n\n customer_ids = set({})\n for row in table:\n customer_ids.add(str(row[6]))\n return customer_ids", "title": "" }, { "docid": "f762c8f829890be85f5f8a3383726600", "score": "0.5313835", "text": "def find_camera_ids():\n\n # Initialise the ids set\n camera_ids = set()\n\n # Create a new video capture instance\n temp = VideoCapture()\n\n # Crawl over the possible ids\n for camera_id in range(CAMERA_CAPTURES_COUNT):\n\n # Assign a new id to the video capture\n temp.open(camera_id)\n\n # Fetch the frame\n _, frame = temp.read()\n\n # If the frame is valid\n if frame is not None:\n\n # Add the id to the set\n camera_ids.add(camera_id)\n\n # Release the current camera id\n temp.release()\n\n # Remove the video capture instance\n del temp\n\n # Return the final result\n return camera_ids", "title": "" }, { "docid": "f478d81e42438fdfde300af37384e8e8", "score": "0.5313593", "text": "def reserve_ids(self, collection: str, amount: int) -> List[int]:", "title": "" }, { "docid": "d718d630cf89afd43143bb9810a6e7f8", "score": "0.53135663", "text": "def generate_numbers_set():\n return set(random.sample(range(1, 46), 6))", "title": "" }, { "docid": "db76fde91194a0c7a671bf93cab208d9", "score": "0.5303777", "text": "def cidrs(self) -> Sequence[str]:\n return pulumi.get(self, \"cidrs\")", "title": "" }, { "docid": "4a6c4888235d4919c89c2c852459dc46", "score": "0.5298426", "text": "def ids(self) -> Sequence[str]:\n return pulumi.get(self, \"ids\")", "title": "" }, { "docid": "4a6c4888235d4919c89c2c852459dc46", "score": "0.5298426", "text": "def ids(self) -> Sequence[str]:\n return pulumi.get(self, \"ids\")", "title": "" }, { "docid": "4a6c4888235d4919c89c2c852459dc46", "score": "0.5298426", "text": "def ids(self) -> Sequence[str]:\n return pulumi.get(self, \"ids\")", "title": "" }, { "docid": "4a6c4888235d4919c89c2c852459dc46", "score": "0.5298426", "text": "def ids(self) -> Sequence[str]:\n return pulumi.get(self, \"ids\")", "title": "" }, { "docid": "8fa69d72bd7ed5ccdbedfe43e1a65f4b", "score": 
"0.5293125", "text": "def test_biomolecules_id_get(self):\n pass", "title": "" }, { "docid": "6c4373e043d798fa6d672958eb2309f5", "score": "0.52885914", "text": "def get_n_sets(self):\r\n return len(self._SETIDS)", "title": "" }, { "docid": "6db8fb2722fe56d776c3d252d7972a7b", "score": "0.52768517", "text": "def objectIds(type=None):", "title": "" }, { "docid": "e37f943014f99f04045c8fe6b6e0238a", "score": "0.52761775", "text": "def all_sets_on_table(table):\n sets = []\n for card1 in table:\n for card2 in table[table.index(card1)+1:]:\n for card3 in table[table.index(card2)+1:]:\n if is_set(card1, card2, card3):\n print is_set(card1,card2,card3)\n print [pretty_card(card1), pretty_card(card2), pretty_card(card3)]\n sets.append([card1, card2, card3])\n if not sets:\n return False\n else:\n prettysets = []\n for _set in sets:\n prettyset = []\n for card in _set:\n prettyset.append(pretty_card(card))\n prettysets.append(prettyset)\n return prettysets", "title": "" }, { "docid": "68ae314772f9d12bad3d73a045e09b37", "score": "0.52677315", "text": "def get_all_unit_ids(self) -> [str]:\n all_units_ids = []\n for id in self.units:\n all_units_ids.append(id)\n return all_units_ids", "title": "" }, { "docid": "d781ffe9c39824f33acfe661d2a0d8b9", "score": "0.5266597", "text": "def getSubIds():\n\n returnList = []\n subs = Golfer.objects.all().filter(team=0).values('name', 'id')\n\n for sub in subs:\n returnList.append({'name': sub['name'], 'id': sub['id']})\n\n return returnList", "title": "" }, { "docid": "4740ab5f7d40364d6f349adb1d790128", "score": "0.5252688", "text": "def idmap():\n return _INSTANCE.idmap", "title": "" }, { "docid": "ee92b157489200d86f61b514894b6a13", "score": "0.5250147", "text": "def get_all_config_id(self):\n\n return self.query_all(f\"\"\"\n SELECT DISTINCT config_id\n FROM config;\"\"\")", "title": "" }, { "docid": "e96ff5c1c5df069b9210a59adcf4bc9b", "score": "0.5232603", "text": "def nuclide_set(self):\n return self._argument.nuclide_set", "title": "" }, { "docid": "52dcacb8d45f7b0ac116e14bfc61d475", "score": "0.5229126", "text": "def get_sets (self):\n raise NotImplementedError", "title": "" }, { "docid": "99ad613f354df58916a716ac99e6662c", "score": "0.5228579", "text": "def ids(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"ids\")", "title": "" }, { "docid": "99ad613f354df58916a716ac99e6662c", "score": "0.5228579", "text": "def ids(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"ids\")", "title": "" }, { "docid": "99ad613f354df58916a716ac99e6662c", "score": "0.5228579", "text": "def ids(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"ids\")", "title": "" }, { "docid": "99ad613f354df58916a716ac99e6662c", "score": "0.5228579", "text": "def ids(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"ids\")", "title": "" }, { "docid": "99ad613f354df58916a716ac99e6662c", "score": "0.5228579", "text": "def ids(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"ids\")", "title": "" }, { "docid": "99ad613f354df58916a716ac99e6662c", "score": "0.5228579", "text": "def ids(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"ids\")", "title": "" }, { "docid": "99ad613f354df58916a716ac99e6662c", "score": "0.5228579", "text": "def ids(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"ids\")", "title": "" }, { "docid": "ff260c0eadecfb658c89052eb463f326", "score": "0.5220032", "text": "def content_ids(self):\n if not self.content_id_map:\n return\n if self._cids is None:\n self._load_cids()\n return 
self._cids", "title": "" }, { "docid": "3c3900ccf577994726265833c719a2c7", "score": "0.5217564", "text": "def build_all_cards(self):\r\n\r\n cards = list()\r\n\r\n for suit in range(0, 4):\r\n for value in range(2, 15):\r\n cards.append(Card(value, suit))\r\n\r\n return cards", "title": "" }, { "docid": "29b8ab86a192dd9f79ae6d36d0b7d9ab", "score": "0.5208064", "text": "def available_identifiers():\n return list(instance_pool.keys())", "title": "" }, { "docid": "27b8089cf96b8d2650fb011f144dca40", "score": "0.52040815", "text": "def getallid(self):\n i = 0\n j = 0\n while True:\n if self.search() == 0:\n break\n if self.crc8(self.newromno) == 0:\n self.romstorage[i] = deepcopy(self.newromno)\n print (\"No. {} ID:{}\".format(i, self.romstorage[i]))\n i += 1\n\n if i == len(self.romstorage):\n print (\"max. Anzahl OneWire Clients\")\n break\n\n else:\n if self.debug:\n print (\"ERR CRC8 Test\")\n raise CRCError()\n if self.debug:\n i = 0\n print (\" \")\n while self.romstorage[i] != None:\n print (str(i) +\": \")\n for j in range(0, len(self.romstorage[i])):\n print(hex(self.romstorage[i][j]), end=\"\")\n i += 1\n print (\" \")\n return self.romstorage", "title": "" }, { "docid": "7ad26bcc39a97f215759053fc3d4e4f9", "score": "0.52009195", "text": "def list_hand_ids():\n\n # Load existing hands.\n client = pymongo.MongoClient()\n db = client[\"bridge_problem_database\"]\n hands_collection = db[\"hands\"]\n\n for hand in hands_collection.find():\n\n print(hand[\"_id\"])\n print(hand[\"question\"])\n print(hand)", "title": "" }, { "docid": "8eea4fc5f8dbca3bd30095520b62b2e9", "score": "0.51987106", "text": "def get_work_ids(input_df: pd.DataFrame) -> list:\n return list(set(input_df[\"work.id\"]))", "title": "" }, { "docid": "81d96c9ced2ca6184ddf96faf18902a6", "score": "0.51612777", "text": "def getCatIds(self, catNms=[], supNms=[], catIds=[]):\n ids1 = super(self.__class__, self).getCatIds(catNms, supNms, catIds)\n ids2 = [0]\n if self.dataset.get('categories2', []) != []:\n ids2 = [cat['id'] for cat in self.dataset['categories2']]\n\n return ids1, ids2", "title": "" }, { "docid": "8f01d626a179cd215065bfed84b61458", "score": "0.5158239", "text": "def getDocIds():", "title": "" }, { "docid": "fe53e07359cc002c5c10ab181d64147d", "score": "0.51547474", "text": "def create_cards():\r\n return [ suit + rank for suit in \"CDHS\" for rank in \"A23456789TJQK\" ]", "title": "" }, { "docid": "ca157061d4d1ac49fbde0cacf7b28842", "score": "0.5152769", "text": "def lego_sets():\n # you must replace this line and return your own list\n return res_sets", "title": "" }, { "docid": "178f66bb4ee5c46615fc8a829397352d", "score": "0.5149913", "text": "def getRecipeIds(self):\n url = apiBaseUrl + apiEndpoints[\"Recipes\"]\n params = {\n \"lang\": apiLanguage,\n }\n response = self.httpSession.get(url=url, params=params)\n return response.json()", "title": "" }, { "docid": "6a38b36ccd2099b955e46f2062f6c951", "score": "0.514548", "text": "def sq_abstract(self):\n ids = sorted(self.identifiers())\n retval = []\n for k, g in groupby(ids, lambda x: x.split('-')[0]):\n seq = sorted(g, key=lambda x: int(x.split('-')[1]))\n if len(seq) > 1:\n retval.append(seq)\n \n return retval", "title": "" }, { "docid": "6d6dbd2f7f2425fe27d827388bb4950f", "score": "0.5136878", "text": "def generate_set_l2_general(ident,size=13):\n assert ident >= 0\n assert ident < 256\n l=[]\n k=0\n for x in range(size):\n k+=1\n l.append(k * 2**16 + ident)\n assert len(l) == size\n return l", "title": "" }, { "docid": "6dfa1ea39e675e8f5bb7cf409e5f8335", 
"score": "0.51348805", "text": "def gen_all_cards():\r\n for s in gen_all_suits():\r\n for r in gen_all_ranks():\r\n yield r + s\r\n yield \"SJ\"\r\n yield \"BJ\"", "title": "" } ]
d56d27a04dffa32334af0fddef254c07
Adds a player to the tournament database. The database assigns a unique serial id number for the player. (This should be handled by your SQL database schema, not in your Python code.)
[ { "docid": "5b2eaed157be9880ea84a10d5e11cd85", "score": "0.75457585", "text": "def registerPlayer(name, tournament = False):\n\n return execQueryAndCommit(\n \"INSERT INTO player (t_id, name) VALUES (%s,%s) RETURNING p_id\",\n **{\n \"parms\": (tournament, name),\n \"tournament\": tournament\n }\n )", "title": "" } ]
[ { "docid": "b28344858e63ea673aaee173313c955a", "score": "0.82379323", "text": "def addPlayerToDb(self,player):\r\n try:\r\n print(\"[SQL] inserting \"+player.name)\r\n string='INSERT INTO users(name,phone,email,points,timeRoot,timeUser) VALUES(\"{}\",{},\"{}\",{},\"{}\",{})'.format(player.name,player.phone,player.email,0,0,0)\r\n cursor= self.getCursor()\r\n cursor.execute(string)\r\n cursor.execute('select id from users where name = \"{}\"'.format(player.name))\r\n player.id = cursor.fetchall()\r\n self.db.commit()\r\n cursor.close()\r\n except self.db.Error as er:\r\n print(\"[SQL ERROR] \"+er.message)", "title": "" }, { "docid": "4662eeb3d939fba6b9d467106c7e63b3", "score": "0.79493356", "text": "def registerTournamentPlayer(tournament, player):\n db = connect()\n cursor = db.cursor()\n query = \"INSERT INTO tournaments_players (tournament, player) VALUES (%s, %s)\"\n cursor.execute(query, (tournament, player))\n db.commit()\n db.close()", "title": "" }, { "docid": "f8c33b926777cb63e903d98339a03693", "score": "0.79327154", "text": "def addPlayerToTournament(player_id, tournament):\n db, cursor = connect()\n player_id = bleach.clean(player_id)\n tournament = bleach.clean(tournament)\n add_player_to_tournament_query = \"INSERT INTO players_in_tournaments(player_id, tournament_id) VALUES(%s, %s)\"\n cursor.execute(add_player_to_tournament_query,\n (player_id, tournament))\n db.commit()\n db.close()", "title": "" }, { "docid": "cbb3ee4dd040d4a72b7c708bb08accb7", "score": "0.773976", "text": "def registerPlayer(name):\n tournament = getCurrentTournamentId()\n\n db = connect()\n cursor = db.cursor()\n cursor.execute('INSERT INTO players (name, tournament) VALUES (%s, %s)', (name, tournament))\n db.commit()\n db.close()", "title": "" }, { "docid": "96f8eddee865af48618f0b3efbe168b4", "score": "0.76891655", "text": "def registerPlayer(name, tournament_id=-1):\n\n (DB, cursor) = connect(caller=\"COUNT PLAYERS\")\n\n if DB:\n try:\n if tournament_id == -1:\n cursor.execute('INSERT INTO players (name, tournament) VALUES (%s, DEFAULT) RETURNING id;', (name, ))\n else:\n cursor.execute('INSERT INTO players (name, tournament) VALUES (%s, %s) RETURNING id;', (name, tournament_id))\n\n id_of_new_row = cursor.fetchone()[0]\n\n DB.commit()\n\n #cursor.execute('SELECT LASTVAL()')\n #id_of_new_row = cursor.fetchone()[0]\n\n cursor.close()\n DB.close()\n\n print \"\\tRESISTER PLAYER: '{name}' was succesfully added as player {id}.\".format(name=name, id=id_of_new_row)\n\n except Exception as e:\n print \"\\tREGISTER PLAERY: An error occured trying to create a player named {name}!\".format(name=name)\n print \"\\t\", e\n\n return id_of_new_row\n else:\n return None", "title": "" }, { "docid": "530f5d697b210068f57cd89646223392", "score": "0.762446", "text": "def registerPlayerInTournament(player_id, tournament_id=0):\n\n query = \"INSERT INTO registry (tournament_id, player_id) VALUES (%s, %s);\"\n\n conn, cur = connect()\n\n cur.execute(query, (tournament_id, player_id))\n conn.commit()\n\n cur.close()\n conn.close()", "title": "" }, { "docid": "61030c46ee70e3167ccfb4f75637becc", "score": "0.7532352", "text": "def registerPlayer(name):\n # takes in a name and registers a player\n DB = connect()\n c = DB.cursor()\n c.execute(\"INSERT INTO players (name) VALUES (%s)\", (name,))\n DB.commit()\n DB.close()\n print(\"A new player named {} joined the tournament\".format(name))", "title": "" }, { "docid": "3ebf64b34d7e4e255485796e65ed7c78", "score": "0.7524633", "text": "def register_player(player_name, 
tournament_id = None):\n c = connect().cursor()\n # first we need to find an open tournament to register the player into\n\n if tournament_id is None or type(tournament_id) != int:\n tournament_id = globals()['tournaments'].get_tournament()\n\n # We will use the tournament passed in by user, but first we'll check\n # because we can't allow registration into tournaments which have\n # begun already.\n c.execute(\n \"\"\"select count(*) from matches where tournament_id = %s\"\"\"\n % (tournament_id,))\n if int(c.fetchone()[0]) > 0:\n raise CheaterException\n\n # insert player into tournament\n c.execute(\"\"\"\n insert into players (player_name, tournament_id) values (%s, %s) returning player_id\"\"\", (player_name, tournament_id))\n player_id = int(c.fetchone()[0])\n commit(c)\n return player_id", "title": "" }, { "docid": "7894f3cbaaa0f65cb7cde6af44804fbe", "score": "0.7455049", "text": "def add_to_db(self):\n conn = tournament.connect()\n cursor = conn.cursor()\n statement = \"INSERT INTO match (player1_id, player2_id, tourney_id, round, player1_score, player2_score, ties) VALUES (%s, %s, %s, %s, %s, %s, %s)\"\n data = (self.player1_id, self.player2_id, self.tourney_id, self.round, self.player1_score, self.player2_score, self.ties)\n cursor.execute(statement, data)\n conn.commit()\n conn.close()\n return", "title": "" }, { "docid": "1ec2b98fce06691b16a488bdd0e2cf11", "score": "0.74520063", "text": "def create_player():\n\n player = {\n 'name': unique_id('test-player-')\n }\n\n backend.database.player.upsert([player])\n return player['name']", "title": "" }, { "docid": "0a294d971808d852c0420c2f6c450fd3", "score": "0.74282956", "text": "def addPlayer(username, cash):\n DB = connect()\n c = DB.cursor()\n # The player is added to the database and given $100:\n c.execute(\"insert into players values (%s, %s);\", \n (username, cash,))\n \n # Give the player a holding in each candidate:\n candidates = getCandidateList()\n for candidate in candidates:\n c.execute(\"INSERT into positions (owner, candidate, quantity) values (%s, %s, %s);\", (username, candidate[0], 1,))\n\n DB.commit()\n DB.close()", "title": "" }, { "docid": "85c177644190cd80afd48f47a3a691cb", "score": "0.73719746", "text": "def registerPlayer(name, tournament_id=0):\n\n registerPlayerInTournament(addPlayer(name), tournament_id)", "title": "" }, { "docid": "f64a2048b383b56ad3452a960596df83", "score": "0.73597354", "text": "def registerPlayer(name, t_id):\n p_id = execute_query(['INSERT INTO players\\\n (name) VALUES (%s) RETURNING id'],\n [(bleach.clean(name),)], 1)\n\n execute_query(['INSERT INTO scorecard\\\n (tournament,player,score,played,bye)\\\n VALUES (%s,%s,%s,%s,%s)'],\n [(bleach.clean(t_id), p_id[0], 0, 0, 0)])", "title": "" }, { "docid": "1db5947d0d1b7b97d7cecc9c00a4ab88", "score": "0.7301643", "text": "def test_add_player(self):\n g = Game(players=['id1', 'id2'])\n db.session.add(g)\n db.session.commit()\n\n new_id = g.add_player()\n\n db.session.flush()\n\n self.assertListEqual(g.players, ['id1', 'id2', new_id])", "title": "" }, { "docid": "56a5378a6c090091bd1f1adda2b98ba2", "score": "0.72928685", "text": "def registerPlayer(name, tourn_id=1):\n\n conn = connect()\n c = conn.cursor()\n if tourn_id == 2:\n query = \"INSERT INTO players_2 (name) VALUES (%s);\"\n else:\n query = \"INSERT INTO players (name) VALUES (%s);\"\n c.execute(query, (name,))\n conn.commit()\n conn.close()", "title": "" }, { "docid": "363451f1be9707d9e40e73aa0d796d14", "score": "0.7275994", "text": "def addplayer(self, player):\n 
self.roster[player.seed] = player", "title": "" }, { "docid": "4ab1d290521a093bc6b426c3d394007d", "score": "0.7264944", "text": "def registerPlayer(playerName):\n\tdb, cursor = connect()\n\tquery = \"\"\"\n\tINSERT INTO players (name) VALUES (%s)\n\t\"\"\"\n\tparameter = (playerName,)\n\tcursor.execute(query, parameter)\n\tdb.commit()\n\tdb.close()", "title": "" }, { "docid": "53c4c48d2a51c05141a317db999b3229", "score": "0.7245547", "text": "def add_player():\n\n player_id = request.form['player_id']\n name = request.form['name']\n position = request.form['position']\n team = request.form['team']\n adp = request.form['adp']\n high = request.form['high']\n low = request.form['low']\n stdev = request.form['stdev']\n bye = request.form['bye']\n\n \n\n player = Players(player_id=player_id, name=name, position=position, team=team, adp=adp, high=high, low=low, stdev=stdev, bye=bye)\n db.session.add(player)\n db.session.commit()\n\n return redirect(f\"/list\")", "title": "" }, { "docid": "670020439ffd6207a3c4cddbd63c22f5", "score": "0.7217887", "text": "def registerPlayer(name):\n\n # Get or create the current tournament\n tournament_id = __get_current_tournament()\n if not tournament_id:\n __commit_and_close(\"INSERT INTO tournaments (is_won) VALUES (FALSE);\")\n tournament_id = __get_current_tournament()\n\n # Register this new player to the current tournament\n __commit_and_close(\"INSERT INTO players (name, tournament_id) VALUES (%s, %s)\", (name, tournament_id))", "title": "" }, { "docid": "253537fbd2dc8379a2558d3350126e06", "score": "0.72004664", "text": "def registerPlayer(name):\n conn, c = getConnCursor()\n\n # Add the player providing the name\n c.execute(\"INSERT INTO players (name) VALUES (%s);\", (name,))\n conn.commit()\n conn.close()", "title": "" }, { "docid": "74f61f08ba49a1fb8390a214d4f3ee1d", "score": "0.7168091", "text": "def save_player(self):\n if players_table.get(PlayerQuery.name == self.name) is None:\n players_table.insert(self.serialize())\n return True\n else:\n print(\"Player already existing in the database - Resubmit current player\")\n return False", "title": "" }, { "docid": "121f6d3afcd859efd8077028b73f49cd", "score": "0.715388", "text": "def set_new_player(player_id, playername, position, club_id):\n c = Club.query.filter_by(id=club_id).first()\n p = Player(id=player_id, name=playername, position=position, club=c)\n db.session.add(p)\n db.session.commit()\n return p", "title": "" }, { "docid": "a10f9c175151dbda880318425c7fd999", "score": "0.7120996", "text": "def registerPlayer(name):\n DB = connect()\n c = DB.cursor()\n c.execute(\"INSERT INTO players \"\n \"(name) VALUES (%s);\" , (name,) )\t\n DB.commit()\n DB.close()", "title": "" }, { "docid": "c2b81cc046ce54770741d2f2c2198bb6", "score": "0.7089735", "text": "def registerPlayer(name, team_id):\n # Splitting the full name in first name and last name\n full_name = name.split()\n first_name = full_name[0]\n\n if len(full_name) == 2:\n last_name = full_name[1]\n query = \"\"\"INSERT INTO swiss_tournament.player_info( first_name, last_name, team_id)\n VALUES ( %s, %s, %s ) RETURNING id;\"\"\"\n data = (first_name, last_name, team_id)\n else:\n query = \"\"\"INSERT INTO swiss_tournament.player_info( first_name, team_id)\n VALUES ( %s, %s) RETURNING id;\"\"\"\n data = (first_name, team_id)\n\n conn = connect()\n cur = conn.cursor()\n cur.execute(query, data)\n player_id = cur.fetchone()[0]\n conn.commit()\n cur.close()\n conn.close()\n query = \"\"\"INSERT INTO\n 
swiss_tournament.player_stats(id,wins,losses,draws,total_points)\n VALUES (%s, %s, %s, %s, %s);\"\"\"\n data = (player_id, 0, 0, 0, 0)\n conn = connect()\n cur = conn.cursor()\n cur.execute(query, data)\n conn.commit()\n cur.close()\n conn.close()", "title": "" }, { "docid": "79e8138052fb6e0ec476dcad7e5e0ec0", "score": "0.7081572", "text": "def registerPlayer(name):\n with get_cursor() as cur:\n query = \"INSERT INTO Player(name) VALUES (%s);\"\n cur.execute(query, (name,))", "title": "" }, { "docid": "f9c70eb43af2e200d5eee6f0ed984e68", "score": "0.70732206", "text": "def add_new_players(self, tournament, number):\n player = self.get_player(number)\n tournament.add_player(player)\n self.players_table(player)", "title": "" }, { "docid": "75a9bfdb6b6a8130cf836ecfd754f6af", "score": "0.7071062", "text": "def registerPlayer(name):\n query = \"INSERT INTO players (name) VALUES (%s);\"\n parametr = (name,)\n conn = DB().execute(query, parametr, True)", "title": "" }, { "docid": "4f8082983f686763d05e9fe186c35949", "score": "0.7060247", "text": "def registerPlayer(name):\n\n db = connect()\n c = db.cursor()\n c.execute(\"INSERT INTO players (name) VALUES(%s)\", (name,))\n db.commit()\n db.close()", "title": "" }, { "docid": "f8b63c3001ad1911de2ef50b6282213a", "score": "0.70576113", "text": "def menu_add_player(self):\n\n viewplayer = ViewPlayer()\n player = MethodeTournament().elements_player()\n cplayer = Player(**player)\n add_player = cplayer.add_players(player)\n resultat = MethodePlayer().duplicate_search(add_player)\n serialized_player = resultat.get(\"valided\")\n existing = resultat.get(\"no_valided\")\n if not serialized_player == []:\n cplayer.save_player(serialized_player)\n viewplayer.print_new_player_register()\n self.ask_add_again_player()\n if not existing == []:\n viewplayer.print_exicting_player(existing)\n self.ask_add_again_player()", "title": "" }, { "docid": "95ee0c6cfa27487874f72ee1b8d0f574", "score": "0.70462745", "text": "def registerPlayer(name):\n db, cursor = connect()\n register_player_query = \"INSERT INTO players(player_name) VALUES(%s) RETURNING player_id\"\n name = bleach.clean(name)\n cursor.execute(register_player_query, (name,))\n row_id = cursor.fetchone()[0]\n db.commit()\n db.close()\n return row_id", "title": "" }, { "docid": "03d8f4333ade9c9e19bdf52fdc3a7a28", "score": "0.70428485", "text": "def registerPlayer(name):\n DB = connect()\n cur = DB.cursor()\n cur.execute(\"insert into players (playername) values (%s);\", (name,))\n DB.commit()\n cur.execute(\"\"\"insert into standings (playerid,playername,wins,matches)\n (select p.playerid, p.playername, 0, 0 from players p where\n p.playername = %s);\"\"\", (name,))\n DB.commit()\n DB.close()", "title": "" }, { "docid": "1f86baace0646ddbc12ff175283c3f40", "score": "0.70396936", "text": "def registerPlayer(name):\n\n db, cursor = connect()\n\n # add player to players table and return its id:\n query = \"INSERT INTO players(name) VALUES(%s) RETURNING id;\"\n param = (name,)\n cursor.execute(query, param)\n return_id = cursor.fetchone()[0]\n\n db.commit()\n db.close()\n return return_id", "title": "" }, { "docid": "0bb1e5e7a9f51d9c69d0df1b8f8a0027", "score": "0.701928", "text": "def registerPlayer(name):\n db,c = connect()\n query = \"\"\"INSERT INTO players(name)\n VALUES (%s)\"\"\"\n parameter = (name,)\n c.execute(query, parameter)\n db.commit()\n db.close()", "title": "" }, { "docid": "c22c24b848b6bb60733c006265e2f76b", "score": "0.6985122", "text": "def registerPlayer(name):\n\tDB,cur = 
connect()\n\n\tcur.execute(\"INSERT INTO player(name) VALUES (%s);\", (name,))\n\n\tDB.commit()\n\tDB.close()", "title": "" }, { "docid": "a9ed7f6379ac55e0754d81b85f3fe188", "score": "0.69665843", "text": "def registerPlayer(name):\n db, cursor = connect()\n answer = raw_input(\"Is this an existing player? [yN]\")\n if answer.lower() != \"y\":\n query = \"INSERT INTO players(name) VALUES (%s)\"\n cursor.execute(query, (name,))\n db.commit()\n query = \"SELECT ID FROM players WHERE name = %s\"\n cursor.execute(query, (name,))\n result = cursor.fetchone()\n player_id = result[0]\n query = \"INSERT INTO currentgame(ID) VALUES (%s)\"\n cursor.execute(query, (player_id,))\n db.commit()\n db.close()", "title": "" }, { "docid": "d96f9bc6465e06c4deb20a6d8e8a148c", "score": "0.6965661", "text": "def addPlayer(name):\n\n query = \"INSERT INTO player (name) VALUES (%s) RETURNING id;\"\n\n conn, cur = connect()\n\n cur.execute(query, (name,))\n conn.commit()\n\n player_id = cur.fetchone()[0]\n\n cur.close()\n conn.close()\n\n return player_id", "title": "" }, { "docid": "18e9354a1d4e93b89f97cfa7ea340c18", "score": "0.6960345", "text": "def registerPlayer(name):\n conn = connect()\n c = conn.cursor()\n query = \"INSERT INTO Players (name) VALUES (%s);\"\n param = (name,)\n c.execute(query, param)\n conn.commit()\n conn.close()", "title": "" }, { "docid": "eb49ba12f25801053dc1e4cf3918c33e", "score": "0.6909422", "text": "async def add(player: discord.Member, match: str):\n print(player.name)\n print(player.bot)\n print(player.discriminator)\n print(player.id)\n print(player)\n await bot.say('Adding {0} to tournament {1}'.format(player.name, match))", "title": "" }, { "docid": "071f2431e9f9bf18b8811378a0cd46c4", "score": "0.6877442", "text": "def add_player(self, player):\r\n if player.key() in self.players:\r\n raise JoinError('Player is already in game.')\r\n if len(self.players) >= self.rule_set.num_players:\r\n raise JoinError('Game is full.')\r\n if self.state != 'waiting':\r\n raise JoinError('Game is not accepting new players.')\r\n \r\n self.players.append(player.key())\r\n\r\n # Start the game when it has enough players.\r\n if len(self.players) == self.rule_set.num_players:\r\n random.shuffle(self.players)\r\n self.state = 'playing'\r\n self.turn = 0\r\n self.current_player = 1\r\n\r\n self.update_player_names()\r\n self.put(True)", "title": "" }, { "docid": "7c42e887789114ea341e9fe9a2fa7fb3", "score": "0.68652344", "text": "def register_player(self, player):\n self.players[player.player_id] = player\n return True", "title": "" }, { "docid": "9e5dd9890ba5a146905e9678136a3c24", "score": "0.6855911", "text": "def registerPlayer(name):\n name = str(name)\n try:\n connection = connect()\n c = connection.cursor()\n c.execute(\"INSERT INTO players(name) values(%s)\", (name,))\n connection.commit()\n connection.close()\n except Exception as e:\n print \"error: \" + str(e)", "title": "" }, { "docid": "25d0cf0dc4bf1b8bbc6b84e6ed32feae", "score": "0.6840294", "text": "def add_player(game_id, player_id, player_type):\n game = Game(None, None, persistence_provider=persistence_provider) # instance as null to later load from id\n game.load(game_id)\n if game.join_player(player_id=player_id, player_type=player_type):\n update_game_index(game_id=game_id, gameinfo=game.static_metadata())\n return True\n else:\n return False", "title": "" }, { "docid": "2e8648060c25afe431f08dfc91eb1374", "score": "0.68223935", "text": "def add_players_to_tournament(self):\n number_of_player = 0\n list_of_players_added = []\n 
list_of_remaining_players = Player.list_of_remaining_players(\n self, self.player_table\n )\n while number_of_player < 8:\n self.table.remaining_players(list_of_remaining_players)\n self.warning.remaining_players_to_add(number_of_player)\n while True:\n response = self.select.add_player_create_tournament_menu()\n if input_validators.is_valid_add_player_create_tournament_menu_response(\n response\n ):\n break\n if response == \"1\":\n if not self.player_table or list_of_remaining_players == []:\n self.warning.no_player()\n continue\n while True:\n existed_player_id = int(\n self.choice_player_for_add_player_to_a_tournament()\n )\n if input_validators.is_valid_id_player(existed_player_id) is True:\n break\n list_of_players_added.append(existed_player_id)\n elif response == \"2\":\n new_player_id = self.create_player_in_tournament()\n existed_player_id = new_player_id\n list_of_players_added.append(new_player_id)\n number_of_player += 1\n contains_duplicates = any(\n list_of_players_added.count(element) > 1\n for element in list_of_players_added\n )\n if contains_duplicates:\n list_of_players_added.pop(-1)\n number_of_player -= 1\n self.warning.add_a_player_several_time()\n continue\n self.tournament_table.update(\n {\"players\": list_of_players_added},\n doc_ids=[self.tournament_table.all()[-1].doc_id],\n )\n Player.initialize_score(self, self.player_table, self.tournament_table)\n for player in list_of_remaining_players:\n if player[0] == existed_player_id:\n list_of_remaining_players.remove(player)", "title": "" }, { "docid": "92b7b531cb2af75f09674e99cdf9e4d1", "score": "0.6815449", "text": "def save_player(session, player_dict):\n player = Player()\n player.id = player_dict['id']\n player.name = player_dict['name']\n player.nation = player_dict['nation']\n if 'registration_date' in player_dict:\n player.registration_date = player_dict['registration_date']\n session.add(player)\n session.commit()\n return player", "title": "" }, { "docid": "53131d2012d6cd9cc8a43c63e8f654cd", "score": "0.67894703", "text": "def registerPlayer(name):\n conn = connect()\n c = conn.cursor()\n c.execute(\"insert into players (name, wins, matches) values(%s, 0, 0)\", (name,))\n #query = \"insert into players (wins, matches) values ( 0 , 0)\"\n #c.execute(query)\n conn.commit()\n global numberOfPlayers;\n numberOfPlayers += 1\n \"\"\"\n Printing out the inserted names\n \n print \"current table\"\n\n newquery = \"select * from players\"\n c.execute(newquery)\n table = c.fetchall()\n print table\n print \"\\n\"\n \"\"\"\n\n\n conn.close()", "title": "" }, { "docid": "a934837276bbf88b1b1e92cd11d0145b", "score": "0.6784938", "text": "def registerPlayer(name):\n db = connect()\n cursor = db.cursor()\n query = \"INSERT INTO players (name) VALUES (%s) RETURNING id\"\n cursor.execute(query, (name,))\n db.commit()\n player = cursor.fetchone()\n db.close()\n\n return player[0]", "title": "" }, { "docid": "7ff4bc4cfb6fd2d54525fcb9730024f0", "score": "0.6784539", "text": "def add_player_from_list_players(self, tournament):\n db = self.db\n list_players = db.table(\"player\").all()\n list_players_deserialize = [Player.deserialize(x) for x in list_players]\n\n for player in list_players_deserialize:\n self.view.show_add_players(player, list_players_deserialize.index(player) + 1)\n\n selected_player = self.check_id_player()\n player = list_players_deserialize[selected_player - 1]\n tournament.add_player(player)\n\n self.view.show_message(\" Number of registered players :{}\".format(len(tournament.list_players)))", "title": "" 
}, { "docid": "2ea4b18bd9e3e4b869e6a95cc6650b42", "score": "0.6671186", "text": "def add_player(self, player):\n for sg in Semigroup.objects.filter(players=player):\n sg.players.remove(player)\n\n self.players.add(player)", "title": "" }, { "docid": "8df0b21a91a48afe6a440edb40893055", "score": "0.6664502", "text": "def add_player(self, player):\n self.__PLAYERS.append(player)", "title": "" }, { "docid": "96a8e036e708e05df452ecb0da42f7b1", "score": "0.66595656", "text": "def registerPlayer(name): #PASS TEST 1\n print \"(4)registerPlayer name =\",name\n\t\n try: \n #cur.execute(\"INSERT INTO players (name) VALUES (%s);\", ('Tom Waits',))\n\tcurG.execute(\"INSERT INTO players (name) VALUES (%s);\", (name,))\n connG.commit() \t\t \n\tcurG.execute(\"SELECT * FROM players;\")\t\t#verify entry \n except Exception as e:\n print \"registerPlayer: Exception =\",e", "title": "" }, { "docid": "8c29140817bd7e107a371ff15c6969f8", "score": "0.66586494", "text": "def registerPlayer(name):\n con = connect()\n cur = con.cursor()\n cur.execute(\"INSERT INTO players (name, had_bye) VALUES (%s, %s);\",\n (name, False,))\n con.commit()\n con.close()", "title": "" }, { "docid": "34b0692d78fb288f4b08a3c5698b9d6d", "score": "0.66581845", "text": "def players_table(self, player: Player):\n db = self.db\n players_table = db.table(\"player\")\n user = Query()\n players_table.upsert({\n \"last_name\": player.last_name,\n \"first_name\": player.first_name,\n \"birth\": player.date_of_birth,\n \"gender\": player.gender,\n \"ranking\": player.ranking,\n \"score\": player.score\n }, (user.last_name == str(player.last_name)) & (user.first_name == str(player.first_name)))", "title": "" }, { "docid": "91530673058bfacdda5ebbf22535baf5", "score": "0.66577053", "text": "def registerPlayer(name):\n RegisterQuery = \"INSERT INTO players (Name) VALUES (%s);\"\n conn = connect()\n c = conn.cursor()\n c.execute(RegisterQuery,(name,))\n conn.commit()\n conn.close()", "title": "" }, { "docid": "efabd0e0f6cf66edccd512f1360d13cf", "score": "0.6654984", "text": "def startNewTournament(name):\n db = connect()\n cursor = db.cursor()\n cursor.execute('INSERT INTO tournaments (name) VALUES (%s)', (name,))\n db.commit()\n db.close()\n return getCurrentTournamentId()", "title": "" }, { "docid": "11075381b99315a0c84de10ad62faa54", "score": "0.6646013", "text": "def add_player(self, player):\n self.thread_lock.acquire()\n\n # Check that player is not already in the pool\n for player_, _ in self.player_pool:\n if player_.id == player.id:\n raise ValueError(\"Player already in pool\")\n\n # Add the player to the pool\n self.player_pool.append((\n player,\n self.num_turns\n ))\n\n self.thread_lock.release()", "title": "" }, { "docid": "ca79a2bd4cd6022aea938fecba4c8d2e", "score": "0.6593641", "text": "def insert_new_game(user_id, num_players):\n game = Game(game_creator=user_id, num_players=num_players, started=False, cancelled=False, completed=False,\n players_joined=0)\n db.session.add(game)\n db.session.commit()\n return game", "title": "" }, { "docid": "5fb60697707104784d2893078807c541", "score": "0.65904635", "text": "def add_player(self, user):\n new_player = player.Player(user)\n self.players.append(new_player)", "title": "" }, { "docid": "da318bc22bbbf3c6ba82abf7730695f7", "score": "0.65865755", "text": "def new(self, p):\n with self._connect() as cur:\n d = vars(p)\n cur.execute(f\"insert into players values ({('?, ' * len(d))[:-2]})\",\n [d[k] for k in Sqlite3PlayerStore.FIELDS])\n cur.commit()", "title": "" }, { "docid": 
"96fa28ef86c387d29c531164790e6807", "score": "0.65468776", "text": "def add_roster():\n\n player_name = request.form['player_name']\n player_position = request.form['player_position']\n team = roster(player_name=player_name, player_position=player_position)\n db.session.add(team)\n db.session.commit()\n\n return redirect(f\"/roster\")", "title": "" }, { "docid": "9a3fac85b0d6e6f8fe3ab0121ac4abb5", "score": "0.6513458", "text": "def addPitcher(username, dbConnection, dbCursor):\n\n try:\n # user enters a valid player's name spelled correctly\n playerPitchingName = input(\"Enter player's name to add to your team of pitchers: \")\n # find this player's ID\n dbCursor.execute('select playerPitchingID from playersPitching where playerName=?;', (playerPitchingName,))\n\n for playerPitchingID in dbCursor:\n playerAddedID = playerPitchingID[0]\n\n # add the username and the found player's ID to myTeamPitchers table\n dbCursor.execute('insert into myTeamPitchers (userID, playerPitchingID) values (?,?);', (username, playerAddedID))\n\n # commit results to the database\n dbConnection.commit()\n\n print()\n print(playerPitchingName,\"was added to user\", username, \"'s team successfully!\")\n\n except SyntaxError:\n print(\"\\nInvalid input, try again.\\n\")", "title": "" }, { "docid": "d05b475b8b538b5d447403e237b01e92", "score": "0.6470715", "text": "def addHitter(username, dbConnection, dbCursor):\n\n try:\n # user enters a valid player's name spelled correctly\n playerHittingName = input(\"Enter player's name to add to your team of hitters: \")\n # find this player's ID\n dbCursor.execute('select playerHittingID from playersHitting where playerName=?;', (playerHittingName,))\n\n for playerHittingID in dbCursor:\n playerAddedID = playerHittingID[0]\n\n # add the username and the found player's ID to myTeamHitters table\n dbCursor.execute('insert into myTeamHitters (userID, playerHittingID) values (?,?);', (username, playerAddedID))\n\n # commit results to the database\n dbConnection.commit()\n\n print()\n print(playerHittingName,\"was added to user\", username, \"'s team successfully!\")\n\n except SyntaxError:\n print(\"\\nInvalid input, try again.\\n\")", "title": "" }, { "docid": "4b1fc6f1a921896cc694153b0e38b0ca", "score": "0.64621156", "text": "def choice_player_for_add_player_to_a_tournament(self):\n return self.select.add_player()", "title": "" }, { "docid": "15ceb3bdcfe7b487cc979412cd7d44cd", "score": "0.6435539", "text": "def add_player(self, player):\n\n # For the first player\n if not self.players:\n self.players.append(player)\n return\n\n self.players[-1].next_player = player\n player.previous_player = self.players[-1]\n player.next_player = self.players[0]\n self.players[0].previous_player = player\n\n self.players.append(player)", "title": "" }, { "docid": "88655f01787bcb0e03ad2f4a01cd5025", "score": "0.64244735", "text": "def register_player(conn: TConn, chat_id: int, name: str):\n if not name:\n raise Exception(\"Please provide a name, like: /start Almond\")\n\n # get the starting location\n location_row = conn.execute(\"\"\"\n SELECT location_id\n FROM location\n WHERE is_start = 1\n \"\"\").fetchone()\n\n if not location_row:\n raise Exception(\"No starting locations found\")\n\n conn.execute(\"\"\"\n INSERT INTO entity\n (chat_id, name, location_id)\n VALUES\n (:chat_id, :name, :location_id)\n \"\"\", {\n 'chat_id': chat_id,\n 'name': name,\n 'location_id': location_row['location_id']\n })", "title": "" }, { "docid": "3bccbde78a53200c01e868f7fcd75209", "score": "0.6409478", 
"text": "def add_player(self, player_id):\n new_player = Player(player_id)\n self.player_list.append(new_player)\n return", "title": "" }, { "docid": "3ba18c115523d0bedc0381aaeb2a2e36", "score": "0.64084363", "text": "def registerPlayer(name):\n # Preparing QUERY and DATA\n SQL = \"INSERT INTO players (name) VALUES (%s);\"\n name = bleach.clean(name)\n data = (name, )\n # Working with PostgreSQL\n conn = connect()\n cur = conn.cursor()\n cur.execute(SQL, data)\n conn.commit()\n cur.close()\n conn.close()\n # Line is not required, but somehow I feel it would be nice to know\n # the result\n print \"Player %s has been registered\" % name", "title": "" }, { "docid": "cb838252651f34e52c6a4d401a62c70a", "score": "0.64043826", "text": "def testGetPlayerNumber(self):\n self.game.add_player(player_bob_name)", "title": "" }, { "docid": "afa85a95b5188dce5a92bf196f8b2952", "score": "0.6386272", "text": "def AddGame(game ,playerID,playerName):\r\n conn = sqlite3.connect(databasename)\r\n\r\n InsertUser(playerID,playerName,conn)\r\n\r\n c = conn.cursor()\r\n\r\n\r\n c.execute('SELECT gameID FROM gamz WHERE gameName =?', [game])\r\n list2 = c.fetchall()\r\n if(len(list2)==0):\r\n c.execute('INSERT INTO gamz (gameName) VALUES (?)', [game])\r\n c.execute('SELECT gameID FROM gamz WHERE gameName =?', [game])\r\n list2 = c.fetchall()\r\n\r\n if not GameLookup(list2[0][0],playerID, conn):\r\n c.execute('INSERT INTO plays (gameID,DiscordID) VALUES(?,?)',[list2[0][0],playerID])\r\n\r\n\r\n conn.commit()\r\n conn.close()", "title": "" }, { "docid": "8d7de34978851052bf4300b72b6d6c73", "score": "0.63741744", "text": "def generate_tournament(self):\n tournament = self.add_tournament()\n self.tournaments.append(tournament)\n self.view.show_message(\"________ Player registration _______\")\n\n db = self.db\n list_players = db.table(\"player\").all()\n\n if not list_players:\n for i in range(1, tournament.number_of_players + 1):\n self.add_new_players(tournament, i)\n else:\n self.old_or_new_players(tournament)\n\n self.choice_secondary_menu(tournament)", "title": "" }, { "docid": "faf8b1dd64aa2e6944305a99098ebae9", "score": "0.6354048", "text": "def registerTournament(name):\n db = connect()\n cursor = db.cursor()\n query = \"INSERT INTO tournaments (name) VALUES (%s) RETURNING id\"\n cursor.execute(query, (name,))\n db.commit()\n tournament = cursor.fetchone()\n db.close()\n\n return tournament[0]", "title": "" }, { "docid": "c7e4dd72841b7f42324aab1130bd180f", "score": "0.6343074", "text": "def create_user_player(sender, instance, created, **kwargs):\n if created:\n Player.objects.create(user=instance)", "title": "" }, { "docid": "3d619d6ef2b1162de9b5386fd4e4289e", "score": "0.6329123", "text": "async def add_player(self, ctx, member: discord.Member):\n authorized = False\n guild = db.get_guild_info(ctx.guild.id)\n for role in ctx.author.roles:\n if guild.get(\"st_id\") == role.id:\n authorized = True\n elif guild.get(\"narrator_id\") == role.id:\n authorized = True\n if authorized:\n try:\n exist_check = db.get_player_info(ctx.guild.id, member.id)\n except TypeError:\n db.execute(\n \"INSERT INTO Characters (player_id, bp_max, bp, wp_max, wp, upkeep, upkeep_dt, agg_dmg, \"\n \"alert_flag, guild_id, active_toggle, Experience) VALUES (%s,5,5,5,5,0,' ', 0,0,%s, 0, 0)\",\n (member.id, guild.get(\"id\")),\n )\n await ctx.send(\"Player Added\")\n else:\n await ctx.send(\n \"Player is already in the database for this server. 
Please remove them first.\"\n )", "title": "" }, { "docid": "7c50ff017b7224fb72ebcf8aedb21cd4", "score": "0.6329023", "text": "def save_user_player(sender, instance, **kwargs):\n instance.player.save()", "title": "" }, { "docid": "77bc0bdfe03a2be3efaf530f6fbafff1", "score": "0.63250864", "text": "def create_player_object(sender, instance, created, **kwargs):\n \n if created:\n Player.objects.create(user=instance)\n instance.player.save()", "title": "" }, { "docid": "2ea55e2b6c88d29a5375ff5a244acd0e", "score": "0.63099855", "text": "def setupTournament():\n db, cursor = connect()\n prompt = \"Do you want to register a player in the tournament?\"\n\n while raw_input(prompt) == \"y\":\n name = raw_input(\"What is the player's name? \")\n registerPlayer(name)\n\n raw_input(\"\\nGetting ready to start the tournament. Press 'Enter' to begin.\")\n winner = runTournament()\n\n output = \"\\nThe winner is \" + winner[1] + \" with an ID of \"\n output += str(winner[0]) + \" who won \" + str(winner[2]) + \" out of \"\n output += str(winner[3]) + \" matches.\"\n\n print(output)\n standings = playerStandings()\n counter = 0\n print(\"\\nFinal Results:\")\n print(\"------------------------------------------\")\n for row in standings:\n print(row[1] + \", ID: \" + str(row[0]) + \", won \" + str(row[2]) +\n \" of \" + str(row[3]) + \" matches\")\n\n query = \"INSERT INTO tournaments(winnerID) VALUES (%s)\"\n query += \" RETURNING tournamentID\"\n cursor.execute(query, (winner[0],))\n result = cursor.fetchall()\n tournamentID = result[0][0]\n db.commit()\n print(\"\\nResults of tournament with ID \" + str(tournamentID) +\n \" have been recorded.\")\n\n deleteMatches()\n deletePlayers()\n db.close()", "title": "" }, { "docid": "1285937a9195a5f6444c0bc35cb78527", "score": "0.62929076", "text": "def registerTeam(name, tournament_id, country_id):\n\n conn = connect()\n cur = conn.cursor()\n query = \"\"\"INSERT INTO swiss_tournament.teams( tournament_id, name, country)\n VALUES (%s, %s, %s) RETURNING id;\"\"\"\n cur.execute(query, (tournament_id, name, country_id))\n team_id = cur.fetchone()[0]\n conn.commit()\n conn.close()\n return team_id", "title": "" }, { "docid": "075cdce1fcc98b6bc2ed793da19eacc8", "score": "0.6284572", "text": "def registerMatch(tournament_id, first_player, second_player, match_date):\n\n query = \"\"\"INSERT INTO swiss_tournament.match_schedule\n\t(tournament_id, first_player, second_player, match_date)\n VALUES ( %s, %s, %s, %s) RETURNING id;\"\"\"\n data = (tournament_id, first_player, second_player, match_date)\n conn = connect()\n cur = conn.cursor()\n cur.execute(query, data)\n match_id = cur.fetchone()[0]\n conn.commit()\n cur.close()\n conn.close()\n return match_id", "title": "" }, { "docid": "1667bceda0576e1aca2a6c5766b87333", "score": "0.6264054", "text": "def add_team_and_players(self, team: dict):\n to = models.Team(name=team['name'],\n url=team['url'],\n players=list())\n for player in team['players']:\n po = models.Player(name=player['name'],\n url=player['url'],\n role=player['role'],\n played_positions=player['played_positions'],\n age=player['age'],\n height=player['height'],\n weight=player['weight'])\n to.players.append(po)\n self._session.add(po)\n self._session.add(to)\n self._session.commit()\n\n logger.info('successfully added team %s and squad to database %s' % (team['name'], self._name))", "title": "" }, { "docid": "b8a225d7911e59efb1e70a1ebfdc0ff3", "score": "0.6230242", "text": "def create_new_player_ID(self):\n try:\n ID = 
self.sql.get_last_rowID(\"PlayerMain\") + 1\n return str(ID).zfill(IdRule.max_player_ID_num)\n \n except TypeError:\n print(\"The table is empty\")\n return str(1).zfill(IdRule.max_player_ID_num)", "title": "" }, { "docid": "f78a7ad46e8d95210b7c11264407c2b2", "score": "0.61849123", "text": "def createTournament(name):\n db, cursor = connect()\n name = bleach.clean(name)\n create_tournament_query = \"INSERT INTO tournaments(tournament_name) VALUES(%s) RETURNING tournament_id\"\n cursor.execute(create_tournament_query, (name,))\n row_id = cursor.fetchone()[0]\n db.commit()\n db.close()\n return row_id", "title": "" }, { "docid": "4ed773c433857a11ea3c961297368fe9", "score": "0.6182593", "text": "def register_player(self, name, r, c):\n current_id = len(self.players)\n # if all players already joined\n if current_id == self.number_of_players:\n raise RuntimeError('A player is trying to join a full lobby. His name: ', name)\n # else append a new Player instance to the list of players\n # the name argument is the chosen username\n else:\n assert current_id >= 0 and current_id < self.number_of_players\n self.players.append(Player(current_id, name, r, c))", "title": "" }, { "docid": "b9f7f82d43683832fe4497a1ab88459f", "score": "0.6161502", "text": "def save_tournament(self, tournament: Tournament):\n db = self.db\n tournaments_table = db.table(\"tournament\")\n tour = Query()\n tournaments_table.upsert({\n \"name\": tournament.name,\n \"location\": tournament.location,\n \"date\": tournament.date,\n \"list_date\": tournament.list_date,\n \"pace\": tournament.time_control,\n \"comment\": tournament.description,\n \"players\": [x.serialize() for x in tournament.list_players],\n \"rounds\": [x.serialize() for x in tournament.list_rounds]\n\n }, tour.name == str(tournament.name))", "title": "" }, { "docid": "7e33f5ffbe66e50d8a8e4122f342c9e2", "score": "0.6150967", "text": "def ReportMatch(winner, loser, tournament_id=0):\n\n db, cursor = connect()\n\n # Add players into matches table:\n query = \"INSERT INTO matches (winner_id, loser_id, tournament_id) VALUES(%s, %s, %s);\"\n param = (winner, loser, tournament_id)\n cursor.execute(query, param)\n\n db.commit()\n db.close()", "title": "" }, { "docid": "42b0cd7a01e2717ecc0db4468a4045c4", "score": "0.61390513", "text": "def next_player(self):\n if self.current_player_index == self.number_of_players - 1:\n self.current_player_index = 0\n else:\n self.current_player_index += 1\n self.save()", "title": "" }, { "docid": "6767720c648ce9005a19569b8b13ca56", "score": "0.6113685", "text": "def createTournament(name):\n conn, c = getConnCursor()\n \n c.execute(\"INSERT INTO tournaments (name) VALUES (%s) RETURNING id;\", (name,))\n tournament_id = c.fetchone()[0]\n conn.commit()\n conn.close()\n return tournament_id", "title": "" }, { "docid": "64a47e2d82849c388e016d3e82e7239d", "score": "0.61068076", "text": "def handleAddPlayer(self, **k):\r\n\t\t\t\tpass", "title": "" }, { "docid": "5978fb88b724fe69399c93c70b64a9d7", "score": "0.61061305", "text": "def create_player(conn, name, Player):\n age = int(Player.age)\n height = Player.height.split('-')\n height = (int(height[0])*12) + int(height[1])\n weight = ''.join(x for x in Player.weight if x.isdigit())\n awards = ', '.join(Player.awards)\n\n inp = (name, Player.profile_link, age, height, weight, Player.position[0], Player.shoots, Player.team, Player.born, Player.college, Player.high_school, Player.recruiting_rank, Player.draft, Player.nba_debut, Player.experience, awards)\n cur = conn.cursor()\n 
cur.execute(sql_cmd.PLAYERS_REPLACE, inp)\n return Player.name", "title": "" }, { "docid": "acfc8ec5d190f190b0302aa21b41ae69", "score": "0.610291", "text": "def test_add_player(service):\n service.create_game(\"AAAA\")\n\n new_player = Player(name=\"player1\", image_url=\"aaa.bbb\")\n service.add_player(\"AAAA\", new_player)\n\n read_game = service.read_game(\"AAAA\")\n assert read_game.players == [new_player]\n assert read_game.turn_player_name == \"player1\"", "title": "" }, { "docid": "b5e4d8cf905c5dc06f6effbc6e8ebd22", "score": "0.6090652", "text": "def registerTournament(name, start_date, country_id):\n\n conn = connect()\n cur = conn.cursor()\n query = \"\"\"INSERT INTO swiss_tournament.tournament (start_date, name, origin_country)\n VALUES (%s, %s, %s) RETURNING id;\"\"\"\n cur.execute(query, (start_date, name, country_id))\n tournament_id = cur.fetchone()[0]\n conn.commit()\n conn.close()\n return tournament_id", "title": "" }, { "docid": "f26718b32c3216359d75fd161f96f110", "score": "0.6072345", "text": "def save_player(cls, player):\n cls.load_players()\n cls.__PLAYERS.append(player)\n Player.save_players(cls.__PLAYERS)", "title": "" }, { "docid": "4f6e6a3b9d2933a85fced9ebed95317e", "score": "0.60475636", "text": "def add_player(self, player: str, faction: str):\n\n # add the faction to the dict if it does not exist\n if player not in self.campaign['players']:\n self.campaign['players'][player] = {'faction': faction, 'transits': {}}\n\n # then for every planet\n for planet in self.campaign['planets']:\n localPlanet = self.campaign['planets'][planet]\n # if the player added is not already listed in the planet, assign all the starting vars for the player\n if player not in localPlanet['resources']:\n localPlanet['resources'][player] = 0\n if player not in localPlanet['ships']:\n localPlanet['ships'][player] = {}\n if player not in localPlanet['fleets']:\n localPlanet['fleets'][player] = {}\n if player not in localPlanet['production']:\n localPlanet['production'][player] = {}\n\n # return a message for the added player\n print(f\"Player {player} added\")", "title": "" }, { "docid": "0225af9ce3de320ad515d95c221d7fb4", "score": "0.603218", "text": "def next_players_turn():\n\n # TODO select next turn player from connected players in the db\n next_player = random.choice(list(usernames_by_sid.values()))\n emit(\"turn_update\", {\"username\": next_player})", "title": "" }, { "docid": "6855eaae1d74f14dc7fad90f8b7f09a9", "score": "0.60305923", "text": "def createTournament(name):\n t_id = execute_query(['INSERT INTO tournaments (name)\\\n VALUES (%s) RETURNING id'],\n [(bleach.clean(name),)], 1)\n\n return t_id[0]", "title": "" }, { "docid": "b2d886b5ce6062e1a969edaae2cacf1a", "score": "0.6015296", "text": "def add_player(self, player):\n # The game must be in preparation or have started\n if self.status != MatchStatus.PREPARATION and self.status != MatchStatus.IN_PROGRESS:\n return False\n # add player in team depending on the team size\n if len(self.first_team.players) < len(self.second_team.players) or len(self.first_team.players) == 0 :\n self.first_team.append_player(player)\n return self.first_team.name\n elif len(self.first_team.players) > len(self.second_team.players) or len(self.second_team.players) == 0:\n self.second_team.append_player(player)\n return self.second_team.name\n # if teams have the same sizes, add player in team depending on the categories changes\n team_1_category = determine_category(sum(p.weight for p in self.first_team.players) / len(self.first_team.players))\n 
team_2_category = determine_category(sum(p.weight for p in self.second_team.players) / len(self.second_team.players))\n team_1_new_category = determine_category((sum(p.weight for p in self.first_team.players) + player.weight) / (len(self.first_team.players) + 1))\n team_2_new_category = determine_category((sum(p.weight for p in self.second_team.players) + player.weight) / (len(self.second_team.players) + 1))\n if team_1_category != team_1_new_category:\n self.second_team.append_player(player)\n return self.second_team.name\n elif team_2_category != team_2_new_category:\n self.first_team.append_player(player)\n return self.first_team.name\n # if categories does not change, add player in team depending on the experience\n team_1_experience = sum(p.weight for p in self.first_team.players)\n team_2_experience = sum(p.weight for p in self.second_team.players)\n if (team_1_experience < team_2_experience):\n self.first_team.append_player(player)\n return self.first_team.name\n else:\n self.second_team.append_player(player)\n return self.second_team.name", "title": "" }, { "docid": "38203bc4244fb0ec931ee591611bbeff", "score": "0.59924775", "text": "def add_player(self, name: str):\n if self.status == GameStatus.lobby and name not in [player.name for player in self.players]:\n self.players.append(Player(name=name))", "title": "" }, { "docid": "aad3cdcc5cf9d5ce77ccde3f3e6fee14", "score": "0.59616876", "text": "def add_player():\n content = request.json\n\n try:\n if content[\"player_type\"] == \"center\":\n center = Center(content[\"player_id\"], content[\"first_name\"], content[\"last_name\"], content[\"height\"], content[\"weight\"], content[\"year_drafted\"], content[\"player_type\"], content[\"num_rebounds\"], content[\"play_type\"])\n player_manager.add_player(center)\n elif content[\"player_type\"] == \"forward\":\n forward = Forward(content[\"player_id\"], content[\"first_name\"], content[\"last_name\"], content[\"height\"], content[\"weight\"], content[\"year_drafted\"], content[\"player_type\"], content[\"num_shots_took\"], content[\"num_shots_made\"])\n player_manager.add_player(forward)\n elif content[\"player_type\"] == \"guard\":\n guard = Guard(content[\"player_id\"], content[\"first_name\"], content[\"last_name\"], content[\"height\"], content[\"weight\"], content[\"year_drafted\"], content[\"player_type\"], content[\"num_steals\"], content[\"num_assists\"])\n player_manager.add_player(guard)\n\n response = app.response_class(\n status=200,\n )\n\n except ValueError as e:\n response = app.response_class(\n response=str(e),\n status=400\n )\n\n return response", "title": "" }, { "docid": "2ac8771799451ac4cbba546234975ea8", "score": "0.59372413", "text": "def test_attempt_to_add_player_to_puzzle_valid(test_client, init_db, verification_true):\n response = test_client.post('/puzzles/1/player',\n headers={'Authorization': 'Bearer 2342351231asdb'})\n assert response.status_code == 200\n assert response.json == {\n 'message': 'Successfully added Joe Biden (id = 5) to puzzle with id 1.'\n }", "title": "" }, { "docid": "a856072d9cfdefac1be73d31393f1cf3", "score": "0.59254146", "text": "def add_game(user: int, user_char: str, opponent: str, opponent_char: str,\n stage: str, win: bool, user_stock: int, opponent_stock: int):\n if user is opponent:\n raise UserIsOpponent\n usr = users_collection.find_one({ID: user})\n if usr is None:\n raise UserNotFound(user)\n opp = users_collection.find_one({EMAIL: opponent})\n if opp is None:\n raise UserNotFound(opponent)\n\n # generate random match_up id\n 
_id = 0\n while True:\n _id = random.randint(0, 999999999)\n m = games_collection.find_one({ID: _id})\n if m is None:\n break\n # create games with user and opponent data\n user_match = PlayerMatch(user, usr[EMAIL], usr[TAG], user_char, win, user_stock, True)\n opponent_match = PlayerMatch(opp[ID], opp[EMAIL], opp[TAG], opponent_char, not win, opponent_stock)\n\n # add match up to db\n new_game = Game(_id, stage, [user_match, opponent_match])\n games_collection.insert_one(new_game.dict())\n # add match up id to user and opponent's match up arrays\n users_collection.update_many({ID: {\"$in\": [user, opp[ID]]}}, {\"$push\": {GAMES: _id}})\n return new_game", "title": "" }, { "docid": "a88bac6cf5bb1413ab785fff62b52298", "score": "0.5899106", "text": "def add_player(self, player_oid: ObjectId, character: Character) -> bool:\n if player_oid in self.players:\n return False\n\n self.players[player_oid] = PlayerEntry(player_oid, character)\n return True", "title": "" }, { "docid": "3f6d5b5ebf52bbbed8febce55666c843", "score": "0.58793664", "text": "def add(self, player):\n self.currentTeam.append(player)\n positions_to_update = set()\n for pos in player.positions:\n positions_to_update.add(pos)\n for b_pos in broaderPositions[pos]:\n positions_to_update.add(b_pos)\n for update_pos in positions_to_update:\n self.current_positions[update_pos] += 1", "title": "" } ]
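The team-balancing passage above leans on a determine_category helper that is never shown in the corpus. A minimal sketch of what it could look like, assuming categories are weight bands (the band names and cutoffs below are hypothetical, not from the source):

```python
def determine_category(avg_weight: float) -> str:
    # Hypothetical weight bands; the real cutoffs are not in the passage.
    if avg_weight < 60.0:
        return 'lightweight'
    if avg_weight < 80.0:
        return 'middleweight'
    return 'heavyweight'
```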
fd9d3fb6af87c0517d9927e57906e628
computes the size (in bytes) of all contained files
[ { "docid": "ff6545f775a28aee8ff0945b1672d246", "score": "0.71110624", "text": "def get_size(start_path: str = './') -> float:\n\n total_size = 0\n for dirpath, dirnames, filenames in os.walk(start_path):\n for f in filenames:\n fp = os.path.join(dirpath, f)\n if not os.path.islink(fp):\n total_size += os.path.getsize(fp)\n\n return total_size", "title": "" } ]
[ { "docid": "78bcec5626343cdfcff3657f48051c02", "score": "0.8575421", "text": "def size(self):\n return sum(osp.getsize(d['path']) for d in self._files)", "title": "" }, { "docid": "09fc4a6da1fe5d6b5b53923f24b623ee", "score": "0.81680053", "text": "def size(self):\n total = sum(f.count_bytes for f in self.files)\n return autopaths.file_size.FileSize(total)", "title": "" }, { "docid": "a096813a612bebb0b8219c795699e311", "score": "0.79090965", "text": "def dir_contents_size(path):\n for root, dirs, files in os.walk(path):\n print(root, \"consumes\"),\n print(sum(getsize(join(root, name)) for name in files)),\n print(\"bytes in\", len(files), \"non-directory files\")", "title": "" }, { "docid": "4b02fd54883969a111e167c6800378f5", "score": "0.79046816", "text": "def size(self) -> int:\n return sum(file.size for file in self.files)", "title": "" }, { "docid": "194770015da1f78d2256d1554b26c616", "score": "0.78418183", "text": "def CountBytes():\n bytes = 0\n a = []\n j = 0\n for k in list(walk(getcwd())):\n a.append(k[0])\n for _, _, files in walk(getcwd()):\n size = 0\n for file in files:\n path = (a[j] + '/' + file)\n if exists(path):\n if isfile(path):\n size = getsize(path)\n j += 1\n bytes += size\n return bytes", "title": "" }, { "docid": "fae0e94d3815639e660db557d5033677", "score": "0.7773111", "text": "def get_size():\n\ttotal_size = 0\n\tfor dirpath, dirnames, filenames in os.walk(DATABASE_DIR):\n\t\tfor f in filenames:\n\t\t\tfp = os.path.join(dirpath, f)\n\t\t\ttotal_size += os.path.getsize(fp)\n\treturn total_size", "title": "" }, { "docid": "9d74cf6527c42568328fed7b9d64a8d2", "score": "0.7755991", "text": "def calculate_size(self):\n print('calculation of file size')\n for root, file, direc in os.walk(self.path, topdown=False):\n files_info = {'root_folder': [], 'folder_info': [], 'file_name': []}\n files_info[\"root_folder\"].append(root)\n files_info[\"folder_info\"].append(file)\n files_info[\"file_name\"].append(direc)\n self.file_detail.append(files_info)\n return self.file_detail", "title": "" }, { "docid": "bef195373f99c445908258b7a8d2cfb3", "score": "0.7604745", "text": "def file_size(self) -> int:\n return self.header_size + sum(archive.size for archive in self.archives)", "title": "" }, { "docid": "d20ea7768343cf134d0a291157dfec1d", "score": "0.7600579", "text": "def calculate_size(path):\n return sum(f.stat().st_size for f in path.glob('**/*') if f.is_file() )", "title": "" }, { "docid": "a91ad84d46e0dffc3705c6e630a66c38", "score": "0.7576608", "text": "def total_size(outputs):\n files = []\n visit_class(outputs, (\"File\",), files.append)\n # Per https://www.commonwl.org/v1.0/CommandLineTool.html#File\n # size is optional in the class, so default to 0 if not found\n return sum([f.get('size', 0) for f in files])", "title": "" }, { "docid": "1075ac6997e61858f83bdd62e3a3b8f0", "score": "0.7438534", "text": "def getDataSize(datafiles):\n total_size = 0 \n for f in datafiles:\n if os.path.isfile(f):\n total_size += os.path.getsize(f)\n total = total_size / 1000 / 1000 / 1000\n return int(round(total))", "title": "" }, { "docid": "9e778963023f1a1ed128192e81ebb984", "score": "0.74352694", "text": "def dirsize(start_dirpath):\n total_size = 0\n for dirpath, dirnames, filenames in os.walk(start_dirpath):\n for filename in filenames:\n filepath = os.path.join(dirpath, filename)\n total_size += os.path.getsize(filepath) / 1000000000 # Convert to GB\n total_size = float(\"{0:.2f}\".format(total_size)) # Limit float to one decimat point\n return total_size", "title": "" }, { 
"docid": "531723533eba1caf4abd2c5303464966", "score": "0.73316926", "text": "def meta_size(self):\n total = (32 + 8 + 8) * len(self.sizes) # hashes, sizes, counts\n for paths in self.paths.itervalues():\n total += sum(len(x) for x in paths)\n return total", "title": "" }, { "docid": "1012e0360fecab19fc192de403f972b6", "score": "0.7320394", "text": "def get_size(the_path):\n path_size = 0\n for path, directories, files in os.walk(the_path):\n for filename in files:\n path_size += os.lstat(os.path.join(path, filename)).st_size\n for directory in directories:\n path_size += os.lstat(os.path.join(path, directory)).st_size\n path_size += os.path.getsize(the_path)\n return path_size", "title": "" }, { "docid": "1e610c7882653ea0f92a8380cbe27581", "score": "0.72779083", "text": "def getTotalSize ():", "title": "" }, { "docid": "043c19d799ec3e35c9985f7059f3d2d7", "score": "0.726485", "text": "def getFileSize():", "title": "" }, { "docid": "e3887db18f29962078022eb7b6a625eb", "score": "0.72393423", "text": "def file_size(self):\n return round(self.array.nbytes / (1024**3), 3)", "title": "" }, { "docid": "77caaa0d38a9f38355fdd165321f050f", "score": "0.72380644", "text": "def fileSizes(self):\n # First, check that the files have been downloaded (exist on the local\n # file system, where they are expected to be)\n self.assumeFilesExist()\n \n iata_por_file_size = os.path.getsize(self.local_iata_por_filepath)\n unlc_por_file_size = os.path.getsize(self.local_unlc_por_filepath)\n if self.verbose:\n print(\"[Opentraveldata::fileSizes] Sizes - \"\n f\"{self.local_iata_por_filepath}: {iata_por_file_size} ; \"\n f\"{self.local_unlc_por_filepath}: {unlc_por_file_size}\")\n return (iata_por_file_size, unlc_por_file_size)", "title": "" }, { "docid": "663a08c550eed44a79ce3a7fd829199b", "score": "0.7227358", "text": "def size(self, path):\n path = self.process_path(path)\n if self.is_file(path):\n return os.path.getsize(path)\n else:\n total_size = 0\n for each_path in Path(path).glob('**/*'):\n total_size += os.path.getsize(each_path)\n \n return total_size", "title": "" }, { "docid": "43983f9e25101c9f77cbeeed72cfafdf", "score": "0.7204177", "text": "def get_size(f,block_size=1):\n if os.path.isfile(f):\n size = os.lstat(f).st_size\n else:\n size = os.lstat(f).st_size\n for dirpath,dirnames,filenames in os.walk(f):\n for d in dirnames:\n size_incr = os.lstat(os.path.join(dirpath,d)).st_size\n size += size_incr\n ##print \"%s: %d\" % (os.path.join(dirpath,d),size_incr)\n for f in filenames:\n size_incr = get_size(os.path.join(dirpath,f),block_size=block_size)\n size += size_incr\n ##print \"%s: %d\" % (os.path.join(dirpath,f),size_incr)\n # Return number of blocks, rounded\n # up or down to nearest integer\n return int(round(float(size)/float(block_size)))", "title": "" }, { "docid": "73c0d08bb75bed4ebb899cfc41da3f8f", "score": "0.7174264", "text": "def sizes(self, fileids=None, categories=None):\n\n # resolve the fileids and the categories\n fileids = self.resolve(fileids, categories)\n\n # create a generator, getting every path and computing filesize\n for path in self.abspaths(fileids):\n yield os.path.getsize(path)", "title": "" }, { "docid": "33f9957c897be8fed620f9a42175fdfd", "score": "0.71553123", "text": "def _count_total_size(self) -> float:\n total_size = 0\n if self.logfile_list:\n for file in self.logfile_list:\n total_size += file.size\n\n return total_size", "title": "" }, { "docid": "34c13dd082d2d7cedaff012ac134acaa", "score": "0.71517944", "text": "def dir_size(path):\n total = 0\n for dirpath, 
dirnames, filenames in os.walk(path):\n for f in filenames:\n try:\n total += os.path.getsize(os.path.join(dirpath, f))\n except FileNotFoundError:\n pass\n return total", "title": "" }, { "docid": "4d1851172288006ef58194f2f6749108", "score": "0.71132004", "text": "def size(path):\r\n return os.path.getsize(path)", "title": "" }, { "docid": "4e09cee53cd82b42ed57cecc71a5fa41", "score": "0.7099277", "text": "def get_size():", "title": "" }, { "docid": "05018970e30b0f02650f43d6bf44ae30", "score": "0.70341575", "text": "def __folder_size(self, path):\n\n ret = 0\n for f in scandir(path):\n if f.is_dir() and (f.name != '.' or f.name != '..'):\n ret += self.__folder_size(os.path.join(path, f.name))\n else:\n try:\n ret += f.stat().st_size\n except OSError:\n pass\n\n return ret", "title": "" }, { "docid": "3c05113f80a55b9626224a4f5e98ab5f", "score": "0.70226216", "text": "def size(self):\n return os.path.getsize(self.file)", "title": "" }, { "docid": "f58e6c2ec30479d113b55487c99249be", "score": "0.70162755", "text": "def count_files(self):\n return len(self.get_folder_contents())", "title": "" }, { "docid": "5c14b247631eeaac9c1ab246d084e046", "score": "0.700403", "text": "def size(self):\n\t\tif not self.exists:\n\t\t\treturn 0\n\t\treturn os.path.getsize( self.path ) / ( 1024 * 1024.0 )#TODO check this method", "title": "" }, { "docid": "acd7ec501d750939b97fe052a131c8ea", "score": "0.69739366", "text": "def calculate_file(self, file_total):\n print(\"Calculation of total size\")\n sum_value = 0\n for i in file_total:\n sum_value = sum_value + len(i['file_name'][0])\n\n print('Total number of files:- ', sum_value)\n sum_folder = 0\n\n for i in file_total:\n sum_folder = sum_folder + len(i['folder_info'][0])\n print('Total number of folder:- ', sum_folder)", "title": "" }, { "docid": "4149cfc9427ca66d95f517e0aa94d1c1", "score": "0.697253", "text": "def get_file_size(self):\n\t size = os.path.getsize(self._file)\n\t return size", "title": "" }, { "docid": "de9edb07bdad0c0bc5575788fe862b58", "score": "0.69501805", "text": "def size(self):\n try:\n return os.path.getsize(self.__filename)\n except OSError, err:\n if err.errno == errno.ENOENT:\n return 0 \n raise", "title": "" }, { "docid": "4353262aaa8ebd1ad45a8eeb36846e98", "score": "0.69445544", "text": "def getFolderSize(self,start_path):\n total_size = 0\n for dirpath, dirnames, filenames in os.walk(start_path):\n for f in filenames:\n fp = os.path.join(dirpath, f)\n # skip if it is symbolic link\n if not os.path.islink(fp):\n total_size += os.path.getsize(fp)\n\n return total_size*8", "title": "" }, { "docid": "5fa5a807a3135f460f92cd5632262c74", "score": "0.69384587", "text": "def nfiles(self):\n return len(self.files)", "title": "" }, { "docid": "649565cf5e7270e35af50fd14466d881", "score": "0.6923648", "text": "def num_files(self) -> int:\n if self._num_files is None:\n self._num_files = 0\n for _, _, files in os.walk(self.binaries_dir):\n self._num_files += len(files)\n return self._num_files", "title": "" }, { "docid": "690d80cf1734f203653b65ca75248b1f", "score": "0.6893306", "text": "def fileSize( self, filename = None ):\n if not self: return -1\n if filename is None: \n totalSize = 0\n for f in self.value():\n totalSize += fileutil.getsize( f )\n return totalSize\n if filename in self.value():\n return fileutil.getsize( filename )\n return -1", "title": "" }, { "docid": "c0a09fcb3a28d00d637f8a21f7ab2340", "score": "0.68925995", "text": "def FileSize(file_path):\n return os.path.getsize(file_path) / BYTES_PER_MEGABYTE", "title": "" }, { 
"docid": "c0197f38e4244deb99df8f9c3be8fe5a", "score": "0.6892528", "text": "def getFileCount(self) -> int:\n ...", "title": "" }, { "docid": "b8e3ecf3b4b158a8764466769bfa9298", "score": "0.6869703", "text": "def file_count(self) -> int:", "title": "" }, { "docid": "6bf994bf9952c59e16df4275dfdf07e9", "score": "0.6864968", "text": "def file_size(path: Path) -> int:\n return path.stat().st_size", "title": "" }, { "docid": "1c1a6379f95b6fb70eb1336a573c2112", "score": "0.68492186", "text": "def number_of_files(self) -> int:\n return sum(len(tuple(dblock.files)) for dblock in self._orig_datablocks)", "title": "" }, { "docid": "97d1c297aec74b8b4f406e23072d4587", "score": "0.6815117", "text": "def getDirectorySize(dir):\n size = 0\n if os.path.exists(dir):\n for path, dirs, files in os.walk(dir):\n for file in files:\n size += os.path.getsize(os.path.join(path, file))\n return size", "title": "" }, { "docid": "8bd3b9807eb06a0084b939204a0ec4eb", "score": "0.6778398", "text": "def get_directory_size(start_path='.'):\n total_size = 0\n for dirpath, dirnames, filenames in os.walk(start_path):\n for f in filenames:\n fp = os.path.join(dirpath, f)\n # skip if it is symbolic link\n if not os.path.islink(fp):\n total_size += os.path.getsize(fp)\n\n return total_size", "title": "" }, { "docid": "b456eb210458fc469437b406e8e092c6", "score": "0.67722255", "text": "def file_size_actual(self) -> int:\n return Path(self.path).stat().st_size", "title": "" }, { "docid": "7a8c50ca0e08f696b50c52ca834b37d8", "score": "0.6755827", "text": "def get_size(self):\n new_size = 0\n for volume in self.volumes:\n new_size += volume.size\n # TODO: get FS size used by bucket(s)\n self.size = new_size\n return self.size", "title": "" }, { "docid": "939609092e4b5eb8948cbf3b8ae367d1", "score": "0.6740752", "text": "def file_size(file_path):\r\n if os.path.isfile(file_path):\r\n file_info = os.stat(file_path)\r\n return convert_bytes(file_info.st_size)", "title": "" }, { "docid": "ce53a4153114a1471583b1db1f6553f5", "score": "0.66954947", "text": "def file_size(file_path):\n if os.path.isfile(file_path):\n file_info = os.stat(file_path)\n return convert_bytes(file_info.st_size)", "title": "" }, { "docid": "ce53a4153114a1471583b1db1f6553f5", "score": "0.66954947", "text": "def file_size(file_path):\n if os.path.isfile(file_path):\n file_info = os.stat(file_path)\n return convert_bytes(file_info.st_size)", "title": "" }, { "docid": "ce53a4153114a1471583b1db1f6553f5", "score": "0.66954947", "text": "def file_size(file_path):\n if os.path.isfile(file_path):\n file_info = os.stat(file_path)\n return convert_bytes(file_info.st_size)", "title": "" }, { "docid": "65f8d0a784fce73f279de6d1e78cb518", "score": "0.66941303", "text": "def size(self):\n return os.stat(self._filepath).st_size", "title": "" }, { "docid": "9c4aa9dd36465c933cb3a12abf07c6fd", "score": "0.66932833", "text": "def file_size(self):\n\n # declare types of arguments and response of the corresponding golang function\n self.uplink.m_libuplink.uplink_stat_object.argtypes = [ctypes.POINTER(_ProjectStruct),\n ctypes.c_char_p, ctypes.c_char_p]\n self.uplink.m_libuplink.uplink_stat_object.restype = _ObjectResult\n #\n # get object information by calling the exported golang function\n object_result = self.uplink.m_libuplink.uplink_stat_object(self.project, self.bucket_name,\n self.storj_path)\n # if error occurred\n if bool(object_result.error):\n raise _storj_exception(object_result.error.contents.code,\n object_result.error.contents.message.decode(\"utf-8\"))\n # find object 
size\n return int(object_result.object.contents.system.content_length)", "title": "" }, { "docid": "b95c53bba5ca6f0f56afeb04cbdf8c5a", "score": "0.6687671", "text": "def CountFiles():\n summ = 0\n for _, _, files in walk(getcwd()):\n count = len(files)\n summ += count\n return summ", "title": "" }, { "docid": "69afa51b2c5d5dc0d2578172e02a75c1", "score": "0.6687432", "text": "def getsize(self, filename):\n return self.stat(filename).st_size", "title": "" }, { "docid": "37294dfd7f91e820c31acc3166997dcc", "score": "0.6682611", "text": "def file_size(filepath: str) -> int:\n return os.path.getsize(filepath)", "title": "" }, { "docid": "5fcc054801b249a7c4195f2f21bc4e8d", "score": "0.6680107", "text": "def updateSize(self, doc):\n from .file import File\n # get correct size from child files\n size = 0\n fixes = 0\n fileModel = File()\n for file in self.childFiles(doc):\n s, f = fileModel.updateSize(file)\n size += s\n fixes += f\n # fix value if incorrect\n if size != doc.get('size'):\n self.update({'_id': doc['_id']}, update={'$set': {'size': size}})\n fixes += 1\n return size, fixes", "title": "" }, { "docid": "9caa922e8eccc8c3161da6ade1cbe5d0", "score": "0.6673637", "text": "def archive_size(self):\n return (None if self.size is None else\n sum(cd.size for cd in self.descendant_members()))", "title": "" }, { "docid": "b4303f56824abe8278da6bfe3437a58f", "score": "0.66662216", "text": "def __len__(self):\n return self._nfiles", "title": "" }, { "docid": "e3adb90e2c79d3017516f507aa281bc1", "score": "0.66575634", "text": "def get_directory_size(path):\n import os\n total_size = 0\n for dirpath, dirnames, filenames in os.walk(path):\n for f in filenames:\n fp = os.path.join(dirpath, f)\n total_size += os.path.getsize(fp)\n return total_size", "title": "" }, { "docid": "1c7dc97f09dbae2860d2dbddaafd3de5", "score": "0.6650538", "text": "def get_directory_size(path):\n if os.path.isfile(path):\n return os.path.getsize(path)\n else:\n return sum([get_directory_size(os.path.join(path, f))\n for f in os.listdir(path)])", "title": "" }, { "docid": "ee6ec2347e358a8d6ca7ad640fd19b74", "score": "0.66366553", "text": "def get_trace_directory_size(self):\n if not hasattr(self,\"__trace_directory_size__\"):\n self.__trace_directory_size__ = number_of_files(self.trace_directory)\n return self.__trace_directory_size__", "title": "" }, { "docid": "1048d3627f52806ebf0f46628590b80b", "score": "0.66237104", "text": "def getNumberOfFiles_fast(path):\n print(path)\n f = get_h5_file(path)\n return len(f['/entry/data'])", "title": "" }, { "docid": "0fca316ddf60506339d836c8607b8ea4", "score": "0.65861094", "text": "def size(self):\n return self.stat().size", "title": "" }, { "docid": "204d339aed16321aa9ca19e8ad53ee42", "score": "0.6582877", "text": "def __len__(self):\n return len(self.files)", "title": "" }, { "docid": "204d339aed16321aa9ca19e8ad53ee42", "score": "0.6582877", "text": "def __len__(self):\n return len(self.files)", "title": "" }, { "docid": "204d339aed16321aa9ca19e8ad53ee42", "score": "0.6582877", "text": "def __len__(self):\n return len(self.files)", "title": "" }, { "docid": "8c55b30f67efc65e8bf59839a3062144", "score": "0.65585625", "text": "def calculateFileSizes(files):\n write( \"nrh trace - Start calculateFileSizes() procedure...\" )\n\n filecount=0\n totalvideosize=0\n totalaudiosize=0\n totalmenusize=0\n\n for node in files:\n write( \"nrh trace - calculateFileSizes() node value is : %s\" % node )\n filecount+=1\n #Generate a temp folder name for this file\n folder=getItemTempPath(filecount)\n 
write( \"nrh trace - calculateFileSizes() folder name for this file %s\" % folder )\n #Process this file\n file=os.path.join(folder,\"stream.m2v\")\n write( \"nrh trace - calculateFileSizes() file name : %s\" % file )\n #Get size of vobfile in MBytes\n totalvideosize+=os.path.getsize(file) \n\n #Get size of audio track 1\n if doesFileExist(os.path.join(folder,\"stream0.ac3\")):\n totalaudiosize+=os.path.getsize(os.path.join(folder,\"stream0.ac3\")) \n if doesFileExist(os.path.join(folder,\"stream0.mp2\")):\n totalaudiosize+=os.path.getsize(os.path.join(folder,\"stream0.mp2\")) \n\n #Get size of audio track 2 if available \n if doesFileExist(os.path.join(folder,\"stream1.ac3\")):\n totalaudiosize+=os.path.getsize(os.path.join(folder,\"stream1.ac3\")) \n if doesFileExist(os.path.join(folder,\"stream1.mp2\")):\n totalaudiosize+=os.path.getsize(os.path.join(folder,\"stream1.mp2\")) \n\n # add chapter menu if available\n if doesFileExist(os.path.join(getTempPath(),\"chaptermenu-%s.mpg\" % filecount)):\n totalmenusize+=os.path.getsize(os.path.join(getTempPath(),\"chaptermenu-%s.mpg\" % filecount)) \n\n # add details page if available\n if doesFileExist(os.path.join(getTempPath(),\"details-%s.mpg\" % filecount)):\n totalmenusize+=os.path.getsize(os.path.join(getTempPath(),\"details-%s.mpg\" % filecount))\n\n filecount=1\n while doesFileExist(os.path.join(getTempPath(),\"menu-%s.mpg\" % filecount)):\n totalmenusize+=os.path.getsize(os.path.join(getTempPath(),\"menu-%s.mpg\" % filecount))\n filecount+=1\n\n write( \"nrh trace - ...calculateFileSizes() procedure End\\n\" )\n\n return totalvideosize,totalaudiosize,totalmenusize", "title": "" }, { "docid": "e78d5f9e9c4acf7cad9dfba306f0bdc1", "score": "0.6541786", "text": "def get_size(self):\n ...", "title": "" }, { "docid": "860cf2bcb2e7127a069df9f308b3bb32", "score": "0.6534193", "text": "def getdirsize(path):\n if os.path.isdir(path):\n total = 0\n for current_dir, folderlist, fnamelist in os.walk(path):\n for fname in fnamelist:\n total += os.path.getsize(os.path.join(current_dir, fname))\n return total\n else:\n raise Exception(\"%s is not a directory!\" % path)", "title": "" }, { "docid": "282098103ac9c1dcd9e2555b6016bfbd", "score": "0.65292877", "text": "def DirectorySize(directory):\n directory_size = 0\n if os.path.isdir(directory):\n for (path, unused_dirs, files) in os.walk(directory):\n for file_name in files:\n file_path = os.path.join(path, file_name)\n directory_size += os.path.getsize(file_path)\n\n return directory_size / BYTES_PER_MEGABYTE", "title": "" }, { "docid": "34d8653054c3d5202b309b36a10d01eb", "score": "0.65286225", "text": "def __getFileSize(self):\n\n self.f.seek(0, 2) # move the cursor to the end of the file\n size = self.f.tell()\n return size", "title": "" }, { "docid": "8c515551ae8c68765ab29b1f128314ec", "score": "0.65252113", "text": "def totalSize(self):\n \n pass", "title": "" }, { "docid": "91873e1641efa749d309f70705948f02", "score": "0.6519551", "text": "def size ():", "title": "" }, { "docid": "06ebb8f1d12911ad5fa0878f10e64e20", "score": "0.6517789", "text": "def folder_size_count(path):\n total = {\n 'size': 0,\n 'count': 0\n }\n for dirpath, folders, filenames in os.walk(path):\n total['count'] += len(filenames)\n if 'tmp' not in dirpath:\n for filename in filenames:\n _file = os.path.join(dirpath, filename)\n total['size'] += os.path.getsize(_file)\n\n return total", "title": "" }, { "docid": "0925ff1b6403b8486c62973ebd558509", "score": "0.6498804", "text": "def size(self) -> int:\n if self.is_on_local:\n 
return file_size(cast(Path, self.local_path))\n return self._remote_size # type: ignore[return-value]", "title": "" }, { "docid": "362e53e0df7538a148be116d9cb319c7", "score": "0.64932305", "text": "def get_tree_size(path):\n size = 0\n try:\n for entry in scandir.scandir(path):\n if entry.is_symlink():\n pass\n elif entry.is_dir():\n size += get_tree_size(os.path.join(path, entry.name))\n else:\n size += entry.stat().st_size\n except OSError:\n pass\n return size", "title": "" }, { "docid": "1077d38eb17a4b3ed12016e884374d93", "score": "0.64889354", "text": "def get_size(filename):\n fileinfo = os.stat(filename)\n return fileinfo", "title": "" }, { "docid": "ed17c6af9c6a340547832f8faa617902", "score": "0.64827216", "text": "def total_length(self) -> int:\n total = len(self.songs)\n for sub_dir in self.subdirectories:\n total += len(sub_dir)\n return total", "title": "" }, { "docid": "234f1714376a17ea70bf06a50b57132e", "score": "0.64816606", "text": "def recalculateSize(self, item):\n size = 0\n for file in self.childFiles(item):\n # We could add a recalculateSize to the file model, in which case\n # this would be:\n # size += File().recalculateSize(file)\n size += file.get('size', 0)\n delta = size - item.get('size', 0)\n if delta:\n logger.info('Item %s was wrong size: was %d, is %d' % (\n item['_id'], item['size'], size))\n item['size'] = size\n self.update({'_id': item['_id']}, update={'$set': {'size': size}})\n self.propagateSizeChange(item, delta)\n return size", "title": "" }, { "docid": "a80cd4f2e6690cd53103de89862fd9e0", "score": "0.6477678", "text": "def _get_files_and_size(path: str) -> Tuple[List[str], int]:\n # enumerate all of the files on disk to be checked\n disk_files = _enumerate_path(path)\n # for all of the files we want to check\n size = 0\n for disk_file in disk_files:\n # determine the size of the file\n size += os.path.getsize(disk_file)\n return (disk_files, size)", "title": "" }, { "docid": "7cc4a7a0fa975bb23ae8734b8b067a71", "score": "0.647722", "text": "def get_numfiles(self):\n return 10", "title": "" }, { "docid": "7f5b2475efd1e3c7114964d67511cbf9", "score": "0.6475636", "text": "def getSize(self) -> int:\n ...", "title": "" }, { "docid": "7f5b2475efd1e3c7114964d67511cbf9", "score": "0.6475636", "text": "def getSize(self) -> int:\n ...", "title": "" }, { "docid": "10bfb330fc73968c3a587adb0a09c299", "score": "0.64634335", "text": "def containedDataSize(self) -> int:", "title": "" }, { "docid": "d9b96106155b0bebb5aec8fad5baccb7", "score": "0.64499336", "text": "def check_Num_Size(self, folder, name):\n\n num1 = 0\n num2 = 0\n filename = []\n filelist = (walkfiles(self.path + '/' + folder, prt=0))[1]\n for file in filelist:\n if name in file:\n out_ama_size = os.path.getsize(self.path + '/' + folder + '/' + file)\n if ((out_ama_size > 10.0 ** 6.0) and (out_ama_size < 10.0 ** 7.0)):\n num1 +=1\n filename.append(file)\n\n for file in filelist:\n if name in file:\n out_ama_size = os.path.getsize(self.path + '/' + folder + '/' + file)\n if (out_ama_size > 10.0 ** 7.0):\n num2 += 1\n filename.append(file)\n return num1, num2, filename", "title": "" }, { "docid": "d1b03e7a70d385987f36b139652b68ce", "score": "0.6434553", "text": "def get_size(self, excludes=EmptyI):\n\n size = 0\n for a in self.gen_actions(excludes):\n size += int(a.attrs.get(\"pkg.size\", \"0\"))\n\n return size", "title": "" }, { "docid": "54280c354438d45caf7bc05df023b8a2", "score": "0.6429244", "text": "def get_file_size(file_path):\n file_stats = os.stat(file_path)\n return file_stats.st_size", 
"title": "" }, { "docid": "8f8efe4236caec9c858c4a87852cc0e7", "score": "0.64188236", "text": "def file_size(self):\n return self.__fileSize", "title": "" }, { "docid": "2e9b138485f48153b4558aa4325d56e7", "score": "0.6417583", "text": "def total_size(self):\n return None if self.size is None else sum((cd.size for cd in self.descendants()))", "title": "" }, { "docid": "14de76c36449d9fe7effda060fe7958e", "score": "0.6414905", "text": "def get_size(self, path):\n \n raise NotImplementedError()", "title": "" }, { "docid": "14de76c36449d9fe7effda060fe7958e", "score": "0.6414905", "text": "def get_size(self, path):\n \n raise NotImplementedError()", "title": "" }, { "docid": "017aaa1dbb9679d49425859591b37718", "score": "0.640725", "text": "def __len__(self):\n return len(self.x_list)*self.filesize", "title": "" }, { "docid": "8ae000416ebef22f7ff9e91bba3c7901", "score": "0.6393787", "text": "def numInFiles(self):\n return len(self.INFILE);", "title": "" }, { "docid": "356311263490076884ec75e1fd716af9", "score": "0.6393692", "text": "def get_disc_file_size_bytes(self):\n return self.get_disc_file_size_frames() * self.audio_format.bytes_per_frame", "title": "" }, { "docid": "a23ef4b4f86a9cf6e6d9297208daa13c", "score": "0.6392131", "text": "def getSize(self):\n\t\t\n\t\treturn int(self._getCachedInfo('size'))", "title": "" }, { "docid": "b3da2c81b3839b49d51ec3294e7f1b44", "score": "0.6388886", "text": "def check_file_size(data_dir, dates, subjects, surveys, data_streams):\n\n for date in dates:\n print(\"Date:\", date)\n print(\"-----------------\")\n for subject in subjects:\n print(\"Subject:\", subject)\n\n # passive data files\n for data_stream in data_streams:\n path = data_dir + subject + \"/\" + data_stream + \"/\" + date + \"*\"\n files = glob.glob(path)\n total_size = 0\n for file in files:\n total_size += os.path.getsize(file)\n print(\" %s total file size is %d bytes.\" % (data_stream, total_size))\n\n # survey files\n for survey in surveys:\n path = data_dir + subject + \"/survey_answers/\" + survey + \"/\" + date + \"*\"\n files = glob.glob(path)\n total_size = 0\n for file in files:\n total_size += os.path.getsize(file)\n print(\" %s survey total file size is %d bytes.\" % (survey, total_size))\n print(\"\")\n print(\"\")", "title": "" }, { "docid": "94422707434a451b692d71930f57ec4a", "score": "0.6377666", "text": "def size(self):\n type_ = self.type_\n sizes = {'total': 0.0, 'ondisk': 0.0, 'git': 0.0, 'annex': 0.0, 'annex_worktree': 0.0}\n\n if type_ in ['file', 'link', 'link-broken']:\n # if node is under annex, ask annex for node size, ondisk_size\n if isinstance(self.repo, AnnexRepo) and self.repo.is_under_annex(self._path):\n size = self.repo.info(self._path, batch=True)['size']\n ondisk_size = size \\\n if self.repo.file_has_content(self._path) \\\n else 0\n # else ask fs for node size (= ondisk_size)\n else:\n size = ondisk_size = 0 \\\n if type_ == 'link-broken' \\\n else lstat(self.symlink or self._path).st_size\n\n sizes.update({'total': size, 'ondisk': ondisk_size})\n\n if self.repo.path == self._path:\n sizes.update({'git': self.git_local_size,\n 'annex': self.annex_local_size,\n 'annex_worktree': self.annex_worktree_size})\n return sizes", "title": "" }, { "docid": "1515145f6397bd672d57e4006b440eeb", "score": "0.6373844", "text": "def get_size(objects: List[Any]) -> int:\n res = 0\n for o in objects:\n try:\n res += getsizeof(o)\n except AttributeError:\n print(\"IGNORING: type=%s; o=%s\" % (str(type(o)), str(o)))\n return res", "title": "" }, { "docid": 
"0b810b12511bf5750dac1ccd7f9846b6", "score": "0.6373387", "text": "def GetSize(self):", "title": "" }, { "docid": "876a9667e2ac7ae3ea0e363e9a8fc71f", "score": "0.6361742", "text": "def get_tree_size(path, log_func=None):\n total = 0\n for entry in os.scandir(path):\n if entry.is_dir(follow_symlinks=False):\n dir_stats = entry.stat()\n if fsutils.platf_is_win and dir_stats.st_file_attributes & stat.FILE_ATTRIBUTE_REPARSE_POINT:\n __local_log__('%s is a junction' % entry.name, log_func)\n pass\n else:\n total += get_tree_size(entry.path)\n elif entry.is_file(follow_symlinks=False):\n total += entry.stat().st_size\n else: # skip symbolic links\n __local_log__('skipping symoblic link %s' % entry.name, log_func)\n pass\n return total", "title": "" }, { "docid": "5b8af1c2457b5233873bca9aae6a1e18", "score": "0.63603663", "text": "def test_get_file_size():\n assert get_file_size('fixtures/e-sebern2.gif') == 1774\n assert get_file_size('fixtures/sebern1.jpg') == 37207", "title": "" } ]
8758fc68f07147cae8415af5151b8311
builds and visualizes networkx Graph for gene networks
[ { "docid": "3dd6b249120676b0e71204c060d8f639", "score": "0.7146621", "text": "def network_graph(net_dict=None):\n if net_dict == None:\n net_dict = {}\n else:\n G = nx.from_dict_of_lists(net_dict)\n plt.figure(num=None, figsize=(30, 30), dpi=80, facecolor='w', edgecolor='c')\n nx.draw_networkx(G, with_labels=True, alpha=0.5, edge_color='c', cmap=plt.cm.GnBu)\n # plt.savefig(\"metabolism_5years.png\", bbox_inches='tight')", "title": "" } ]
[ { "docid": "7dedcb61e283c29ac7d26adc5da42dbe", "score": "0.71572155", "text": "def draw_graph(self):\n pos = nx.kamada_kawai_layout(self.G)\n fig,ax = plt.subplots(figsize = (8,8))\n \n \n nx.draw_networkx_nodes(self.G,pos,ax=ax,\n nodelist = self.G.nodes(),\n node_color = [self.G.node[n]['color'] for n in self.G.nodes()]\n )\n nx.draw_networkx_edges(self.G,pos,width = 1.0)\n nx.draw_networkx_labels(self.G,pos,self.node2label,font_size=10)", "title": "" }, { "docid": "71bc77587877b8bfafe8fe1f366a8109", "score": "0.7009452", "text": "def show_graph(self):\n fig, ax = plt.subplots(1, 1, figsize=(10, 8))\n nx.draw_networkx(self.graph, ax=ax)", "title": "" }, { "docid": "450408cb5e483ca9831c4087cb0cedde", "score": "0.69891876", "text": "def GraphvizGraph(network, color, shape):\n G = graphviz.Digraph()\n \n #Add nodes to the G\n for i in range(network.nodenum):\n if(i in network.supplyseries):\n G.node(str(i + 1), color = color, fillcolor = color, shape = shape[0], style = \"filled\")\n if(i in network.transeries):\n G.node(str(i + 1), color = color, fillcolor = color, shape = shape[1], style = 'filled')\n if(i in network.demandseries):\n G.node(str(i + 1), color = color, fillcolor = color, shape = shape[2], style = 'filled')\n \n for i in range(network.nodenum):\n for j in range(i, network.nodenum):\n if(network.adjmatrix[i, j] == 1):\n G.edge(str(i + 1), str(j + 1))\n \n G.render(\"./result/\" + network.name, view = True)", "title": "" }, { "docid": "46fd852a4690a87079be72525b58285e", "score": "0.69300526", "text": "def plot_networkx_graph(G):\n\n plt.subplot(121)\n nx.draw(G, with_labels=True, font_weight='bold', node_color=range(len(list(G.nodes()))))\n plt.show()", "title": "" }, { "docid": "5f497ad0db2ef91c9db6638932c00d17", "score": "0.69082713", "text": "def plot_network(community_instance, save_name=''):\n df = community_instance.df\n\n df['l1'].max()\n df['l1'].min()\n mean = df['l1'].describe()[1]\n\n sample_graph = community_instance.sample_graph\n\n elarge = [(u, v) for (u, v, d) in sample_graph.edges(data=True) if d['weight'] > mean]\n esmall = [(u, v) for (u, v, d) in sample_graph.edges(data=True) if d['weight'] <= mean]\n pos = nx.spring_layout(sample_graph)\n\n # nodes\n nx.draw_networkx_nodes(sample_graph, pos, node_size=700)\n # edges\n nx.draw_networkx_edges(sample_graph, pos, edgelist=elarge, width=6)\n nx.draw_networkx_edges(sample_graph, pos, edgelist=esmall, width=6,\n alpha=0.5, edge_color='b', style='dashed')\n\n # labels\n nx.draw_networkx_labels(sample_graph, pos, font_size=20, font_family='sans-serif')\n\n plt.axis('off')\n # plt.show() # display\n if save_name == '':\n pass\n\n else:\n plt.savefig(save_name) # save as png", "title": "" }, { "docid": "8b40261cd82fd84ed7e4e4abfcc222df", "score": "0.6904904", "text": "def create_graph(self):", "title": "" }, { "docid": "b79d8c8852ca4f5751ca34e7ca6d29f7", "score": "0.6886077", "text": "def create_network_graph():\n\n network_graph_file.set(filedialog.asksaveasfilename(initialdir=\"/\",\n title=\"Select or Create file:\"))\n graph_network.make_utilization_graph_neat(model, network_graph_file.get(),\n display_plot=False)", "title": "" }, { "docid": "b08fc55ca45a31a60480af89b3960bfd", "score": "0.68053716", "text": "def make_graph():\n # Note: You can copy-paste this function into colab to visualize the graph.\n g = nx.MultiDiGraph()\n # First add the \"empty\" and \"activated\" nodes, which are always present.\n graph_utils.add_system_nodes(g)\n\n g.add_node(\"LowerX\", reaction=True)\n # Reactants:\n 
g.add_edge(\"ax\", \"LowerX\")\n g.add_edge(\"bx\", \"LowerX\")\n # Products:\n g.add_edge(\"LowerX\", \"c\")\n g.add_edge(\"LowerX\", \"by\")\n\n g.add_node(\"LowerY\", reaction=True)\n # Reactants:\n g.add_edge(\"ay\", \"LowerY\")\n g.add_edge(\"by\", \"LowerY\")\n # Products:\n g.add_edge(\"LowerY\", \"c\")\n g.add_edge(\"LowerY\", \"bx\")\n\n # Inhibit y with a product of the x-producing cycle.\n g.add_node(\"HigherX\", reaction=True)\n # Reactants:\n g.add_edge(\"c\", \"HigherX\")\n g.add_edge(\"dx\", \"HigherX\")\n # Products:\n g.add_edge(\"HigherX\", \"ay\")\n g.add_edge(\"HigherX\", \"dy\")\n\n g.add_node(\"HigherY\", reaction=True)\n g.add_edge(\"c\", \"HigherY\")\n g.add_edge(\"dy\", \"HigherY\")\n # Products:\n g.add_edge(\"HigherY\", \"ax\")\n g.add_edge(\"HigherY\", \"dx\")\n\n g.add_node(\"DestroyB\", reaction=True)\n g.add_edge(\"bx\", \"DestroyB\")\n g.add_edge(\"by\", \"DestroyB\")\n # Products:\n g.add_edge(\"DestroyB\", \"empty\")\n g.add_edge(\"DestroyB\", \"empty\")\n\n g.add_node(\"DestroyD\", reaction=True)\n g.add_edge(\"dx\", \"DestroyD\")\n g.add_edge(\"dy\", \"DestroyD\")\n # Products:\n g.add_edge(\"DestroyD\", \"empty\")\n g.add_edge(\"DestroyD\", \"empty\")\n\n # Properties of compounds\n # Color:\n g.nodes[\"ax\"][\"color\"] = (153, 204, 255) # blue 1\n g.nodes[\"bx\"][\"color\"] = (102, 154, 255) # blue 2\n g.nodes[\"dx\"][\"color\"] = (201, 15, 255) # blue 3 = purple\n\n g.nodes[\"ay\"][\"color\"] = (102, 255, 153) # green 1\n g.nodes[\"by\"][\"color\"] = (52, 255, 102) # green 2\n g.nodes[\"dy\"][\"color\"] = (0, 255, 0) # green 3\n\n g.nodes[\"c\"][\"color\"] = (255, 0, 0) # red\n\n # Reactivity:\n g.nodes[\"ax\"][\"reactivity\"] = \"low\"\n g.nodes[\"bx\"][\"reactivity\"] = \"high\"\n g.nodes[\"dx\"][\"reactivity\"] = \"high\"\n\n g.nodes[\"ay\"][\"reactivity\"] = \"low\"\n g.nodes[\"by\"][\"reactivity\"] = \"high\"\n g.nodes[\"dy\"][\"reactivity\"] = \"high\"\n\n g.nodes[\"c\"][\"reactivity\"] = \"medium\"\n\n g.nodes[\"empty\"][\"reactivity\"] = \"background\"\n\n # The following commented line documents how to set the query config for a\n # specific compound, overriding the default query configuration.\n # g.nodes[\"food1\"][\"query_config\"] = {\"radius\": 3, \"queryType\": \"diamond\"}\n\n return g", "title": "" }, { "docid": "9704c01019c831a4f7bfa7ac918af29f", "score": "0.6796611", "text": "def visualize_graph(g):\n pos = nx.spring_layout(g)\n nx.draw_networkx_labels(g, pos)\n nx.draw_networkx_nodes(g, pos, node_size=250)\n colors = [g[u][v][\"color\"] for u, v in g.edges()]\n nx.draw_networkx_edges(g, pos, arrows=True, edge_color=colors)\n plt.show()", "title": "" }, { "docid": "22f0ba756ef8dafe920d548782519afd", "score": "0.678975", "text": "def build_network(movie_title):\r\n #create the shells list for the shell_layout\r\n shells = []\r\n\r\n # create the networkx Graph recursively by web scraping IMDB\r\n G = scraper.scraper(movie_title, shells)\r\n\r\n print(shells) # logging\r\n\r\n # creat a dictionary of x,y positions by node\r\n pos = nx.shell_layout(G, nlist=shells, scale=0.5)\r\n # pos is a dictionary { nodeNumber : ([x, y]), ...}\r\n\r\n # initializing and styling the edges for plotting\r\n edge_trace = Scatter(\r\n x=[],\r\n y=[],\r\n line=Line(width=1),\r\n hoverinfo='none',\r\n mode='lines'\r\n )\r\n\r\n xvalues = []\r\n yvalues = []\r\n\r\n # getting the x, y information from each node and graphing the edges\r\n for edge in G.edges(): # [(node1, node2), (node3, node4),...]\r\n x0, y0 = pos[edge[0]] # pos = { nodeID : [x, y], 
nodeID : [x, y], ...}\r\n x1, y1 = pos[edge[1]]\r\n\r\n xvalues += [x0, x1]\r\n yvalues += [y0, y1]\r\n\r\n edge_trace['x'] += [x0, x1, None] # adding x and y to scatter plot\r\n edge_trace['y'] += [y0, y1, None]\r\n\r\n # initializing and styling the scatter plot which will map the nodes.\r\n node_trace = Scatter(\r\n x=[],\r\n y=[],\r\n text=[],\r\n hovertext=[],\r\n name='',\r\n textposition='bottom',\r\n textfont=dict(\r\n color='rgb(255, 255, 255)'\r\n ),\r\n mode='markers+text',\r\n marker=Marker(\r\n showscale=True,\r\n colorscale=[[0.0, 'rgb(221, 34, 49)'],\r\n [0.1111111111111111, 'rgb(218, 58, 30)'],\r\n [0.2222222222222222, 'rgb(215, 99, 26)'],\r\n [0.3333333333333333, 'rgb(212, 139, 22)'],\r\n [0.4444444444444444, 'rgb(209, 180, 19)'],\r\n [0.5555555555555556, 'rgb(189, 206, 15)'],\r\n [0.6666666666666666, 'rgb(142, 203, 12)'],\r\n [0.7777777777777778, 'rgb(94, 200, 9)'],\r\n [0.8888888888888888, 'rgb(2, 194, 6)'],\r\n [1.0, 'rgb(0, 245, 7)']],\r\n reversescale=False,\r\n colorbar=dict(\r\n title='IMDB Rating',\r\n titleside='top',\r\n ),\r\n color=[],\r\n size=[],\r\n sizeref=1,\r\n line=dict(width=2))\r\n )\r\n\r\n # function to crete annotated text on hover\r\n\r\n\r\n\r\n # setting marker information on scatter plot from the networkx Graph object\r\n for node in G.nodes():\r\n x, y = pos[node]\r\n node_trace['x'].append(x)\r\n node_trace['y'].append(y)\r\n\r\n node_trace['text'].append(G._node[node]['title'])\r\n\r\n node_trace['hovertext'].append(get_annotation(G._node[node]))\r\n\r\n node_trace['marker']['size'].append(len(G[node]))\r\n\r\n node_trace['marker']['color'].append(G._node[node]['rating'])\r\n\r\n\r\n # sizing the nodes\r\n node_trace['marker']['sizeref'] = max(node_trace['marker']['size']) / 50\r\n\r\n # initializing and styling the figure\r\n fig = Figure(\r\n data=[edge_trace, node_trace],\r\n layout=Layout(\r\n titlefont=dict(size=16),\r\n showlegend=False,\r\n plot_bgcolor='rgb(0,0,0)',\r\n paper_bgcolor='rgb(0,0,0)',\r\n hovermode='closest',\r\n margin={'l': 0, 'r': 0, 't': 10, 'b': 0, 'pad': 0},\r\n xaxis=XAxis(showgrid=False, zeroline=False, showticklabels=False),\r\n yaxis=YAxis(showgrid=False, zeroline=False, showticklabels=False)\r\n )\r\n )\r\n return fig", "title": "" }, { "docid": "06302600ac28d908e408d3497c4abce1", "score": "0.6780563", "text": "def createNXGraph( self, verbose = False ):\n try:\n if(verbose):\n print(\"creating NXGraph\")\n linds = len(self.inds)\n startTime = time.time()\n self.G = nx.Graph()\n\n for i in range(len(self.inds)):\n if( verbose and i % 10000 == 0):\n print(i)\n neighbors, results = self.getValidNeighborsInformation(self.inds[i],distances=True,figuresOfMerit=True)\n \n # Add edges from the current point (ind) to each of its valid neighbors\n edgeCosts = self.cf(*(results['figuresOfMerit'],results['distances'],0))\n \n edges = [(i,neighbors[j],{'weight':edgeCosts[j]}) for j in range(len(neighbors))]\n #edges = [(i,neighbors[j],edgeCosts[j]) for j in range(len(neighbors))]\n self.G.add_edges_from(edges)\n self.createGraphTime = time.time()-startTime\n if( verbose ):\n print(\"graph creation time: \",self.createGraphTime)\n except Exception as error:\n print(\"failed in createNXGraph()\", error)", "title": "" }, { "docid": "dcacb2bdcb97b4b53718cccacae372b3", "score": "0.67748564", "text": "def construct_network(self):\n self.network = igraph.Graph(n=self.num_p, directed=True)\n self.network.add_edges(self.genes.tolist())\n marked = ['Input {}'.format(i) for i in range(self.num_in)]\n marked += ['' for _ in 
range(self.num_p-self.num_in-self.num_out)]\n marked += ['Output {}'.format(i) for i in range(self.num_out)]\n self.network.vs['marked'] = marked", "title": "" }, { "docid": "0058bfb837df3fa328cc3ccdebb4c730", "score": "0.6726236", "text": "def visualize(self):\n nx.draw(self.graph, with_labels=True, font_weight=\"bold\")\n plt.show()", "title": "" }, { "docid": "66539cfef34f5c81b9c8fdbf9f75800b", "score": "0.67186356", "text": "def example_pic():\n\n def draw_graph(graph):\n \"\"\"Draws the graph using nx.draw_networkx_*.\"\"\"\n # positions for all nodes\n pos = nx.circular_layout(graph)\n\n # draw all of the things!\n nx.draw_networkx_nodes(graph, pos, nodelist=graph.nodes(),\n node_color='r', node_size=100)\n nx.draw_networkx_edges(graph, pos, width=1.0, alpha=0.5)\n\n # labels = {node: str(node) for node in graph.node.keys()}\n # nx.draw_networkx_labels(graph, pos, labels, font_size=16)\n\n # configure and config the image\n plt.figure(figsize=(5, 2.5))\n plt.axis('off')\n\n # create two graphs with the same number of edges\n num_nodes = 10\n\n # create a fully connected graph\n complete = nx.complete_graph(num_nodes)\n\n # all following drawings go into the first row, second column\n axes = plt.subplot(121)\n plt.setp(axes.get_xaxis(), visible=False)\n plt.setp(axes.get_yaxis(), visible=False)\n\n print(type(axes))\n print(dir(axes))\n\n draw_graph(complete)\n\n # create a sparse graph\n prob = 0.25 # probability of an edge between any two nodes\n sparse = nx.erdos_renyi_graph(num_nodes, prob)\n\n # all following drawings go into the first row, second column\n axes = plt.subplot(122)\n plt.setp(axes.get_xaxis(), visible=False)\n plt.setp(axes.get_yaxis(), visible=False)\n draw_graph(sparse)\n\n # place the file where it belongs\n path = os.path.join(os.environ['ERDOS_PATH'], \"content/images\", \"density.png\")\n plt.savefig(path)\n plt.show()", "title": "" }, { "docid": "e3e02f66a1068443db88fc22733947f5", "score": "0.668162", "text": "def draw_practice_graph(graph):\n nx.draw_networkx(graph)\n plt.show()", "title": "" }, { "docid": "67ef6a5cfeb42140e73111406e3921ca", "score": "0.6661195", "text": "def _build_graph(self):\n pass", "title": "" }, { "docid": "d29e19ad5a897285e4ed6be2fd2dd842", "score": "0.66500443", "text": "def draw_GO_node_structure_graph(self, go_id2gene_set, fig_output_fname, sharing_threshold=30,\\\n\t\tlabel_map=None, is_to_show=1):\n\t\tsys.stderr.write(\"Drawing GO node structure graph...\")\n\t\tg = nx.XGraph()\n\t\tgo_id_list = go_id2gene_set.keys()\n\t\tno_of_go_ids = len(go_id_list)\n\t\tfor i in range(no_of_go_ids):\n\t\t\tfor j in range(i+1, no_of_go_ids):\n\t\t\t\tgo_id1 = go_id_list[i]\n\t\t\t\tgo_id2 = go_id_list[j]\n\t\t\t\tno_of_sharing_genes = len(go_id2gene_set[go_id1]&go_id2gene_set[go_id2])\n\t\t\t\tif no_of_sharing_genes>=sharing_threshold:\n\t\t\t\t\tg.add_edge(go_id1, go_id2, no_of_sharing_genes)\n\t\tedge_width_list = []\n\t\tfor (u, v, d) in g.edges():\n\t\t\tedge_width_list.append(d/5)\n\t\tnode_size_list = []\n\t\tfor v in g:\n\t\t\tnode_size_list.append(len(go_id2gene_set[v])*4)\n\t\tpos=nx.graphviz_layout(g)\n\t\tpylab.figure(figsize=(20,20))\n\t\tnx.draw_networkx_edges(g,pos,\n\t\t\talpha=0.3,\n\t\t\twidth=edge_width_list,\n\t\t\tedge_color='m')\n\t\tnx.draw_networkx_nodes(g,pos,\n\t\t\tnode_size=node_size_list,\n\t\t\tnode_color='r',\n\t\t\talpha=0.4)\n\t\tnx.draw_networkx_edges(g,pos,\n\t\t\talpha=0.4,\n\t\t\tnode_size=0,\n\t\t\twidth=1,\n\t\t\tedge_color='k')\n\t\tabr_label_map = {}\n\t\tif label_map:\n\t\t\t#don't give a dictionary 
with keys more than the nodes, (it'll get error)\n\t\t\tfor key in g:\n\t\t\t\tif key in label_map:\n\t\t\t\t\tabr_label_map[key] = label_map[key]\n\t\tif abr_label_map:\n\t\t\tnx.draw_networkx_labels(g, pos, abr_label_map, fontsize=8)\n\t\telse:\n\t\t\tnx.draw_networkx_labels(g, pos, fontsize=8)\n\t\t#give a title\n\t\tpylab.title(os.path.basename(fig_output_fname))\n\t\t#write the legend\n\t\txmin, xmax, ymin, ymax = pylab.axis()\n\t\tdx = xmax - xmin\n\t\tdy = ymax - ymin\n\t\tx = 0.02*dx + xmin\n\t\ty = 0.95*dy + ymin\n\t\tpylab.text(x, y, \"edge cutoff: %s\"%sharing_threshold)\n\t\tpylab.savefig(fig_output_fname)\n\t\tif is_to_show:\n\t\t\tpylab.show()\n\t\tsys.stderr.write(\"Done.\\n\")", "title": "" }, { "docid": "873bf480fe015218641eef9c437a6bd1", "score": "0.664632", "text": "def __init__(self,**kwargs):\n self.net_params = {}\n for k,v in kwargs.items():\n self.net_params[k] = v \n #\n # node color\n self.node_color = {\"bp\":\"red\",\"bp;cholesterol\":\"green\",\"bp;cholesterol;diabetes\":\"blue\", \"bp;diabetes\":\"purple\"}\n\n self.el = pd.read_csv(self.net_params['edgelist_file'],sep = ',')\n self.el.columns = ['origin','target']\n self.edgelist = [(self.el.ix[i,:]['origin'],self.el.ix[i,:]['target']) for i in self.el.index]\n #\n #\n self.nodes = pd.read_csv(self.net_params['protein_nodes_file']).set_index('uniprot_accession').to_dict()\n #\n self.node2color ={k:self.node_color[v] for k, v in self.nodes['indications'].items()}\n self.node2label = self.nodes['uniprot_id']\n #\n # Build the Graph\n #\n self.G = nx.Graph()\n self.G.add_nodes_from([n for n in self.node2color.keys()])\n for n in self.G.nodes:\n self.G.node[n]['color'] = self.node2color[n]\n self.G.node[n]['label'] = self.node2label[n]\n self.G.nodes[n]['indications'] = self.nodes['indications'][n]\n self.G.add_edges_from(self.edgelist)", "title": "" }, { "docid": "27f3d470eb234f328acae823e3f17f44", "score": "0.6638491", "text": "def graph(self):\n # ---------------------------------------------------------\n\n N = self.G.nodes()\n d = defaultdict(list)\n E = self.G.number_of_edges()\n\n print\n \"The number of Nodes in this Network:\", N\n\n print\n \"The number of Edges in this Network:\", E\n\n fig = plt.figure()\n fig.canvas.set_window_title(\"The ERnet Topology View\")\n nx.draw_networkx(self.G)\n plt.show()\n\n g = ig.Graph(len(self.G), zip(*zip(*nx.to_edgelist(self.G))[:2]))\n self.pt(g)\n cl = g.community_fastgreedy()\n\n # print cl\n membership = cl.as_clustering().membership\n\n print\n membership\n\n self.pt(g, membership)\n\n # print g.get_all_shortest_paths (2, 33)\n\n membership.pop(0)\n for q, a in zip(N, membership):\n print\n 'The Node {0} --> Belongs to cluster {1}.'.format(q, a)\n\n # The following procedure is to get the exact nodes of each cluster\n for i in range(max(membership)):\n i += 1\n for j in range(len(N)):\n if membership[j] == i:\n d[i].append(N[j])\n\n print\n d.items()\n\n # Test the subgraphs correctness, which is the clusters\n fig = plt.figure()\n fig.canvas.set_window_title(\"Sub-Graph/Clique 1 of ERnet\")\n G3 = self.G.subgraph(d[1]) # each index in dictionary \"d\" is considered as a one cluster/subgraph of G\n nx.draw_networkx(G3)\n plt.show()\n # ---------------------------------------------------------", "title": "" }, { "docid": "aefe03fa3c476ba29e59fa30f7be80a4", "score": "0.66126156", "text": "def _buildGraph(self):\n self.G.add_nodes_from([bus for bus in self.buses])\n self.G.add_edges_from([(b.from_bus,b.to_bus) for b in self.branches])", "title": "" }, { "docid": 
"a3c8cf63faef370a171ee89274a3e4d2", "score": "0.65929824", "text": "def drawGraph(space):\n pos = nx.spring_layout(space)\n nx.draw_networkx_nodes(space,pos)\n eone = [(u,v) for (u,v,d) in space.edges(data=True) if d['fuel']==1]\n etwo = [(u,v) for (u,v,d) in space.edges(data=True) if d['fuel']==2]\n ethree = [(u,v) for (u,v,d) in space.edges(data=True) if d['fuel']==3]\n efour = [(u,v) for (u,v,d) in space.edges(data=True) if d['fuel']==4]\n nx.draw_networkx_edges(space,pos,edgelist=eone,width=2,edge_color='green')\n nx.draw_networkx_edges(space,pos,edgelist=etwo,width=3,edge_color='blue')\n nx.draw_networkx_edges(space,pos,edgelist=ethree,width=4,edge_color='orange')\n nx.draw_networkx_edges(space,pos,edgelist=efour,width=5,edge_color='red')\n nx.draw_networkx_labels(space,pos)\n plt.axis(\"off\")\n\n plt.show()", "title": "" }, { "docid": "327a49771e119739f9438bdbe7ae23b2", "score": "0.6573652", "text": "def show_network(self):\n NetworkDrawer.draw_network(self.graph)", "title": "" }, { "docid": "035ec2eb9d0820d1e82839ed52372c84", "score": "0.6567563", "text": "def gvgraph(self, pngfile=None):\n G = nx.DiGraph(self.graph)\n if pngfile is not None:\n dotfile = pngfile + \".dot\"\n write_dot(G, dotfile)\n cmd = (f'dot -Tpng -o{pngfile} {dotfile} ')\n with open(pngfile,'w') as f:\n subprocess.check_output(cmd, shell=True)\n return G", "title": "" }, { "docid": "bbcd575590657428958cb1472f977aa5", "score": "0.6542112", "text": "def print_graph(self):\n\n nx.draw(self.G)\n plt.show()", "title": "" }, { "docid": "082c831fbf955d8a7e6fbff1465fdbdb", "score": "0.65305865", "text": "def plot(self, **kwargs):\n\n # Get positions for all nodes\n pos = kwargs.get(\"pos\", None)\n if pos is None:\n print(\"You did not provide a neuron position dictionary. The spring layout function will be used to plot the network\", file=sys.stderr)\n pos = nx.spring_layout(self.network, weight=\"weight\");\n\n # Size of the plot\n figsize = kwargs.get(\"figsize\", (30, 30))\n plt.figure(figsize=figsize)\n\n # Nodes\n node_size = kwargs.get(\"node_size\", 600)\n node_colors = kwargs.get(\"node_colors\", self.neurons)\n a = kwargs.get('a',.5)\n disp_node_labels=kwargs.get('label_nodes',True)\n nx.draw_networkx_nodes(self.network, pos, alpha=a, node_size=node_size, cmap=plt.cm.Dark2, node_color=node_colors)\n\n _, weights = zip(*nx.get_edge_attributes(self.network, \"weight\").items())\n\n # Draw edges\n edgeweight=kwargs.get('edgeweight',2)\n edge_a = kwargs.get('edge_a',1)\n edge_color = kwargs.get('edgecolor',weights)\n edge_cmap = kwargs.get('edge_cmap',plt.cm.coolwarm)\n edgelist = kwargs.get('edgelist',self.network.edges)\n if kwargs.get(\"draw_edges\", True):\n nx.draw_networkx_edges(self.network, pos, alpha=edge_a, width=edgeweight,edge_color=edge_color,edge_cmap=edge_cmap,edgelist=edgelist)\n sm = plt.cm.ScalarMappable(cmap=edge_cmap)\n sm._A = []\n #cbar = plt.colorbar(sm, shrink=0.2, aspect=10, anchor = (0,10))\n \n \n # Labels\n \n label_edges=kwargs.get(\"label_edges\",False)\n if label_edges:\n font_size = kwargs.get(\"font_size\", 10)\n nx.draw_networkx_labels(self.network, pos, font_size=font_size)\n\n if disp_node_labels:\n font_size = kwargs.get(\"font_size\", 10)\n nx.draw_networkx_labels(self.network, pos, font_size=font_size)\n \n title = kwargs.get(\"title\", None)\n plt.title(title)\n plt.axis(\"off\")\n\n save_to_file = kwargs.get(\"save\", False)\n \n if save_to_file:\n file_name=kwargs.get(\"file_name\",\"Graph\")\n plt.savefig(file_name, dpi=300)\n\n plt.show()\n return pos", "title": "" }, { 
"docid": "f859810fcb95ea7d0b944dded8741b1f", "score": "0.65141207", "text": "def visualize_graph(data_instance_number, dataset, dataset_name, graph_type):\n graph_list = list()\n graph_list.append(create_instance_graph(dataset[data_instance_number]))\n if graph_type not in [\"full\", \"no\"]:\n graph_list.append(create_instance_graph(dataset[data_instance_number], graph_type))\n\n for num, graph in enumerate(graph_list):\n print(f\"The graph number {num} had {graph.number_of_edges()} edges.\")\n plt.figure(3, figsize=(8, 8))\n nx.draw(graph, nx.draw_networkx(graph), with_labels=True)\n plt.savefig(f\"{directories['graph visualizations']}/{dataset_name}_{data_instance_number}_{num}.png\")\n quit()", "title": "" }, { "docid": "22bddf36600943212c8ede41ee403ee4", "score": "0.65114856", "text": "def visual(self):\n G = nx.Graph()\n G.add_nodes_from(self.sentences)\n graph = self.text_rank.rank_m\n for i in range(graph.shape[0]):\n for j in range(graph.shape[1]):\n G.add_edges_from((i, j, graph[i][j]))\n # print(G.nodes)\n # print(G.edges)\n\n pass", "title": "" }, { "docid": "ff46628e72c295feba621b84f9360299", "score": "0.64687824", "text": "def plot_graph(self) -> None:\n plt.figure(figsize=(18,14))\n # Get the graph file\n graph_file = self._config.get_param(self.PAR_GRAPH)\n # Create the networkx graph object\n graph = nx.read_graphml(graph_file)\n # Get a layout\n pos = nx.spring_layout(graph, k=0.7, seed=5)\n # Draw the network\n nx.draw_networkx(graph, pos=pos, arrows=True,\n with_labels=True,\n node_size=450, node_color=\"#ffffff\",\n edgecolors=\"#000000\", font_size=10)\n # Add edge labels\n edge_labels = nx.get_edge_attributes(graph,'policy')\n nx.draw_networkx_edge_labels(graph, pos, edge_labels = edge_labels)\n # Moidfy the output file name\n name = '/'.join(self._config.get_output_file().split('/')[:-1]) + \"/graph.pdf\"\n # Save only if it is not present\n if not os.path.isfile(name):\n plt.savefig(name)\n plt.close()", "title": "" }, { "docid": "a0c556221f312fdb9dbf2c8319cf9c7c", "score": "0.64672756", "text": "def build_nx_graph():\n\n graph = nx.Graph(name='test')\n\n links = 0\n\n s1 = DumbSwitch('s1')\n graph.add_node('s1', attr_dict=s1.get_params())\n\n plc1 = PLC('plc1', '192.168.1.10')\n graph.add_node('plc1', attr_dict=plc1.get_params())\n\n link = EthLink(label=links, bandwidth=30, delay=0, loss=0)\n graph.add_edge('plc1', 's1', attr_dict=link.get_params())\n links += 1\n\n plc2 = PLC('plc2', '192.168.1.20')\n graph.add_node('plc2', attr_dict=plc2.get_params())\n\n link = EthLink(label=links, bandwidth=30, delay=0, loss=0)\n graph.add_edge('plc2', 's1', attr_dict=link.get_params())\n links += 1\n\n return graph", "title": "" }, { "docid": "2c697b4d152157df6db7863d2347e238", "score": "0.6460561", "text": "def visualize_graph(edges_lst):\n G = nx.Graph()\n for edge in edges_lst:\n start = edge[0]\n end = edge[1]\n weight = edge[2]\n G.add_edge(start, end, weight=weight)\n pos = nx.planar_layout(G)\n nx.draw_networkx(G, pos)\n labels = nx.get_edge_attributes(G, 'weight')\n nx.draw_networkx_edge_labels(G, pos, edge_labels=labels)\n plt.show()", "title": "" }, { "docid": "9ee1f2e6db90f59a2a7f4c83cca7dc34", "score": "0.6456277", "text": "def example_graph():\n g = nx.Graph()\n g.add_edges_from([('A', 'B'), ('A', 'C'), ('B', 'C'), ('B', 'D'), ('D', 'E'), ('D', 'F'), ('D', 'G'), ('E', 'F'), ('G', 'F')])\n return g", "title": "" }, { "docid": "04d328578bf9a4549eb850161c593bbd", "score": "0.6453227", "text": "def draw_network(self, out_dir='.'):\n if not self.graph is None:\n fig 
= plt.figure(figsize=(14, 8))\n nx.draw(self.graph, with_labels=True)\n fig.savefig(os.path.join(out_dir, 'network.png'))", "title": "" }, { "docid": "da1c67afeac674af04cc039df0c60dc3", "score": "0.64222246", "text": "def __update_graph(self):\n plt.cla()\n nodes = self.g.nodes(data=True)\n nx.draw_networkx(self.g, \n pos={node[0]: node[1]['pos'] for node in nodes},\n with_labels=True,\n node_color = [Simulation.__color(node) for node in nodes]\n )\n plt.draw()", "title": "" }, { "docid": "0d40833ed419f1f79ee2050e5db04268", "score": "0.63869363", "text": "def _draw_network(self):\n plt.clf()\n\n if self.num_populations == 1:\n node_sizes = 5000\n node_border_width = 1.\n else:\n node_sizes = 15000\n node_border_width = 3.\n\n vmin, vmax = 0, np.max(self.pi) + 0.1\n\n nx.draw_networkx_nodes(\n self.g,\n self.pos,\n node_size=node_sizes,\n node_color=self.node_colors,\n edgecolors=\"k\",\n cmap=plt.cm.Blues,\n vmin=vmin,\n vmax=vmax,\n linewidths=node_border_width)\n\n nx.draw_networkx_edges(\n self.g,\n self.pos,\n node_size=node_sizes,\n arrowstyle=\"->\",\n arrowsize=10,\n edge_color=self.edge_colors,\n edge_cmap=plt.cm.Blues,\n width=5)\n\n nx.draw_networkx_edge_labels(self.g, self.pos, edge_labels=self.edge_labels)\n\n if self.num_populations > 1:\n subnode_separation = 0.1\n subgraph = nx.Graph()\n for i_population in range(self.num_populations):\n subgraph.add_node(i_population)\n\n for i_strat_profile in self.g:\n x, y = self.pos[i_strat_profile]\n if self.num_populations == 1:\n node_text = \"$\\\\pi_{\" + self.state_labels[i_strat_profile] + \"}=$\"\n node_text += str(np.round(self.pi[i_strat_profile], decimals=2))\n else:\n node_text = \"\" # No text for multi-population case as plot gets messy\n txt = plt.text(\n x,\n y,\n node_text,\n horizontalalignment=\"center\",\n verticalalignment=\"center\",\n fontsize=12)\n txt.set_path_effects(\n [PathEffects.withStroke(linewidth=3, foreground=\"w\")])\n\n if self.num_populations > 1:\n sub_pos = nx.circular_layout(subgraph)\n subnode_labels = dict()\n strat_profile = utils.get_strat_profile_from_id(\n self.num_strats_per_population, i_strat_profile)\n for i_population in subgraph.nodes():\n i_strat = strat_profile[i_population]\n subnode_labels[i_population] = \"$s^{\" + str(i_population + 1) + \"}=\"\n subnode_labels[i_population] += (\n self.state_labels[i_population][i_strat] + \"$\")\n # Adjust the node positions generated by NetworkX's circular_layout(),\n # such that the node for the 1st strategy starts on the left.\n sub_pos[i_population] = (-sub_pos[i_population] * subnode_separation +\n self.pos[i_strat_profile])\n nx.draw(\n subgraph,\n pos=sub_pos,\n with_labels=True,\n width=0.,\n node_color=\"w\",\n labels=subnode_labels,\n node_size=2500)", "title": "" }, { "docid": "e8fa9da1f3cc0d702b4fc2222e176e22", "score": "0.6378314", "text": "def create_graph(network_data):\n return df_to_networkx(\n data=network_data,\n source_col=\"SrcIP\",\n target_col=\"AllExtIPs\",\n source_attrs=[\"TenantId\", \"VMName\"],\n target_attrs=[\"DestPort\", \"ResourceGroup\"],\n edge_attrs=[\"FlowType\", \"TotalAllowedFlows\"],\n )", "title": "" }, { "docid": "560350de582b78f19c25e2460e0db08b", "score": "0.6370103", "text": "def draw_rj(graph):\n nx.draw_networkx(graph)\n plt.savefig(\"romeo-and-juliet.pdf\")\n plt.show()", "title": "" }, { "docid": "0e331f5719efde297d7e3eaf84dc002b", "score": "0.63489974", "text": "def plot_Graph_svg(models, records, out_file, key_topology=\"graph\", **algopt):\n graph = models[key_topology]\n if \"coord\" not in 
graph:\n raise ValueError(\"Nodes coordinates ('coord') were not initialized.\")\n\n if graph[\"dimns\"] >= 3:\n print(\"#!# Crack Error: cannot plot Graphs of more than 2D #!#\")\n return\n if out_file[-4:] != \".svg\":\n out_file += \".svg\"\n\n ### Set default options ###\n _set_default_options(algopt)\n _adapt_coords(models, key_topology, algopt)\n ### Plot ###\n draw = svgw.Drawing(out_file, size=algopt[\"image_size\"])\n nbr_n = graph[\"nbr_n\"]\n nodes = graph[\"nodes\"]\n edges = graph[\"edges\"]\n coord = graph[\"coord\"]\n # Compute node and edge attributes (if needed)\n colors, circles = _compute_node_colors (models, algopt, graph[\"nbr_n\"])\n bars = _compute_node_bars (models, algopt)\n numbers = _compute_node_numbers(models, algopt)\n strokes = _compute_edge_strokes(models, algopt, graph[\"nbr_e\"])\n ### Plot ###\n # Plot the graph\n for i, pi in enumerate(coord):\n # Draw the edges until its neighbors (need to do it first or\n # the edges would traverse the circles)\n for j, e in zip(nodes[i][0], nodes[i][1]):\n if j > i: # Center not plotted yet: draw the edge before.\n draw.add(draw.line(start=pi, end=coord[j],\n stroke=\"black\", stroke_width=strokes[e]))\n # Draw the node\n draw.add(draw.circle(pi, r=algopt[\"node_radius\"],\n fill=colors[i], stroke=\"black\"))\n # Node attributes\n if circles is not None:\n _add_elem_circle(draw, pi, circles[i], algopt)\n if bars is not None:\n _add_elem_bar (draw, pi, bars[i] , algopt)\n if numbers is not None:\n _add_elem_number(draw, pi, numbers[i], algopt)\n # Plot the border\n # TODO\n\n ### End of drawing ###\n draw.save()", "title": "" }, { "docid": "2588b28328cbe27440642268c9ba675d", "score": "0.63020235", "text": "def render_graph(self, output_file):\n global X\n for k,v in G.items():\n a, b = k.split(\"\\t\")\n X.add_weighted_edges_from([(a, b, v)])\n nx.write_dot(X, output_file)", "title": "" }, { "docid": "68c1ec3f6ea6ab1ee48a85b4b5dd19b5", "score": "0.62904584", "text": "def exportToGML(self):\r\n \r\n try:\r\n if self.G:\r\n print(\"G exists\")\r\n nx.write_gml(self.G, \"graphG.gml\")\r\n except:\r\n print(\"G does not exist\")\r\n \r\n try:\r\n if self.EBG:\r\n print(\"EBG exists\")\r\n nx.write_gml(self.EBG, \"graphEBG.gml\")\r\n except:\r\n print(\"EBG does not exist\")\r\n \r\n try:\r\n if self.EBGG:\r\n print(\"EBGG exists\")\r\n nx.write_gml(self.EBGG, \"graphEBGG.gml\")\r\n except:\r\n print(\"EBGG does not exist\")\r\n \r\n try:\r\n if self.F:\r\n print(\"F exists\")\r\n nx.write_gml(self.F, \"graphF.gml\")\r\n except:\r\n print(\"F does not exist\")\r\n \r\n try:\r\n if self.P:\r\n print(\"P exists\")\r\n nx.write_gml(self.P, \"graphP.gml\")\r\n except:\r\n print(\"P does not exist\")\r\n \r\n try:\r\n if self.G1:\r\n print(\"G1 exists\")\r\n nx.write_gml(self.G1, \"graphG1.gml\")\r\n except:\r\n print(\"G1 does not exist\")\r\n \r\n try:\r\n if self.G2:\r\n print(\"G2 exists\")\r\n nx.write_gml(self.G2, \"graphG2.gml\")\r\n except:\r\n print(\"G2 does not exist\") \r\n \r\n try:\r\n if self.GGF:\r\n print(\"GGF exists\")\r\n nx.write_gml(self.GGF, \"graphGGF.gml\")\r\n except:\r\n print(\"GGF does not exist\")\r\n \r\n try:\r\n if self.GGB:\r\n print(\"GGB exists\")\r\n nx.write_gml(self.GGB, \"graphGGB.gml\")\r\n except:\r\n print(\"GGB does not exist\")\r\n \r\n try:\r\n if self.LGB:\r\n print(\"LGB exists\")\r\n nx.write_gml(self.LGB, \"graphLGB.gml\")\r\n except:\r\n print(\"LGB does not exist\")\r\n \r\n try:\r\n if self.LGF:\r\n print(\"LGF exists\")\r\n nx.write_gml(self.LGF, \"graphLGF.gml\")\r\n 
except:\r\n print(\"LGF does not exist\")\r\n \r\n try:\r\n if self.NFG:\r\n print(\"NFG exists\")\r\n nx.write_gml(self.NFG, \"graphNFG.gml\")\r\n except:\r\n print(\"NFG does not exist\")\r\n \r\n try:\r\n if self.NBG:\r\n print(\"NBG exists\")\r\n nx.write_gml(self.NBG, \"graphNBG.gml\")\r\n except:\r\n print(\"NBG does not exist\")", "title": "" }, { "docid": "3b387a1a28aa21ed53572bc6a018c3ae", "score": "0.6284512", "text": "def build_graph(self):\n raise NotImplementedError", "title": "" }, { "docid": "fca08f04fcd55f4721e2ec4c49fbd29a", "score": "0.6283242", "text": "def main():\n hyper = read_hypergraph('data/email-Enron/')\n graph = read_graph('data/email-Enron/')\n # hyper = read_hypergraph('data/human-genome/')\n # graph = read_graph('data/human-genome/')\n edge_fill = hyper.fill_coefficient()\n node_clus = hyper.clustering()\n # edges = [e for e in hyper.edges if 5 <= len(e) <= 8]\n # edges = [e for e in hyper.edges if 2 <= len(e) <= 4]\n edges = [e for e in hyper.edges if 2 <= len(e) <= 8]\n\n hyper_clus = {e: np.mean([node_clus[n] for n in e]) for e in edges}\n graph_clus = nx.clustering(graph)\n graph_clus = {e: np.mean([graph_clus[n] for n in e]) for e in edges}\n\n xx = [graph_clus[e] for e in edges]\n yy = [hyper_clus[e] for e in edges]\n color = [np.log(edge_fill[e]) for e in edges]\n size = [len(e)**(2.5) for e in edges]\n plt.figure(figsize=(16, 9))\n plt.scatter(xx, yy, s=size, cmap='viridis', c=color, alpha=1)\n plt.colorbar(label=r'$\\log$ fill coefficient')\n plt.title('2-to-8-hyperedges')\n plt.xlabel('mean node clustering (in the graph)')\n plt.ylabel('mean node clustering (in the hypergraph)')\n # plt.savefig('pics/email-enron-28-medium.pdf', dpi=600)\n plt.show()", "title": "" }, { "docid": "60c33a8f3390d9b6ce80dd2075781fc1", "score": "0.62783533", "text": "def plot_graph(\n self,\n save_file: bool = False,\n output_filename: str = f\"test.jpg\",\n ):\n labels = {}\n for node in list(self.graph.nodes):\n labels[node] = node.node_name\n nx.draw(self.graph, labels=labels, with_labels=True)\n if not save_file:\n plt.show()\n else:\n plt.savefig(output_filename)", "title": "" }, { "docid": "3669158e2a7e23f23657eb9e48b5c3a9", "score": "0.62777317", "text": "def Visualise(self, location=\"home\"):\n if self.graph is None:\n return \"There is no graph consider adding edges to visualise\"\n plt.tight_layout()\n nx.draw_networkx(self.graph, arrows=True, node_size=800)\n plt.savefig(location, format=\"PNG\")\n plt.clf()\n return \"Graph generated\"", "title": "" }, { "docid": "c07224766fb4a3f4c1a0da07603d19d9", "score": "0.62776786", "text": "def plot_trees(relationships):\n edges = []\n edge_labels = {}\n for rel in relationships:\n edges.append((rel[0],rel[1]))\n edge_labels[(rel[0],rel[1])] = rel[2]\n G=nx.DiGraph()\n G.add_edges_from(edges)\n pos = nx.shell_layout(G)\n nx.draw(G,pos,edge_color='black',width=1,linewidths=1,node_size=1000,node_color='gray',\n alpha=0.95,labels={node:node for node in G.nodes()}, font_size=8)\n nx.draw_networkx_edge_labels(G,pos,edge_labels=edge_labels,font_color='red')\n plt.title(\"Relationship Trees\")\n plt.show()", "title": "" }, { "docid": "f7039b4142bdf2b1b973cd9a828c03e7", "score": "0.6277546", "text": "def graph_nx(self, figsize=(15, 10), node_size=3):\n for u, v in self.TG.edges(): # reset previously red to orange\n if self.TG[u][v]['color'] == red:\n self.TG[u][v]['color'] = orange\n for u, v in self.TG.nodes.items():\n if self.TG.nodes[u]['color'] == red:\n self.TG.nodes[u]['color'] = orange\n\n for scr_id, scrambler in 
self.scramblers.items():\n for ior in ['in', 'out']:\n for ch, onoff in scrambler.status[ior].items():\n if onoff == 1:\n first_live_node = f\"{scr_id}-{iomap[ior]}-{ch}\"\n if self.TG.nodes[first_live_node]['color'] == grey:\n self.TG.nodes[first_live_node]['color'] = red\n cypher_node = f\"{scr_id}-{invsoutmap[ior]}-{scrambler.full_scramble(ch)}\"\n # if statement below will add the intra_scrambler edges (letter in --> cypher out)\n # stepwise as each node gets lit up\n if (first_live_node, cypher_node) not in self.TG.edges():\n self.TG.add_edge(first_live_node, cypher_node, color=red)\n\n for bid, inout in scrambler.conxns[ior].items():\n second_live_node = f\"{bid}-{iomap[inout]}-{ch}\"\n try:\n if self.TG.edges[(first_live_node, second_live_node)]['color'] == grey:\n self.TG.edges[(first_live_node, second_live_node)]['color'] = red\n except KeyError:\n if self.TG.edges[(second_live_node, first_live_node)]['color'] == grey:\n self.TG.edges[(second_live_node, first_live_node)]['color'] = red\n\n for ch, onoff in self.register['status'].items():\n if onoff == 1:\n first_live_node = f\"REG-X-{ch}\"\n for bid, inout in self.register['conxns'].items():\n second_live_node = f\"{bid}-{iomap[inout]}-{ch}\"\n try:\n if self.TG.edges[(first_live_node, second_live_node)]['color'] == grey:\n self.TG.edges[(first_live_node, second_live_node)]['color'] = red\n except KeyError:\n if self.TG.edges[(second_live_node, first_live_node)]['color'] == grey:\n self.TG.edges[(second_live_node, first_live_node)]['color'] = red\n\n self.edge_colours = [self.TG[u][v]['color'] for u, v in self.TG.edges()]\n self.node_colours = [v['color'] for u, v in self.TG.nodes.items()]\n fig, ax = plt.subplots(figsize=figsize)\n nx.draw_networkx_nodes(self.BG, pos=self.base_pos_for_nx)\n nx.draw_networkx_labels(self.BG, pos=self.base_pos_for_nx)\n nx.draw_networkx_edges(self.TG, pos=self.manual_pos, edge_color=self.edge_colours)\n if node_size > 0:\n nx.draw_networkx_nodes(self.TG, pos=self.manual_pos, node_size=node_size, node_color=self.node_colours)", "title": "" }, { "docid": "13f997d3dd2e91334030c31949b93e1e", "score": "0.62758106", "text": "def build_graph(\n nodes: List[Tuple[str, Dict]], edges: List[Tuple[str, str, Dict]]\n) -> nx.DiGraph:\n graph = nx.DiGraph()\n graph.add_nodes_from(nodes)\n graph.add_edges_from(edges)\n return graph", "title": "" }, { "docid": "12bb4b1fb1f6331b383ca6364e12f1ae", "score": "0.62723136", "text": "def displayConnectivityAndLocalGains(self, connectionmatrix, localgainmatrix, variablenames, nodepositiondictionary=None):\r\n \r\n [n, n] = localgainmatrix.shape \r\n self.G = nx.DiGraph() #this is convenient\r\n localgaindict = dict()\r\n localgaindictformat = dict()\r\n for u in range(n):\r\n for v in range(n):\r\n if (connectionmatrix[u, v] == 1):\r\n self.G.add_edge(variablenames[v], variablenames[u], localgain=round(localgainmatrix[u, v]))\r\n localgaindict[(variablenames[v], variablenames[u])] = localgainmatrix[u, v]\r\n localgaindictformat[(variablenames[v], variablenames[u])] = round(localgainmatrix[u, v], 3)\r\n \r\n posdict = nodepositiondictionary \r\n \r\n if posdict is None:\r\n posdict = nx.circular_layout(self.G)\r\n \r\n plt.figure(\"Web of connectivity and local gains\")\r\n nx.draw_networkx(self.G, pos=posdict)\r\n nx.draw_networkx_edge_labels(self.G, pos=posdict, edge_labels=localgaindictformat, label_pos=0.7)\r\n nx.draw_networkx_edges(self.G, pos=posdict, width=2.5, edge_color='k', style='solid', alpha=0.15)\r\n nx.draw_networkx_nodes(self.G, pos=posdict, 
node_color='y', node_size=450)\r\n plt.axis(\"off\")", "title": "" }, { "docid": "5b459c5c199b18fc829c52d546fb4235", "score": "0.6249718", "text": "def render_network_graph(self, ax=None, show=False):\n if self._renderer is None:\n self._renderer = Viewer(self.network)\n state = self.current_state\n self._renderer.render_graph(state, ax, show)", "title": "" }, { "docid": "ea1ee78b7865375130d5d36ff13393c6", "score": "0.6248063", "text": "def draw(self, G = None):\n if not G: G = self.G\n\n\n pos = nx.shell_layout(G)\n nx.draw_networkx_nodes(G, pos)\n nx.draw_networkx_labels(G, pos)\n nx.draw_networkx_edges(G, pos)\n plt.show()", "title": "" }, { "docid": "84ae7850ceb1a0b6f620fbecf824b50f", "score": "0.62463725", "text": "def visualize(inputs, outputs, connections):\n DG = nx.DiGraph()\n\n # Add input and output nodes\n for n in (inputs + outputs):\n DG.add_node(n)\n \n # Add edges (corresponding nodes are automatically added)\n # and get hidden layer nodes\n hiddens = []\n\n for c in connections:\n DG.add_edge(c.get_in_node(), c.get_out_node())\n\n if c.get_in_node() not in inputs + hiddens + outputs:\n hiddens.append(c.get_in_node())\n\n if c.get_out_node() not in inputs + hiddens + outputs:\n hiddens.append(c.get_out_node())\n \n # Match the colors (green for inputs, red for outputs, the rest is blue)\n colors = (['g'] * len(inputs)) + (['r'] * len(outputs)) + (['b'] * len(hiddens))\n \n nx.draw(DG,\n node_color = colors,\n with_labels = True,\n font_weight = 'bold'\n )\n\n plt.show()\n #plt.savefig(\"hi.png\")  # is this necessary?\n\n return", "title": "" }, { "docid": "5e1eb6300a92bc8059bb2c2fb49bdc60", "score": "0.6244956", "text": "def create_neuron_connections_graph(datastore):\n\tG = nx.DiGraph()\n\n\tsheet_nodes_start_number = {}\n\tn = 0\n\n\tfor sheet in datastore.sheets():\n\t\tsheet_nodes_start_number[sheet] = n\n\t\tn = add_sheet_nodes_to_graph(G,datastore,n,sheet)\n\n\tconnections = datastore.get_analysis_result(identifier='Connections')\n\n\tfor conn in connections:\n\t\tc_weights = conn.weights\n\t\tc_delays = conn.delays\n\t\tadd_connection_edges(G,\n\t\t\t\t\t\t\tsheet_nodes_start_number[conn.source_name],\n\t\t\t\t\t\t\tsheet_nodes_start_number[conn.target_name],\n\t\t\t\t\t\t\tc_weights, c_delays)\n\n\treturn G", "title": "" }, { "docid": "d7420b6040a1581d862c8d486d9c39e1", "score": "0.62432015", "text": "def graphviz_analyze(self):\n self.get_alphabeta_move(save_nodes=True)\n # print \" nodes = {}\".format(len(self.nodes)) \n # return\n # self.nodes.sort(key=lambda x: (x['depth'], x['node']))\n ranks = [''] * self.search_depth\n print \"digraph chomp {\"\n print ' graph [fontname=\"Anonymous Pro\"]';\n for n in self.nodes:\n print '{} [label=<{}> shape=none];'.format(hash(n['node']), n['label'])\n if ranks[n['depth']] == '':\n ranks[n['depth']] = '{rank=same'\n ranks[n['depth']] += ' ' + str(hash(n['node']))\n for r in ranks:\n if r != '':\n print r + '};'\n for n in self.nodes:\n if n['depth'] != 0:\n print \"{} -> {};\".format(hash(n['parent']), hash(n['node']))\n print \"}\"", "title": "" }, { "docid": "af24141377aca46422f88b6d9e0c8efc", "score": "0.6241984", "text": "def build(network_list):\n\n # print that the process is starting\n print('\\nThe function \"build()\" is running, please keep calm and have some coffee...')\n\n # build the graph\n graph = dict()\n for df in network_list:\n graph['edges'] = monarch_edges(df)\n graph['nodes'] = monarch_nodes(df)\n\n return graph", "title": "" }, { "docid": "5ef0ae2998456eeaa619ea6d3bf1828d", "score": "0.62338704", "text": "def 
build_graph(self, *args):\n pass", "title": "" }, { "docid": "7198d0e218df86da6b8c69cb271187aa", "score": "0.6231484", "text": "def plot_graph(self) -> None:\n np.random.seed(42) # For stability\n w = h = 100\n nodes = [v for v in self.di_graph.nodes.values()]\n placed = []\n min_x, max_x = 0, 0\n for n in nodes:\n p_connected = [p for p in placed if p in n.out_edge or p in n.in_edge]\n if n.pos is None:\n if len(p_connected) < 1:\n n.pos = np.random.random(2) * np.array([w, h])\n else:\n n.pos = np.mean([p.pos for p in p_connected], 0)\n min_x = min(min_x, n.pos[0])\n max_x = max(max_x, n.pos[0])\n placed.append(n)\n\n nodes = {n.n_id: n for n in placed}\n a_pad = .0\n for n in nodes.values():\n for o in n.out_edge.keys():\n dx = nodes[o].pos[0] - n.pos[0]\n dy = nodes[o].pos[1] - n.pos[1]\n dist = np.sqrt(np.square(dx) + np.square(dy))\n plt.arrow(n.pos[0], n.pos[1],\n dx, dy,\n color='k',\n length_includes_head=True,\n head_width=dist * 0.05,\n head_length=dist * 0.05,\n width=0.00001 / w\n )\n plt.text(n.pos[0] + a_pad, n.pos[1], n.n_id, fontsize=12, color='limegreen')\n for n in nodes.values():\n plt.plot(n.pos[0], n.pos[1], 'or')\n\n plt.show()", "title": "" }, { "docid": "6e3acc4a0988b72c2d7a528e5fa68697", "score": "0.6223802", "text": "def create_graph(yuv, mu, sigma, load=False):\n if load:\n G = nx.read_adjlist(\"test.adjlist\")\n else:\n G = nx.Graph()\n for i in range(yuv.shape[0]):\n for j in range(yuv.shape[1]):\n G.add_node((i, j)) \n\n # background\n G.add_node((yuv.shape[0],0))\n # foreground\n G.add_node((yuv.shape[0]+1,0))\n\n connect_edges(G, yuv, mu, sigma)\n print(\"graph created\")\n return G", "title": "" }, { "docid": "80ccd17cfadf968b70e850be64810b00", "score": "0.6215747", "text": "def visualize_debug(self, raw=False):\n\n f = plt.figure(figsize = [12.9, 9.6])\n if raw:\n all_pos = nx.get_node_attributes(self.raw_network,'loc')\n labels = {k: k[0] for k in self.raw_network.nodes}\n nx.draw_networkx(self.raw_network,\n with_labels=True, labels=labels,\n font_size = 9,\n nodelist=self.servers_idx,\n node_size=100, node_shape='s',\n pos=all_pos,\n node_color='#eba134')\n nx.draw_networkx(self.raw_network,\n with_labels=True, labels=labels,\n font_size = 9,\n nodelist=self.stations_idx,\n node_size=100, node_shape='^',\n pos=all_pos,\n node_color='#e8eb34') \n edge_labels = {k: self.raw_network.edges[k]['weight'] for k in self.raw_network.edges}\n nx.draw_networkx_edge_labels(self.raw_network, \n pos=nx.get_node_attributes(self.raw_network,'loc'),\n edge_labels=edge_labels, font_size=6) \n else:\n all_pos = nx.get_node_attributes(self.network,'loc')\n labels = {k: k[0] for k in self.network.nodes}\n nx.draw_networkx(self.network,\n with_labels=True, labels=labels,\n font_size = 9,\n nodelist=self.servers_idx,\n node_size=100, node_shape='s',\n pos=all_pos,\n node_color='#eba134')\n nx.draw_networkx(self.network,\n with_labels=True, labels=labels,\n font_size = 9,\n nodelist=self.stations_idx,\n node_size=100, node_shape='^',\n pos=all_pos,\n node_color='#e8eb34')\n nx.draw_networkx(self.network,\n with_labels=True, labels=labels,\n font_size = 9,\n nodelist=self.users_idx,\n node_size=100, node_shape='o',\n pos=all_pos,\n node_color='#34ebdf')\n edge_labels = {k: self.network.edges[k]['weight'] for k in self.network.edges}\n nx.draw_networkx_edge_labels(self.network,\n pos=nx.get_node_attributes(self.network,'loc'),\n edge_labels=edge_labels, font_size=9)\n return f", "title": "" }, { "docid": "779584fc42c5eb5d15e03a737d530673", "score": "0.62118435", "text": "def 
create_graphs(self):\n # print(self.graphs_name_to_label)\n print('len(self.graphs_dict)', len(self.graphs_dict))\n ##############################\n # Append to graphs list\n ##############################\n gnum = 0\n for g_name in list(self.graphs_name_to_label.keys()):\n g_label = self.graphs_name_to_label[g_name]\n graph = self.graphs_dict[g_name]\n\n if not graph.edata:\n del self.graphs_name_to_label[g_name]\n del self.graphs_dict[g_name]\n\n else:\n n_nodes = graph.number_of_nodes()\n if n_nodes > self.max_n_nodes:\n self.max_n_nodes = n_nodes\n\n ######################\n # Normalize edge\n ######################\n # edge_src, edge_dst = graph.edges()\n\n # edge_dst = list(edge_dst.data.numpy())\n # print('graph.edata ('+g_label+')', graph.edata)\n # # edge_type = list(graph.edata[GNN_EDGE_TYPES_KEY])\n # edge_lbl = list(graph.edata[GNN_EDGE_LABELS_KEY])\n\n # # print('edge_dst, edge_type', edge_dst, edge_type)\n # # _, inverse_index, count = np.unique((edge_dst, edge_type), axis=1, return_inverse=True, return_counts=True)\n # _, inverse_index, count = np.unique((edge_dst, edge_lbl), axis=1, return_inverse=True, return_counts=True)\n # degrees = count[inverse_index]\n # edge_norm = np.ones(\n # len(edge_dst), dtype=np.float32) / degrees.astype(np.float32)\n # graph.edata[GNN_EDGE_NORM] = torch.FloatTensor(edge_norm)\n\n self.graphs.append(graph)\n self.graphs_names.append(g_name)\n self.graphs_labels.append(g_label)\n\n # Save this graph to png\n # if gnum < 10:\n if True:\n # print(graph)\n # nx.draw(graph.to_networkx(), with_labels=True)\n # plt.savefig('data/graphs/{}.png'.format(g_name))\n # print(self.graphs_viz[g_name].source)\n self.graphs_viz[g_name].render(\n filename='data/graphviz/{}'.format(g_name))\n gnum += 1\n\n # print(self.graphs)\n\n if self.mapping_path is not None:\n with open(self.mapping_path) as json_file:\n mapping = json.load(json_file)\n with open(self.pickle_folder+'/../mapping.json', 'w') as f:\n json.dump(mapping, f)\n else:\n label_set = set(sorted(self.graphs_labels)) # malware: 0, benign: 1\n num_labels = len(label_set)\n mapping = dict(zip(label_set, list(range(num_labels))))\n with open(self.pickle_folder+'/../mapping.json', 'w') as f:\n json.dump(mapping, f)\n \n # mapping = self.mapping_labels\n num_labels = len(mapping)\n print('num_labels', num_labels)\n print('mapping', mapping)\n labels = [mapping[label] for label in self.graphs_labels]\n # print('labels', labels)\n # print('label_set', label_set)\n\n num_entities = len(set(self.nodes_labels))\n num_rels = len(set(self.edges_labels))\n\n labels_torch = torch.LongTensor(labels)\n print('labels_torch', labels_torch)\n\n torch.save(labels_torch, os.path.join(self.pickle_folder, LABELS))\n save_pickle(num_labels, os.path.join(self.pickle_folder, N_CLASSES))\n save_pickle(num_entities, os.path.join(self.pickle_folder, N_ENTITIES))\n save_pickle(num_rels, os.path.join(self.pickle_folder, N_RELS))\n save_pickle(self.graphs, os.path.join(self.pickle_folder, GRAPH))\n save_pickle(self.graphs_names, os.path.join(self.pickle_folder, GNAMES))\n save_pickle(self.max_n_nodes, os.path.join(\n self.pickle_folder, MAX_N_NODES))\n save_pickle(self.graphs_labels, os.path.join(\n self.pickle_folder, LABELS_TXT))\n save_txt(self.graphs_names, os.path.join(self.pickle_folder, GNAMES+'.txt'))\n\n self.data_dortmund_format = {\n GRAPH: self.graphs,\n GNAMES: self.graphs_names,\n N_CLASSES: num_labels,\n N_ENTITIES: num_entities,\n N_RELS: num_rels,\n LABELS: labels_torch,\n MAX_N_NODES: self.max_n_nodes,\n 
LABELS_TXT: self.graphs_labels\n }", "title": "" }, { "docid": "f4c8e1fa5559b7533f72af5887c7ca94", "score": "0.6208124", "text": "def make_as_graph(self, **kwargs):\n G = self.graph\n positions = nx.drawing.nx_agraph.pygraphviz_layout(G)\n for p1, p2 in self.tie_lines:\n pt1 = Point(positions[p1])\n pt2 = Point(positions[p2])\n line = Line([pt1, pt2], color='grey')\n self.renderer.add(line)\n\n points = []\n for p in self.stable:\n label = '%s:<br>' % p.name\n for other in G[p].keys():\n label += ' -%s<br>' % other.name\n pt = Point(positions[p], label=label)\n points.append(pt)\n if p.show_label:\n self.renderer.add(Text(pt, p.name))\n pc = PointCollection(points, color='green')\n self.renderer.add(pc)\n\n self.renderer.options['grid']['hoverable'] = True\n self.renderer.options['grid']['borderWidth'] = 0\n self.renderer.options['grid']['show'] = False\n self.renderer.options['tooltip'] = True", "title": "" }, { "docid": "045932041f6596fe40a5e05601affb7a", "score": "0.6207639", "text": "def plot_graph(self):\n pos = nx.spring_layout(self.graph)\n nx.draw(self.graph, pos, with_labels=True)\n plt.show()", "title": "" }, { "docid": "01626f64092c21eb48b9c7d810d2d1f8", "score": "0.6206532", "text": "def nx_graph(self) -> Type[nx.Graph]:\n g_nx = nx.Graph()\n d = False\n if np.all(self.coordinates[0, 2] == self.coordinates[:, 2]):\n d = True\n for v in self.vertices:\n g_nx.add_node(\n v.id, pos=tuple(v.pos[:2] if d else v.pos), atom=v.atom)\n for nn in v.neighbors:\n g_nx.add_node(\n nn.id, pos=tuple(nn.pos[:2] if d else nn.pos), atom=nn.atom)\n for nn in v.neighbors:\n g_nx.add_edge(v.id, nn.id)\n return g_nx", "title": "" }, { "docid": "ffa3beee75d51e589a2d0a7ab2dfb52c", "score": "0.6206504", "text": "def draw_graph(fg, name):\n pos = dict()\n pos.setdefault(1, [1, 3])\n pos.setdefault(2, [5, 5])\n pos.setdefault(3, [9, 5])\n pos.setdefault(4, [13, 5])\n pos.setdefault(5, [18, 5])\n pos.setdefault(6, [20, 3])\n pos.setdefault(7, [16, 3])\n pos.setdefault(8, [18, 1])\n pos.setdefault(9, [13, 1])\n pos.setdefault(10, [9, 1])\n pos.setdefault(11, [5, 1])\n pos.setdefault(12, [5, 3])\n pos.setdefault(13, [9, 3])\n pos.setdefault(14, [13, 3])\n nx.draw_networkx_nodes(fg, pos, node_size=300)\n nx.draw_networkx_edges(fg, pos)\n nx.draw_networkx_labels(fg, pos)\n nx.draw_networkx_edge_labels(\n fg, pos, edge_labels=nx.get_edge_attributes(fg, 'weight'), label_pos=0.3)\n plt.savefig(\"./pngs/graph_\" + name + \".png\")\n plt.show()", "title": "" }, { "docid": "f5f022421b3e17844463aefe7dd90c22", "score": "0.6204666", "text": "def graph(self):\n return build_nx_graph(self)", "title": "" }, { "docid": "297da218015362f563ea3f3c5909209a", "score": "0.61987", "text": "def get_huang_graph():\n a = BbnNode(Variable(0, 'a', ['on', 'off']), [0.5, 0.5])\n b = BbnNode(Variable(1, 'b', ['on', 'off']), [0.5, 0.5, 0.4, 0.6])\n c = BbnNode(Variable(2, 'c', ['on', 'off']), [0.7, 0.3, 0.2, 0.8])\n d = BbnNode(Variable(3, 'd', ['on', 'off']), [0.9, 0.1, 0.5, 0.5])\n e = BbnNode(Variable(4, 'e', ['on', 'off']), [0.3, 0.7, 0.6, 0.4])\n f = BbnNode(Variable(5, 'f', ['on', 'off']), [0.01, 0.99, 0.01, 0.99, 0.01, 0.99, 0.99, 0.01])\n g = BbnNode(Variable(6, 'g', ['on', 'off']), [0.8, 0.2, 0.1, 0.9])\n h = BbnNode(Variable(7, 'h', ['on', 'off']), [0.05, 0.95, 0.95, 0.05, 0.95, 0.05, 0.95, 0.05])\n\n bbn = Bbn() \\\n .add_node(a) \\\n .add_node(b) \\\n .add_node(c) \\\n .add_node(d) \\\n .add_node(e) \\\n .add_node(f) \\\n .add_node(g) \\\n .add_node(h) \\\n .add_edge(Edge(a, b, EdgeType.DIRECTED)) \\\n .add_edge(Edge(a, c, 
EdgeType.DIRECTED)) \\\n .add_edge(Edge(b, d, EdgeType.DIRECTED)) \\\n .add_edge(Edge(c, e, EdgeType.DIRECTED)) \\\n .add_edge(Edge(d, f, EdgeType.DIRECTED)) \\\n .add_edge(Edge(e, f, EdgeType.DIRECTED)) \\\n .add_edge(Edge(c, g, EdgeType.DIRECTED)) \\\n .add_edge(Edge(e, h, EdgeType.DIRECTED)) \\\n .add_edge(Edge(g, h, EdgeType.DIRECTED))\n\n return bbn", "title": "" }, { "docid": "12664bd209137a8d81064dc360f9fa91", "score": "0.6193623", "text": "def convert_proto_to_nx_graph(proto_graph, nx_graph, nx_file, overwrite):\n for collection in proto_graph.graph.collections:\n nx_graph.generate_collection(collection.collection_id, collection.name)\n logging.info(f\"Added all collection nodes from proto to nx.\")\n\n for dataset_collection in proto_graph.graph.dataset_collections:\n nx_graph.generate_dataset_collection(dataset_collection.dataset_collection_id,\n dataset_collection.collection_id,\n dataset_collection.name)\n logging.info(f\"Added all dataset collection nodes from proto to nx.\")\n\n for system_collection in proto_graph.graph.system_collections:\n nx_graph.generate_system_collection(system_collection.system_collection_id,\n system_collection.collection_id,\n system_collection.name)\n logging.info(f\"Added all system collection nodes from proto to nx.\")\n\n for dataset in proto_graph.graph.datasets:\n dataset_env = proto_graph.env_enum_to_string(dataset.env)\n nx_graph.generate_dataset(dataset.dataset_id, dataset.dataset_collection_id, dataset.regex_grouping,\n dataset.name, dataset.slo, dataset_env, dataset.description)\n logging.info(f\"Added all dataset nodes from proto to nx.\")\n\n for system in proto_graph.graph.systems:\n system_env = proto_graph.env_enum_to_string(system.env)\n criticality = proto_graph.criticality_enum_to_string(system.system_critic)\n nx_graph.generate_system(system.system_id, criticality, system.system_collection_id, system.regex_grouping,\n system.name, system_env, system.description)\n logging.info(f\"Added all system nodes from proto to nx.\")\n\n for processing in proto_graph.graph.processings:\n impact = proto_graph.processing_impact_enum_to_string(processing.impact)\n freshness = proto_graph.processing_freshness_enum_to_string(processing.freshness)\n nx_graph.generate_processing(processing.system_id, processing.dataset_id, processing.processing_id,\n impact, freshness, inputs=processing.inputs)\n logging.info(f\"Added all processing nodes from proto to nx.\")\n\n for data_integrity in proto_graph.graph.data_integrities:\n nx_graph.generate_data_integrity(data_integrity.data_integrity_id, data_integrity.dataset_collection_id,\n data_integrity.data_integrity_rec_time, data_integrity.data_integrity_volat,\n data_integrity.data_integrity_reg_time, data_integrity.data_integrity_rest_time)\n logging.info(f\"Added all data integrity nodes from proto to nx.\")\n\n # Save graph to file.\n start = time.time()\n nx_graph.save_to_file(nx_file, overwrite=overwrite)\n logging.info(f\"Finished generation and saved nx graph to file in {round(time.time() - start, 1)} seconds.\")", "title": "" }, { "docid": "a32c750657f5db1f2e23a94658e52be4", "score": "0.61916643", "text": "def draw(args):\n G = nx.read_graphml(args.graphml[0])\n draw_social_network(G, args.write)\n return \"\"", "title": "" }, { "docid": "2b07ea4ee6ddb2f4ccaa0892e385e084", "score": "0.61808676", "text": "def create_transition_graph(self):\n import networkx as nx\n G = nx.DiGraph()\n\n# state_namer = Namer('$s_%d$')\n# obs_namer = Namer('$y_%d$')\n# obsset_namer = Namer('$Y_%d$')\n#\n state_namer = 
Namer('s%d')\n obs_namer = Namer('y%d')\n obsset_namer = Namer('Y%d')\n cmd_namer = Namer('u%d')\n# cmd_namer = lambda x: x\n # each state is a node\n for state in self.get_all_states():\n s = state_namer(state)\n G.add_node(s)\n# G.node[s]['node_size'] = 20\n# G.node[s]['node_color'] = [0, 0, 1]\n\n T = self.transitions # (state, obs) -> state\n\n from collections import defaultdict\n edges = defaultdict(lambda: list()) # (s1,s2) -> obs1, obs2, ...\n\n for (state1, obs), state2 in T.items():\n edges[(state1, state2)].append(obs)\n\n for (state1, state2), obs_list in edges.items():\n s1 = state_namer(state1)\n s2 = state_namer(state2)\n\n obsset = tuple(sorted(map(obs_namer, obs_list)))\n obsset_name = obsset_namer(obsset)\n label = obsset_name\n print ('%s -> %s with %s' % (s1, s2, label))\n G.add_edge(s1, s2)\n# G.edge[s1][s2]['edge_color'] = [0, 0, 0]\n G.edge[s1][s2]['label'] = label\n\n\n \n # now for the commands\n #\n #\n C = self.commands # (state, obs) -> command\n \n \n # (state, cmd) => obs1, ..., obs2\n # Which observations make the agent choose cmd in state?\n statecmd2obs = defaultdict(lambda: list())\n \n # print('iterating self.commands')\n for (state, obs), cmd in C.items():\n statecmd2obs[(state,cmd)].append(obs)\n # print('%s, %s -> %s' % (state, obs, cmd))\n\n # A compact representation of the policy\n # as a map from (state, set of observations) to commands.\n\n # policy: (state, obsset) -> cmd\n policy = {}\n # print('iterating statecmd2obs')\n for (state, cmd), obsset in statecmd2obs.items():\n # print(' %s, %s => %s' % (state, cmd, obsset))\n obsset = tuple(sorted(map(obs_namer, obsset)))\n obsset_name = obsset_namer(obsset)\n \n state_name = state_namer(state)\n cmd_name = cmd_namer(cmd)\n\n # print(' or %s, %s => %s' % (state_name, cmd_name, obsset_name))\n # print(' |because %s = %s' % (obsset_name, obsset))\n key = (state_name, obsset_name)\n # print('key', key)\n assert key not in policy\n warnings.warn('writing out commands instead of naming them')\n policy[key] = cmd_name\n policy[key] = cmd\n\n # print 'all commands', set(policy.values()), set(C.values())\n\n assert len(set(policy.values())) == len(set(C.values()))\n\n print(policy)\n\n for n, obsset in obsset_namer.get_name2ob().items():\n print('%4s is %3d obs: %s' % (n, len(obsset), obsset))\n\n for n, cmd in cmd_namer.get_name2ob().items():\n print('%4s is %s' % (n, cmd))\n\n return dict(G=G, \n name2state=state_namer.get_name2ob(),\n name2obs = obs_namer.get_name2ob(),\n name2cmd=cmd_namer.get_name2ob(),\n name2obsset=obsset_namer.get_name2ob(),\n policy=policy)", "title": "" }, { "docid": "6678efdb22aa269170cf9f6c18e2c8ac", "score": "0.61762047", "text": "def generate_network_graph(self):\n self.G = nx.Graph()\n self.ip_pkts = self.getByLayer('IP')\n for pkt in self.ip_pkts:\n self.G.add_edge(pkt.src, pkt.dst)\n return self.G", "title": "" }, { "docid": "22303273c4abfc9117651ab2f1046227", "score": "0.6174752", "text": "def generate_graph(nodes, edges):\n assert isinstance(nodes, list)\n assert isinstance(edges, list)\n G = nx.DiGraph()\n #G.add_nodes_from(nodes)\n G.add_weighted_edges_from(edges)\n return G", "title": "" }, { "docid": "7c576c5a0c48f7ffbba1595bd350c377", "score": "0.61740005", "text": "def gexf_graph():\n\n gexf = Gexf(\"Yuanhao Wang\",\"Lego Sets Visualization Graph\")\n graph=gexf.addGraph(\"undirected\",\"static\",\"bricks graph\")\n attr_id = graph.addNodeAttribute(\"Type\", None, \"string\", force_id=\"node_type\")\n\n with open('data.txt', 'a+') as outfile:\n\n\n top_lego_sets = 
lego_sets()\n\n last_run_time = float('-inf')\n\n for lego_set in top_lego_sets:\n\n # Check to see if we need to wait\n cur_run_start_time = time.time()\n if cur_run_start_time - last_run_time < 1:\n time.sleep(1) # Sleep for 1 second\n \n last_run_time = cur_run_start_time\n \n # Add set as a node\n if not graph.nodeExists(lego_set['set_num']):\n node = graph.addNode(lego_set['set_num'], lego_set['set_name'], r='0', g='0', b='0') #(id, label, r, g, b)\n node.addAttribute(\"node_type\", \"set\")\n print(\"Added set: {}\".format(lego_set['set_num']))\n \n # Get all parts for set\n connection = http.client.HTTPSConnection(\"rebrickable.com\")\n headers = {\n \"Accept\": \"application/json\"\n }\n\n connection.request(\"GET\", \"/api/v3/lego/sets/{}/parts/?page_size=1000&key={}\".format(lego_set['set_num'], API_KEY), headers=headers)\n response = connection.getresponse()\n \n decoder = json.JSONDecoder()\n response = decoder.decode(response.read().decode(\"windows-1252\"))\n connection.close()\n\n # Get top 20 parts by quantity\n all_set_parts = []\n for part in response['results']: \n all_set_parts.append({'quantity': int(part['quantity']), 'color': part['color']['rgb'], 'name': part['part']['name'], 'number': part['part']['part_num'], 'part_id': '{}_{}'.format(part['part']['part_num'], part['color']['rgb'])})\n \n all_set_parts.sort(key=lambda x: x['quantity'], reverse=True)\n all_set_parts = all_set_parts[:20]\n\n json.dump(all_set_parts, outfile)\n \n # Add each part on to the graph\n for part in all_set_parts:\n part_rgb = tuple(int(part['color'][i:i+2], 16) for i in (0, 2, 4))\n if not graph.nodeExists(part['part_id']):\n node = graph.addNode(part['part_id'], part['name'], r=str(part_rgb[0]), g=str(part_rgb[1]), b=str(part_rgb[2]))\n node.addAttribute(\"node_type\", \"part\")\n # Add edge from part to set \n graph.addEdge('{}_{}'.format(part['part_id'], lego_set['set_num']) ,lego_set['set_num'], part['part_id'], part['quantity']) #(id, source, target, weight=\"\")\n print(\"Added part: {} of set: {}\".format(part['part_id'], lego_set['set_num']))\n\n output_file=open(\"2_test.gexf\",\"wb\")\n gexf.write(output_file)", "title": "" }, { "docid": "f36e8846733d50d9f7671593b7262f76", "score": "0.6171854", "text": "def network(self):\n\n nodes = {}\n\n for node_gene in self.nodes:\n nodes[node_gene.id] = Node(node_gene)\n\n links = []\n\n for link_gene in self.links:\n\n if link_gene.enabled:\n from_node = nodes[link_gene.from_node.id]\n to_node = nodes[link_gene.to_node.id]\n\n # Create link.\n link = Link(from_node, to_node, link_gene.weight, link_gene.id)\n links.append(link)\n\n # Add link to nodes.\n from_node.out_links.append(link)\n to_node.in_links.append(link)\n\n nodes = list(nodes.values()) # Already sorted correctly [bias, inputs, outputs, hidden]\n network = Network(nodes, links, self.config.num_inputs, self.config.num_outputs)\n return network", "title": "" }, { "docid": "d897c1e108c7877430aeb7d932aea7fc", "score": "0.616874", "text": "def ArbitraryGraph(self):\r\n\t\tself.G = nx.dense_gnm_random_graph(self.noNodes, self.noEdges)\r\n\t\t#self.G = {0: [15], 1: [8, 2, 3, 10], 2: [1, 10, 5, 6], 3: [1, 15], 4: [5], 5: [2, 4, 6, 15], 6: [9, 2, 12, 5], 7: [8, 11, 13], 8: [1, 14, 7], 9: [10, 6, 15], 10: [1, 2, 12, 13, 9], 11: [7], 12: [10, 13, 6], 13: [10, 15, 12, 7], 14: [8, 15], 15: [0, 3, 5, 9, 13, 14]}\r\n\r\n\t\tif type(self.G) is not dict:\r\n\t\t\tself.G = nx.to_dict_of_lists(self.G)\r\n\t\t\t\t\r\n\t\tfor i in range(0, 
self.noNodes):\r\n\t\t\tself.vertexList.append(i)\r\n\t\tfor key, value in self.G.iteritems():\r\n\t\t\tfor v in value:\r\n\t\t\t\tif key<v:\r\n\t\t\t\t\te = []\r\n\t\t\t\t\te.append(key)\r\n\t\t\t\t\te.append(v)\r\n\t\t\t\t\tself.GEdgeList.append(e)\r\n\t\t\r\n\t\tself.G = nx.Graph(self.G)\r\n\t\tconnComp = sorted(nx.connected_components(self.G))\r\n\t\tself.G = nx.to_dict_of_lists(self.G)\r\n\t\t\r\n\t\tconnComp = list(connComp)\r\n\t\tnoOFConnComp = len(connComp)\r\n\t\tif noOFConnComp > 1:\r\n\t\t\tprint (\"Here we are\")\r\n\t\t\tprint (connComp)\r\n\t\t\tself.G = nx.Graph(self.G)\r\n\t\t\tself.plotArbitraryGraph(self.G)\r\n\t\t\tj = 0\r\n\t\t\twhile j < noOFConnComp - 1:\r\n\t\t\t\tu = random.choice(list(connComp[j%noOFConnComp]))\r\n\t\t\t\tv = random.choice(list(connComp[(j+1)%noOFConnComp]))\r\n\t\t\t\tself.addAnEdge(self.G, self.GEdgeList, u, v)\r\n\t\t\t\tj = j + 1\r\n\t\tprint (str(self.G))\r\n\t\tself.G = nx.Graph(self.G)\r\n\t\tself.plotArbitraryGraph(self.G)\r\n\t\t#print \"see\"\r\n\t\tself.G = nx.to_dict_of_lists(self.G)", "title": "" }, { "docid": "1de1df781cb27533638330ace8850684", "score": "0.6164655", "text": "def show_map(self,G=None,IPV='192.168.110.26',cmap=plt.cm.Greens):\n if not G:\n if not self.G:\n self.G=self.get_graph(IPV)\n else:\n G=self.G\n else:\n pass \n #print [\"blue\"]+list(nodecolor_from_delay(delays)),ls_nombres\n plt.figure(figsize=(20,15)) \n return G,nx.draw_graphviz(G,with_labels=True,node_color=[G.node[nod]['atr'] for nod in G.nodes_iter()],cmap=cmap,node_size=1500)", "title": "" }, { "docid": "1f36865bc03575e09029bf59fe4462a9", "score": "0.6157239", "text": "def build_nx_graph(list_of_nodes, list_of_edges):\n \n g = nx.DiGraph()\n \n for src, label in list_of_nodes:\n g.add_node(src, label=label)\n \n for src, dst, label in list_of_edges:\n g.add_edge(src, dst, label=label)\n \n return g", "title": "" }, { "docid": "398843d09766e9d2f099d4a870ccd41b", "score": "0.6152154", "text": "def draw_net(network, in_names, out_names, view=False, filename=None):\n names = dict(enumerate(in_names+out_names))\n\n # Attributes for network nodes.\n node_attrs = {\n 'shape': 'circle',\n 'fontsize': '12',\n 'height': '0.2',\n 'width': '0.2'}\n\n # Attributes for network input nodes.\n input_attrs = {\n 'style': 'filled',\n 'shape': 'circle'}\n\n # Attributes for network output nodes.\n output_attrs = {\n 'style': 'filled',\n 'color': 'lightblue'}\n\n dot = graphviz.Digraph(format='svg', node_attr=node_attrs)\n # dot.graph_attr['rankdir'] = 'LR'\n dot.graph_attr['ranksep'] = '1.5'\n\n print(names)\n\n for nid, neuron in enumerate(network.neurons):\n name = names.get(nid, str(nid))\n if neuron.type == NeuronType.INPUT or neuron.type == NeuronType.BIAS:\n dot.node(name, _attributes=input_attrs)\n else:\n dot.node(name, _attributes=output_attrs)\n # elif neuron.type == NeuronType.OUTPUT:\n # dot.node(out_names[nid - len(in_names)], _attributes=output_attrs)\n\n for connection in network.connections:\n a = names.get(connection.source_neuron_idx, str(connection.source_neuron_idx))\n b = names.get(connection.target_neuron_idx, str(connection.target_neuron_idx))\n # b = out_names[connection.target_neuron_idx - len(in_names)]\n style = 'solid'\n color = 'darkgreen' if connection.weight > 0 else 'red'\n width = str(0.2 + abs(connection.weight)/2)\n dot.edge(a, b, _attributes={'style': style, 'color': color, 'penwidth': width})\n\n return dot.render(filename, view=view)", "title": "" }, { "docid": "2d291e9d943156e587079a01a67e7c24", "score": "0.61510545", "text": "def 
build_graph(self):\r\n self.add_placeholders()\r\n self.inference()\r\n self.add_loss()\r\n # self.add_accuracy()\r\n self.train()", "title": "" }, { "docid": "954700300464e8d03b147c25c13d5ca5", "score": "0.6149638", "text": "def render_graph(self, solution=None):\n if solution:\n if not self.check_solution(solution):\n print(\"Graph coloring not satisfied!\")\n else:\n print(\"Graph coloring satisfied!\")\n solution = self.solution_from_bits(solution) \n if len(solution) != self.nnodes:\n return\n\n options = { \n 'nodelist': list(range(self.nnodes)),\n 'node_size': 2000, \n 'width': 1,\n 'alpha': 0.7,\n 'node_color': solution\n }\n G = nx.Graph()\n G.add_edges_from(self.edges) \n if self.pos is None:\n self.pos = nx.spring_layout(G)\n nx.draw(G, self.pos, with_labels=True, **options)\n plt.show()", "title": "" }, { "docid": "06a14dc3bb1f0afb42d37f43fcba897e", "score": "0.6148344", "text": "def displayEigenRankGGb(self, nodepos=None):\r\n \r\n \r\n self.GGB = nx.DiGraph()\r\n for i in range(self.gbgain.n):\r\n for j in range(self.gbgain.n):\r\n if (self.gbgain.gMatrix[i, j] != 0):\r\n self.GGB.add_edge(self.gbgain.gVariables[j], self.gbgain.gVariables[i])\r\n \r\n \r\n plt.figure(\"Node Rankings: Google Gain Backward: Scaled\")\r\n rearrange = self.GGB.nodes()\r\n \r\n for node in self.GGB.nodes():\r\n self.GGB.add_node(node, importance=self.gbgain.rankDict[node])\r\n \r\n nodelabels = dict((n, [n, round(self.gbgain.rankDict[n], 3)]) for n in self.GGB.nodes())\r\n sizeArray = [self.gbgain.rankDict[var] * 10000 for var in rearrange]\r\n \r\n if nodepos == None:\r\n nodepos = nx.circular_layout(self.GGB) \r\n \r\n nx.draw_networkx(self.GGB, pos=nodepos , labels=nodelabels, node_size=sizeArray, node_color='y')\r\n nx.draw_networkx_edges(self.GGB, pos=nodepos)\r\n plt.axis(\"off\")", "title": "" }, { "docid": "c5aa35e6a0c33f2e0276e7ca63b526d7", "score": "0.61463004", "text": "def build_dot_file(self, showintro=True, addedgelabels=True,\n showedgelabels=True, edgeid=True, edgeocc=False,\n edgeuse=True, statstype=\"rel\", weightedges=False,\n edgewidthscale=1):\n\n # Write info about graph.\n dot_str = 'digraph G{\\n'\n dot_str += ' producedby=\"{}\" ;\\n'.format(self.producedby)\n dot_str += ' precedenceonly=\"{}\" ;\\n'.format(self.precedenceonly)\n dot_str += ' hypergraph=\"{}\" ;\\n'.format(self.hypergraph)\n #dot_str += ' nodestype=\"{}\" ;\\n'.format(self.nodestype)\n if self.eoi != None:\n dot_str += ' eoi=\"{}\" ;\\n'.format(self.eoi)\n if self.occurrence != None:\n dot_str += ' label=\"Occurrence = {}\" '.format(self.occurrence)\n dot_str += 'fontsize=28 labelloc=\"t\" ;\\n'\n if self.maxrank != None:\n dot_str += ' maxrank=\"{}\" ;\\n'.format(self.maxrank)\n if self.minrank != None:\n dot_str += ' minrank=\"{}\" ;\\n'.format(self.minrank)\n if self.prevcores != None:\n dot_str += ' prevcores=\"{}\" ;\\n'.format(self.prevcores)\n #dot_str += ' ranksep=1 ;\\n'\n #dot_str += ' nodesep=0.2 ;\\n' # Default nodesep is 0.25\n dot_str += ' splines=true ;\\n'\n dot_str += ' outputorder=nodesfirst ;\\n'\n dot_str += ' node [pin=true] ;\\n'\n #dot_str += ' edge [fontsize=18] ;\\n'\n # Compute some statistics to assign edge and intermediary node width.\n minpenwidth = 1 * edgewidthscale\n medpenwidth = 3 * edgewidthscale\n maxpenwidth = 6.5 * edgewidthscale\n all_weights = []\n all_numbers = []\n for hyperedge in self.hyperedges:\n if hyperedge.underlying == False:\n all_weights.append(hyperedge.weight)\n all_numbers.append(hyperedge.number)\n #for coverhyper in self.coverhypers:\n # 
all_uses.append(covermesh.uses)\n average_weight = statistics.mean(all_weights)\n average_number = statistics.mean(all_numbers)\n # Build drawing parameters dict.\n params = {\"average_weight\": average_weight,\n \"average_number\": average_number,\n \"minpenwidth\": minpenwidth,\n \"medpenwidth\": medpenwidth,\n \"maxpenwidth\": maxpenwidth,\n \"addedgelabels\": addedgelabels,\n \"showedgelabels\": showedgelabels,\n \"edgeid\": edgeid,\n \"edgeocc\": edgeocc,\n \"edgeuse\": edgeuse,\n \"statstype\": statstype,\n \"weightedges\": weightedges,\n \"edgewidthscale\": edgewidthscale}\n # Draw nodes.\n midranks = self.midranks\n for int_rank in range(int((self.minrank)*(midranks+1)),\n int((self.maxrank+1)*(midranks+1))):\n current_rank = int_rank/(midranks+1)\n rank_str = \"{}\".format(current_rank)\n if showintro == False and current_rank < 1:\n dot_str += \"//\"\n if current_rank%1 == 0 and current_rank <= self.maxrank:\n rank_str = str(int(current_rank))\n dot_str += ('{{ rank = same ; \"{}\" ['\n 'shape=plaintext'.format(rank_str))\n if self.rankposdict != None:\n if rank_str in self.rankposdict.keys():\n rankpos = self.rankposdict[rank_str]\n dot_str += ', pos={}'.format(rankpos)\n dot_str +='];\\n'\n else:\n rank_str = \"{:.2f}\".format(current_rank)\n dot_str += ('{{ rank = same ; \"{}\" [label=\"\", '\n 'shape=plaintext'.format(rank_str))\n if self.rankposdict != None:\n if rank_str in self.rankposdict.keys():\n rankpos = self.rankposdict[rank_str]\n dot_str += ', pos={}'.format(rankpos)\n dot_str += '];\\n'\n for node in self.eventnodes:\n if node.rank == current_rank and node.shrink == False:\n #node_shape = 'invhouse'\n node_shape = 'rectangle'\n node_color = 'lightblue'\n if node.intro == True:\n node_shape = 'ellipse'\n node_color = 'white'\n if node.label == self.eoi:\n node_shape = 'ellipse'\n node_color = 'indianred2'\n if showintro == False and node.intro == True:\n dot_str += '//'\n node_lines = textwrap.wrap(node.label, 20,\n break_long_words=False)\n dot_str += '{} '.format(node.nodeid)\n node_str = \"\"\n for i in range(len(node_lines)):\n if i == 0:\n node_str += \"{}\".format(node_lines[i])\n else:\n node_str += \"<br/>{}\".format(node_lines[i])\n # Add PDH information if not already present.\n prefix_num = \"\"\n if node.pdh != False and \":\" not in node_str:\n prefix_num = \" : {}\".format(node.pdh)\n dot_str += ('[label=<{}{}>'\n .format(node_str, prefix_num))\n dot_str += ', shape={}, style=\"filled'.format(node_shape)\n if node.pdh == False:\n dot_str += '\"'\n else:\n dot_str += ',dashed\"'\n #if node.highlighted == True:\n # dot_str += ', fillcolor=gold, penwidth=2'\n #else:\n dot_str += ', fillcolor={}'.format(node_color)\n if node.highlighted == True:\n dot_str += ', penwidth=4'\n if node.intro == True:\n dot_str += ', intro={}'.format(node.intro)\n if node.first == True:\n dot_str += ', first={}'.format(node.first)\n if node.pos != None:\n dot_str += ', pos={}'.format(node.pos)\n #dot_str += ', penwidth=2'\n dot_str += \"] ;\\n\"\n for node in self.statenodes:\n if node.rank == current_rank:\n node_shape = 'ellipse'\n node_color = 'skyblue2'\n node_lines = textwrap.wrap(node.label, 20,\n break_long_words=False)\n node_str = \"\"\n for i in range(len(node_lines)):\n if i == 0:\n node_str += \"{}\".format(node_lines[i])\n else:\n node_str += \"<br/>{}\".format(node_lines[i])\n prefix_num = \"\"\n if node.pdh != False and \":\" not in node_str:\n prefix_num = \" : {}\".format(node.pdh)\n dot_str += ('{} [label=<{}{}>'\n .format(node.nodeid, node_str, 
prefix_num))\n dot_str += ', shape={}, style=\"filled'.format(node_shape)\n if node.pdh == False:\n dot_str += '\"'\n else:\n dot_str += ',dashed\"'\n #if node.highlighted == True:\n # dot_str += ', fillcolor=gold, penwidth=2'\n #else:\n dot_str += ', fillcolor={}'.format(node_color)\n if node.highlighted == True:\n dot_str += ', penwidth=4'\n if node.intro == True:\n dot_str += ', intro={}'.format(node.intro)\n if node.first == True:\n dot_str += ', first={}'.format(node.first)\n if node.pos != None:\n dot_str += ', pos={}'.format(node.pos)\n if node.stdedit != None:\n dot_str += ', stded=\"{}\"'.format(node.stdedit)\n dot_str += ', ev={}'.format(node.eventid)\n #dot_str += ', penwidth=2'\n dot_str += \"] ;\\n\"\n ## Draw intermediary nodes that emulate hyperedges if two\n ## sources or more are drawn.\n #for hyperedge in self.hyperedges:\n # for midnode in mesh.midnodes:\n # if midnode.rank == current_rank:\n # # Include the midnode no matter what, but comment it\n # # if showintro is False and edge is underlying. \n # if showintro == False:\n # if mesh.underlying == True:\n # dot_str += '//'\n # dot_str += self.write_midnode(mesh, midnode,\n # average_use, minpenwidth, medpenwidth, maxpenwidth)\n # dot_str += '] ;\\n'\n # Intermediary nodes from cover edges, same as above but only\n # if showintro is False.\n #if showintro == False:\n # for covermesh in self.covermeshes:\n # for midnode in covermesh.midnodes:\n # if midnode.rank == current_rank:\n # dot_str += self.write_midnode(covermesh, midnode,\n # average_use, minpenwidth, medpenwidth, maxpenwidth)\n # dot_str += ', cover=\"True\"] ;\\n'\n # Close rank braces.\n if showintro == False and current_rank < 1:\n dot_str += \"//\"\n dot_str += \"}\\n\"\n # Draw unranked event nodes and shrank nodes.\n for node in self.eventnodes:\n if node.rank == None or node.shrink == True:\n node_lines = textwrap.wrap(node.label, 20,\n break_long_words=False)\n node_str = \"\"\n for i in range(len(node_lines)):\n if i == 0:\n node_str += \"{}\".format(node_lines[i])\n else:\n node_str += \"<br/>{}\".format(node_lines[i])\n if node.shrink == False:\n node_shape = 'ellipse'\n node_color = 'white'\n if showintro == False and node.intro == True:\n dot_str += '//'\n dot_str += '{} '.format(node.nodeid)\n dot_str += '[label=<{}>'.format(node_str)\n dot_str += ', shape={}, style=filled'.format(node_shape)\n if node.highlighted == True:\n dot_str += ', fillcolor=gold, penwidth=2'\n else:\n dot_str += ', fillcolor={}'.format(node_color)\n if node.intro == True:\n dot_str += ', intro={}'.format(node.intro)\n if node.first == True:\n dot_str += ', first={}'.format(node.first)\n if node.pos != None:\n dot_str += ', pos={}'.format(node.pos)\n dot_str += \"] ;\\n\"\n elif node.shrink == True:\n dot_str += '{} '.format(node.nodeid)\n dot_str += '[label=\"\", hlabel=<{}>'.format(node_str)\n dot_str += ', shape=circle'\n dot_str += ', style=filled'\n dot_str += ', fillcolor=white'\n dot_str += ', width=0.1'\n dot_str += ', height=0.1'\n dot_str += ', penwidth=2'\n dot_str += \"] ;\\n\"\n ## Draw unranked midnodes.\n #for mesh in self.meshes:\n # for midnode in mesh.midnodes:\n # if midnode.rank == None:\n # # Include the midnode no matter what, but comment it\n # # if showintro is False and edge is underlying. 
\n # if showintro == False and mesh.underlying == True:\n # dot_str += '//'\n # dot_str += self.write_midnode(mesh, midnode, average_use,\n # minpenwidth, medpenwidth, maxpenwidth)\n # dot_str += '] ;\\n'\n #if showintro == False:\n # for covermesh in self.covermeshes:\n # for midnode in covermesh.midnodes:\n # if midnode.rank == None:\n # dot_str += self.write_midnode(covermesh, midnode,\n # average_use, minpenwidth, medpenwidth, maxpenwidth)\n # dot_str += ', cover=\"True\"] ;\\n'\n # Draw invisible ranking edges.\n for int_rank in range(int((self.minrank)*(midranks+1)),\n int(self.maxrank*(midranks+1))):\n rank = int_rank/(midranks+1)\n if showintro == False and rank < 1:\n dot_str += '//'\n next_rank = rank+(1.0/(midranks+1))\n if rank%1 == 0:\n rank_str = '{}'.format(int(rank))\n else:\n rank_str = '{:.2f}'.format(rank)\n if next_rank%1 == 0:\n next_str = '{}'.format(int(next_rank))\n else:\n next_str = '{:.2f}'.format(next_rank)\n dot_str += ('\"{}\" -> \"{}\" [style=\"invis\"'.format(rank_str,\n next_str))\n if self.rankposdict != None:\n edge_str = \"{} -> {}\".format(rank_str, next_str)\n if edge_str in self.rankposdict.keys():\n edgerankpos = self.rankposdict[edge_str]\n dot_str += ', pos={}'.format(edgerankpos)\n dot_str += '] ;\\n'\n # If showintro is True, write underlying edges and do not write cover\n # edges.\n # If showintro is False, write underlying edges as comments and write\n # cover edges.\n # The method read_dot reads all underlying edges, even if commented,\n # and does not read cover edges.\n if showintro == True:\n hyperedges_to_write = self.hyperedges\n elif showintro == False:\n hyperedges_to_write = self.hyperedges + self.coverhyperedges\n edges_str = \"\"\n midid = 1\n for hyperedge in hyperedges_to_write:\n if self.hypergraph == False:\n under = hyperedge.underlying\n for subedge in hyperedge.edgelist:\n edges_str += self.write_edge(subedge, params,\n underlying=under,\n cover=hyperedge.cover,\n showintro=showintro)\n elif self.hypergraph == True:\n edges_str += self.write_hyperedge(hyperedge, midid, params,\n showintro=showintro)\n midid += 1\n dot_str += edges_str\n # Close graph.\n dot_str += \"}\"\n self.dot_file = dot_str", "title": "" }, { "docid": "dd05efdae324ffb9d3dc0b6f6e81877c", "score": "0.61458254", "text": "def fn_load_graph(G_type):\r\n if G_type == \"google\":\r\n # google graph prep\r\n G_path = \"…\\\\web-Google.txt\"\r\n G = nx.read_edgelist(G_path,comments='#',create_using=nx.DiGraph(), nodetype=int) # load the graph\r\n # get largest weakly connected component\r\n G = max(nx.weakly_connected_component_subgraphs(G),key=len)\r\n G = G.to_undirected() # convert to undirected\r\n \r\n elif G_type == \"gen_large\":\r\n # originally used\r\n #n = 600000 # nodes\r\n #m = 3 # edges per node\r\n #G = nx.barabasi_albert_graph(n,m,seed=1) # preferential attachment\r\n G_path = \"random walks\\\\practical\\\\keeper results\\\\graph_01.txt\"\r\n nx.write_edgelist(G, G_path)\r\n G = nx.read_edgelist(G_path,nodetype=int)\r\n \r\n elif G_type == \"gen_new\":\r\n n = 50 # nodes\r\n m = 3 # edges per node\r\n G = nx.barabasi_albert_graph(n,m,seed=1)\r\n if draw_graph:\r\n pos = 
nx.fruchterman_reingold_layout(G)\r\n nx.draw(G,pos,with_labels=True)\r\n \r\n return G", "title": "" }, { "docid": "e38d1c170cec9e464f2b7f386f9e299d", "score": "0.6124543", "text": "def networkx(self):\n if networkx_installed:\n if self.directed:\n G = networkx.DiGraph()\n else:\n G = networkx.Graph()\n # Add nodes\n for i in range(self.n_nodes):\n G.add_node(i)\n # Copy node data\n try:\n node_attributes = self._node_attributes\n except AttributeError:\n node_attributes = None\n if node_attributes is not None:\n for i in range(self.n_nodes):\n G.nodes[i]['attributes'] = node_attributes[i]\n try:\n node_labels = self._node_labels\n except AttributeError:\n node_labels = None\n if node_labels is not None:\n for i in range(self.n_nodes):\n G.nodes[i]['label'] = node_labels[i]\n # Add edges\n for e in self.get_edge_iter(True):\n if e[3] is not None:\n G.add_edge(e[0], e[1], weight=e[2], attributes=e[3])\n else:\n G.add_edge(e[0], e[1], weight=e[2])\n G.name = self.name\n return G\n else:\n raise Exception('networkx is not installed.')", "title": "" }, { "docid": "eac1fcc44e557a8e421efa5d40a2ac22", "score": "0.611676", "text": "def VisualizeGraph(graph, output_filename):\n # different visualizations for directed and undirected graphs\n if graph.directed:\n viz_graph = nx.DiGraph()\n else:\n viz_graph = nx.Graph()\n\n # iterate over the keys for the ids\n for index in graph.vertices.keys():\n viz_graph.add_node(index)\n\n # iterate over all edges in the graph\n for edge in graph.edges.values():\n # get the source and destination for each edge in the graph\n viz_graph.add_edge(edge.source_index, edge.destination_index)\n\n # create the graph drawing structue\n A = nx.nx_agraph.to_agraph(viz_graph)\n A.layout(prog='dot')\n\n A.draw(output_filename)", "title": "" }, { "docid": "ad8fb516db2f845e161fae783043eb28", "score": "0.6104806", "text": "def make_graph():\n # TODO\n \n return 0", "title": "" }, { "docid": "61b8d35292c3bea9a259a23361ad0723", "score": "0.6102202", "text": "def compute_and_draw_network(self):\n\n if np.max(self.rhos) < self.rho_m:\n print(\"All node-to-node fixation probabilities (not including self-cycles\"\n \" are lower than neutral. Thus, no graph will be drawn.\")\n return\n\n self.g = nx.MultiDiGraph()\n self.edge_labels = {}\n self.edge_alphas = []\n rho_max = np.max(self.rhos / self.rho_m)\n rho_m_alpha = 0.1 # Transparency of neutral selection edges\n\n for i in range(self.num_profiles):\n for j in range(self.num_profiles):\n # Do not draw edge if any node involved is skipped\n if j not in self.nodes_to_skip and i not in self.nodes_to_skip:\n rate = self.rhos[i][j] / self.rho_m\n # Draws edges when fixation from one strategy to another occurs (i.e.,\n # rate > 1), or with fixation equal to neutral selection probability\n # (i.e., rate == 1). This is consistent with visualizations used in\n # finite-population literature.\n if rate > 1:\n # Compute alphas. 
Clip needed due to numerical precision.\n alpha = np.clip(rho_m_alpha + (1 - rho_m_alpha) * rate / rho_max,\n None, 1.)\n self.g.add_edge(i, j, weight=alpha, label=\"{:.01f}\".format(rate))\n self.edge_alphas.append(alpha)\n elif np.isclose(rate, 1):\n alpha = rho_m_alpha\n self.g.add_edge(i, j, weight=alpha, label=\"{:.01f}\".format(rate))\n self.edge_alphas.append(alpha)\n # Label edges for non-self-loops with sufficient flowrate\n if i != j and rate > 1:\n edge_string = \"$\" + str(np.round(rate, decimals=2)) + \"\\\\rho_m$\"\n else:\n edge_string = \"\"\n self.edge_labels[(i, j)] = edge_string\n\n # MultiDiGraph nodes are not ordered, so order the node colors accordingly\n self.node_colors = [self.pi[node] for node in self.g.nodes()]\n\n self.cycles = list(nx.simple_cycles(self.g))\n self.num_cycles = len(self.cycles)\n\n # Color the edges of cycles if user requested it\n if self.i_cycle_to_show >= 0:\n all_cycle_edges = [\n zip(nodes, (nodes[1:] + nodes[:1])) for nodes in self.cycles\n ]\n cur_cycle_edges = all_cycle_edges[self.i_cycle_to_show]\n self.edge_colors = []\n for u, v in self.g.edges():\n if (u, v) in cur_cycle_edges:\n self.edge_colors.append([1., 0., 0.])\n else:\n self.edge_colors.append([1. - self.g[u][v][0][\"weight\"]] * 3)\n else:\n self.edge_colors = [\n [1. - self.g[u][v][0][\"weight\"]] * 3 for u, v in self.g.edges()\n ]\n self.edge_alphas = [self.g[u][v][0][\"weight\"] for u, v in self.g.edges()]\n\n ax = plt.gca()\n\n # Centered circular pose\n self.pos = nx.layout.circular_layout(self.g)\n all_x = [node_pos[0] for node, node_pos in self.pos.items()]\n all_y = [node_pos[1] for node, node_pos in self.pos.items()]\n min_x = np.min(all_x)\n max_x = np.max(all_x)\n min_y = np.min(all_y)\n max_y = np.max(all_y)\n for _, node_pos in self.pos.items():\n node_pos[0] -= (max_x + min_x) / 2\n node_pos[1] -= (max_y + min_y) / 2\n\n # Rendering\n self._draw_network()\n if self.first_run:\n ax.autoscale_view()\n ax.set_axis_off()\n ax.set_aspect(\"equal\")\n plt.ylim(-1.3, 1.3)\n plt.xlim(-1.3, 1.3)\n if self.first_run:\n self.first_run = False\n plt.axis(\"off\")\n plt.show()", "title": "" }, { "docid": "39ea5fdb2e1063e4a72059abf8b0b8ae", "score": "0.60899764", "text": "def generate_graph_features(glycan, libr = None):\n if libr is None:\n libr = lib\n g = glycan_to_nxGraph(glycan, libr = libr)\n #nbr of different node features:\n nbr_node_types = len(set(nx.get_node_attributes(g, \"labels\")))\n #adjacency matrix:\n A = nx.to_numpy_matrix(g)\n N = A.shape[0]\n diameter = nx.algorithms.distance_measures.diameter(g)\n deg = np.array([np.sum(A[i,:]) for i in range(N)])\n dens = np.sum(deg)/2\n avgDeg = np.mean(deg)\n varDeg = np.var(deg)\n maxDeg = np.max(deg)\n nbrDeg4 = np.sum(deg > 3)\n branching = np.sum(deg > 2)\n nbrLeaves = np.sum(deg == 1)\n deg_to_leaves = np.array([np.sum(A[:,deg == 1]) for i in range(N)])\n max_deg_leaves = np.max(deg_to_leaves)\n mean_deg_leaves = np.mean(deg_to_leaves)\n deg_assort = nx.degree_assortativity_coefficient(g)\n betweeness_centr = np.array(pd.DataFrame(nx.betweenness_centrality(g), index = [0]).iloc[0,:])\n betweeness = np.mean(betweeness_centr)\n betwVar = np.var(betweeness_centr)\n betwMax = np.max(betweeness_centr)\n betwMin = np.min(betweeness_centr)\n eigen = np.array(pd.DataFrame(nx.katz_centrality_numpy(g), index = [0]).iloc[0,:])\n eigenMax = np.max(eigen)\n eigenMin = np.min(eigen)\n eigenAvg = np.mean(eigen)\n eigenVar = np.var(eigen)\n close = np.array(pd.DataFrame(nx.closeness_centrality(g), index = [0]).iloc[0,:])\n 
closeMax = np.max(close)\n closeMin = np.min(close)\n closeAvg = np.mean(close)\n closeVar = np.var(close)\n flow = np.array(pd.DataFrame(nx.current_flow_betweenness_centrality(g), index = [0]).iloc[0,:])\n flowMax = np.max(flow)\n flowMin = np.min(flow)\n flowAvg = np.mean(flow)\n flowVar = np.var(flow)\n flow_edge = np.array(pd.DataFrame(nx.edge_current_flow_betweenness_centrality(g), index = [0]).iloc[0,:])\n flow_edgeMax = np.max(flow_edge)\n flow_edgeMin = np.min(flow_edge)\n flow_edgeAvg = np.mean(flow_edge)\n flow_edgeVar = np.var(flow_edge)\n load = np.array(pd.DataFrame(nx.load_centrality(g), index = [0]).iloc[0,:])\n loadMax = np.max(load)\n loadMin = np.min(load)\n loadAvg = np.mean(load)\n loadVar = np.var(load)\n harm = np.array(pd.DataFrame(nx.harmonic_centrality(g), index = [0]).iloc[0,:])\n harmMax = np.max(harm)\n harmMin = np.min(harm)\n harmAvg = np.mean(harm)\n harmVar = np.var(harm)\n secorder = np.array(pd.DataFrame(nx.second_order_centrality(g), index = [0]).iloc[0,:])\n secorderMax = np.max(secorder)\n secorderMin = np.min(secorder)\n secorderAvg = np.mean(secorder)\n secorderVar = np.var(secorder)\n x = np.array([len(nx.k_corona(g,k).nodes()) for k in range(N)])\n size_corona = x[x > 0][-1]\n k_corona = np.where(x == x[x > 0][-1])[0][-1]\n x = np.array([len(nx.k_core(g,k).nodes()) for k in range(N)])\n size_core = x[x > 0][-1]\n k_core = np.where(x == x[x > 0][-1])[0][-1]\n M = ((A + np.diag(np.ones(N))).T/(deg + 1)).T\n eigval, vec = eigsh(M, 2, which = 'LM')\n egap = 1 - eigval[0]\n distr = np.abs(vec[:,-1])\n distr = distr/sum(distr)\n entropyStation = np.sum(distr*np.log(distr))\n features = np.array(\n [diameter, branching, nbrLeaves, avgDeg, varDeg, maxDeg, nbrDeg4, max_deg_leaves, mean_deg_leaves,\n deg_assort, betweeness, betwVar, betwMax, eigenMax, eigenMin, eigenAvg, eigenVar, closeMax, closeMin,\n closeAvg, closeVar, flowMax, flowAvg, flowVar,\n flow_edgeMax, flow_edgeMin, flow_edgeAvg, flow_edgeVar,\n loadMax, loadAvg, loadVar,\n harmMax, harmMin, harmAvg, harmVar,\n secorderMax, secorderMin, secorderAvg, secorderVar,\n size_corona, size_core, nbr_node_types,\n egap, entropyStation, N, dens\n ])\n col_names = ['diameter', 'branching', 'nbrLeaves', 'avgDeg', 'varDeg',\n 'maxDeg', 'nbrDeg4', 'max_deg_leaves', 'mean_deg_leaves',\n 'deg_assort', 'betweeness', 'betwVar', 'betwMax', 'eigenMax',\n 'eigenMin', 'eigenAvg', 'eigenVar', 'closeMax', 'closeMin',\n 'closeAvg', 'closeVar', 'flowMax', 'flowAvg', 'flowVar',\n 'flow_edgeMax', 'flow_edgeMin', 'flow_edgeAvg', 'flow_edgeVar',\n 'loadMax', 'loadAvg', 'loadVar', 'harmMax', 'harmMin', 'harmAvg',\n 'harmVar', 'secorderMax', 'secorderMin', 'secorderAvg', 'secorderVar',\n 'size_corona', 'size_core', 'nbr_node_types', 'egap', 'entropyStation',\n 'N', 'dens']\n feat_dic = {col_names[k]:features[k] for k in range(len(features))}\n return pd.DataFrame(feat_dic, index = [glycan])", "title": "" }, { "docid": "774bd39450015c7b2c3f0507d5b046de", "score": "0.60863566", "text": "def plot_Hypergraph_svg(models, records, out_file, key_topology=\"hypergraph\", **algopt):\n hgraph = models[key_topology]\n if \"coord\" not in hgraph:\n raise ValueError(\"Nodes coordinates ('coord') were not initialized.\")\n\n if hgraph[\"dimns\"] >= 3:\n print(\"#!# Crack Error: cannot plot Hypergraphs of more than 2D #!#\")\n return\n if out_file[-4:] != \".svg\":\n out_file += \".svg\"\n\n ### Set default options ###\n _set_default_options (algopt)\n _set_hypergraph_options(algopt)\n _adapt_coords(models, key_topology, algopt)\n ### Plot 
###\n draw = svgw.Drawing(out_file, size=algopt[\"image_size\"])\n nbr_n = hgraph[\"nbr_n\"]\n nodes = hgraph[\"nodes\"]\n edges = hgraph[\"edges\"]\n coord = hgraph[\"coord\"]\n # Compute node and edge attributes (if needed)\n colors, circles = _compute_node_colors (models, algopt, hgraph[\"nbr_n\"])\n bars = _compute_node_bars (models, algopt)\n numbers = _compute_node_numbers(models, algopt)\n ### Plot ###\n lsquare = algopt[\"node_radius\"]\n # Plot the hypergraph\n # First, draw the node\n for i, pi in enumerate(coord):\n draw.add(draw.circle(pi, r=algopt[\"node_radius\"],\n fill=colors[i], stroke=\"black\"))\n # Then, draw the hyperedges\n for e, ends in enumerate(edges):\n ends = set(ends)\n l = len(ends)\n barycenter = [sum(coord[i][d] for i in ends)/l for d in range(2)]\n square = (\n (barycenter[0] - lsquare/2.0, barycenter[1] - lsquare/2.0),\n (barycenter[0] - lsquare/2.0, barycenter[1] + lsquare/2.0),\n (barycenter[0] + lsquare/2.0, barycenter[1] - lsquare/2.0),\n (barycenter[0] + lsquare/2.0, barycenter[1] + lsquare/2.0),\n )\n for end in ends:\n # find 2 closest ends of square\n dists = [dist_d2(corner, coord[end]) for corner in square]\n indices = list(range(4))\n i1 = min(indices, key=lambda i: dists[i])\n del indices[i1]\n i2 = min(indices, key=lambda i: dists[i])\n # draw a triangle from the end to the square corners\n draw.add(draw.polygon(points=[coord[end], square[i1], square[i2]],\n fill=\"black\", stroke=\"black\",\n opacity=algopt[\"hopacity\"], stroke_width=1))\n # draw the square\n edge_center = [barycenter[d] - lsquare/2 for d in range(2)]\n draw.add(draw.rect(insert=edge_center, size=(lsquare,lsquare),\n fill=\"black\", opacity=0.9))\n # Finally, add the node attributes\n if circles is not None:\n for i, pi in enumerate(coord):\n _add_elem_circle(draw, pi, circles[i], algopt)\n if bars is not None:\n for i, pi in enumerate(coord):\n _add_elem_bar (draw, pi, bars[i] , algopt)\n if numbers is not None:\n for i, pi in enumerate(coord):\n _add_elem_number(draw, pi, numbers[i], algopt)\n # Plot the border\n # TODO\n\n ### End of drawing ###\n draw.save()", "title": "" }, { "docid": "55bb9b5b376314059d4c22e53b4e7a63", "score": "0.6085628", "text": "def kg_from_ner(filename):\n kg = nx.Graph()\n with open(filename, 'r') as infile:\n for line in infile:\n line = line.split(\"#\")\n abstract_id = line[0]\n \n # Get all of the entities in this abstract\n entities = set()\n for i in range(1, len(line), 3):\n entities.add(line[i])\n entities = list(entities)\n # print (\"Entities in \" + str(abstract_id) + \": \" + str(entities))\n \n # Add all of the entities as nodes in the knowledge graph\n nodes = []\n for entity in entities:\n text = reformat(entity)\n kg.add_node(text)\n# if kg.has_node(text):\n# kg.node[text]['ids'].add(abstract_id)\n# else:\n# kg.add_node(text, ids = set([abstract_id]))\n nodes.append(text)\n # print (\"Nodes in \" + str(abstract_id) + \": \" + str(nodes))\n # Add all the co-occurrrences as edges \n for i in range(len(nodes) - 1):\n for j in range(i + 1, len(nodes)):\n node1 = nodes[i]\n node2 = nodes[j]\n if kg.has_edge(node1, node2):\n # print (\"Refining edge \" + str(node1) + \", \" + str(node2))\n kg[node1][node2]['weight'] += 1\n else:\n # print (\"Adding edge \" + str(node1) + \", \" + str(node2))\n kg.add_edge(node1, node2, weight = 1)\n return kg", "title": "" }, { "docid": "142d9fdeb1c0c9c12d036768cf07d243", "score": "0.60837245", "text": "def save_graph(G):\n pos = nx.spring_layout(G, scale=8)\n plt.axis('off')\n nx.draw_networkx_nodes(G, 
pos, alpha=0.5, node_size=20, node_color='red')\n nx.draw_networkx_edges(G, pos, alpha=0.3, width=0.4)\n plt.savefig(\"Cluster_Folder\" + os.path.sep + \"Graph.png\", dpi=300) # save as png", "title": "" }, { "docid": "618322f0716150e9db8fc5e591fdf05d", "score": "0.6075427", "text": "def to_agraph(self):\n\t\treturn nx.nx_agraph.to_agraph(self.G)", "title": "" }, { "docid": "e834e08183b0b89e3905ce13c74978bf", "score": "0.6075273", "text": "def create_network_graph_and_refresh():\n\n network_graph_file.set(filedialog.asksaveasfilename(initialdir=\"/\",\n title=\"Select or Create file:\"))\n graph_network.make_utilization_graph_neat(model, network_graph_file.get(),\n display_plot=False)\n open_file()", "title": "" }, { "docid": "f5dbb43cb0196dfe3f4855245936b3bc", "score": "0.6069126", "text": "def build_graph(self):\n self.add_placeholders()\n self.inference()\n self.add_loss()\n self.add_accuracy()\n self.train()", "title": "" }, { "docid": "44771751c52417416faf48f53d4906ca", "score": "0.60623986", "text": "def displayEigenRankGGf(self, nodepos=None):\r\n \r\n self.GGF = nx.DiGraph()\r\n for i in range(self.gfgain.n):\r\n for j in range(self.gfgain.n):\r\n if (self.gfgain.gMatrix[i, j] != 0):\r\n self.GGF.add_edge(self.gfgain.gVariables[j], self.gfgain.gVariables[i])\r\n \r\n \r\n plt.figure(\"Node Rankings: Google Gain Forward: Scaled\")\r\n rearrange = self.GGF.nodes()\r\n \r\n for node in self.GGF.nodes():\r\n self.GGF.add_node(node, importance=self.gfgain.rankDict[node])\r\n \r\n nodelabels = dict((n, [n, round(self.gfgain.rankDict[n], 3)]) for n in self.GGF.nodes())\r\n sizeArray = [self.gfgain.rankDict[var] * 10000 for var in rearrange]\r\n \r\n if nodepos == None:\r\n nodepos = nx.circular_layout(self.GGF) \r\n \r\n nx.draw_networkx(self.GGF, pos=nodepos , labels=nodelabels, node_size=sizeArray, node_color='y')\r\n nx.draw_networkx_edges(self.GGF, pos=nodepos)\r\n plt.axis(\"off\")", "title": "" }, { "docid": "8ae9fcc45273c7d497d96293d0e4f410", "score": "0.6052107", "text": "def visualize_graph(writer):\n for node in G.nodes(data=True):\n writer.write(\"var redSphere = viewer.entities.add({name : '\"\n + str(node[0])\n + \"', position: Cesium.Cartesian3.fromDegrees(\"\n + str(node[1][\"long_deg\"]) + \", \"\n + str(node[1][\"lat_deg\"]) + \", 0), \"\n + \"ellipsoid : {radii : new Cesium.Cartesian3(5000.0, 5000.0, 5000.0), \"\n + \"material : Cesium.Color.RED.withAlpha(1),}});\\n\")\n\n for edge in G.edges(data=True):\n print(edge)\n writer.write(\"viewer.entities.add({name : '', polyline: { positions: Cesium.Cartesian3.fromDegreesArrayHeights([\"\n + str(nodes_by_id[edge[0]][\"long_deg\"]) + \",\"\n + str(nodes_by_id[edge[0]][\"lat_deg\"]) + \",0,\"\n + str(nodes_by_id[edge[1]][\"long_deg\"]) + \",\"\n + str(nodes_by_id[edge[1]][\"lat_deg\"]) + \",0]), \"\n + \"width: 2.0, arcType: Cesium.ArcType.NONE, \"\n + \"material: new Cesium.PolylineOutlineMaterialProperty({ \"\n + \"color: Cesium.Color.YELLOW.withAlpha(1.0), outlineWidth: 0, outlineColor: Cesium.Color.BLACK})}});\\n\")", "title": "" }, { "docid": "6ae499cdf8f27e326bb21e36ddb7505e", "score": "0.6045354", "text": "def create_graph(nodes_dict, edges_dict, plot_graph=True):\n\n graph = nx.DiGraph() # create directed graph in NetworkX\n\n # Add nodes and edges to networkX graph\n for node in nodes_dict.keys():\n graph.add_node(node,\n node_id=nodes_dict[node][\"id\"],\n xy_pos=nodes_dict[node][\"xy_pos\"],\n node_type=nodes_dict[node][\"type\"])\n\n for edge in edges_dict.keys():\n graph.add_edge(edge[0], edge[1],\n 
edge_id=edge,\n from_node=edges_dict[edge][\"from\"],\n to_node=edges_dict[edge][\"to\"],\n weight=edges_dict[edge][\"length\"])\n\n # Plot networkX graph\n if plot_graph:\n plt.figure()\n node_locations = nx.get_node_attributes(graph, 'xy_pos')\n nx.draw(graph, node_locations, with_labels=True, node_size=100, font_size=10)\n\n return graph", "title": "" }, { "docid": "1381aedc46afc7dea85354c6e2ab4861", "score": "0.6029136", "text": "def buildGraphGexf(root, title, data, flt=[]):\n\n mapping = SpiderFootHelpers.buildGraphData(data, flt)\n graph = nx.Graph()\n\n nodelist = dict()\n ncounter = 0\n for pair in mapping:\n (dst, src) = pair\n col = [\"0\", \"0\", \"0\"]\n\n # Leave out this special case\n if dst == \"ROOT\" or src == \"ROOT\":\n continue\n\n if dst not in nodelist:\n ncounter = ncounter + 1\n if dst in root:\n col = [\"255\", \"0\", \"0\"]\n graph.add_node(dst)\n graph.node[dst]['viz'] = {'color': {'r': col[0], 'g': col[1], 'b': col[2]}}\n nodelist[dst] = ncounter\n\n if src not in nodelist:\n ncounter = ncounter + 1\n if src in root:\n col = [\"255\", \"0\", \"0\"]\n graph.add_node(src)\n graph.node[src]['viz'] = {'color': {'r': col[0], 'g': col[1], 'b': col[2]}}\n nodelist[src] = ncounter\n\n graph.add_edge(src, dst)\n\n gexf = GEXFWriter(graph=graph)\n return str(gexf).encode('utf-8')", "title": "" }, { "docid": "41cfb6257c8b4468fb5e1c6a14933258", "score": "0.6025907", "text": "def gene_regulatory_network(self):\n if not hasattr(self, 'network'):\n self.construct_network()\n\n GRN = igraph.Graph(n=self.num_g, directed=True)\n es = list()\n for p in range(self.num_p)[self.num_in:-self.num_out]:\n es += list(product(self.network.incident(p, mode='IN'),\n self.network.incident(p, mode='OUT')))\n GRN.add_edges(es)\n\n return GRN", "title": "" }, { "docid": "71e5d21d9f1889d7062ede586a1dd8a2", "score": "0.60162044", "text": "def drawGraphOfTheCycle(self):\n labels2 = {edge: round(self.H.get_edge_data(edge[0], edge[1])[\"Weight\"],3) for edge in self.H.edges()}\n circPos = nx.circular_layout(self.G)\n circPos2 = nx.circular_layout(self.H)\n\n plt.figure(figsize = (13,8))\n nx.draw_networkx_nodes(self.G, circPos)\n nx.draw_networkx_labels(self.G,circPos)\n nx.draw_networkx_nodes(self.H, circPos)\n nx.draw_networkx_edges(self.G,circPos, alpha = 0.1)\n nx.draw_networkx_edges(self.H,circPos, alpha = 1)\n #nx.draw_networkx_edge_labels(H, circPos, edge_labels=labels2)\n plt.axis(\"off\")\n plt.show()", "title": "" }, { "docid": "db867d53b6b8740235ba70bbf10bda4c", "score": "0.6014233", "text": "def show_graph(ids: Dict[int, Currency], graph: np.ndarray):\n n = len(graph)\n\n g = nx.Graph()\n\n for i in range(n):\n for j in range(n):\n weight = graph[i][j]\n if weight < OVER_COST:\n g.add_edge(ids[i]._code, ids[j]._code, weight=weight)\n\n plt.plot()\n layout = nx.shell_layout(g)\n nx.draw(g, layout, with_labels=True)\n edge_labels = nx.get_edge_attributes(g, \"weight\")\n for key, weight in edge_labels.items():\n edge_labels[key] = round(edge_labels[key], 4)\n nx.draw_networkx_edge_labels(g, pos=layout, label_pos=0.5, font_color='b', edge_labels=edge_labels, font_size=9)\n plt.show()", "title": "" }, { "docid": "29a9396e6411e0cba6004d6a3ebce09e", "score": "0.60095775", "text": "def main():\r\n\r\n # Graph import\r\n G = nx.read_edgelist('data/graph.txt', comments='#')\r\n valid_graph = nxadapter.nx2nk(G)\r\n\r\n # Training and test graphs creation\r\n test_graph = lp.RandomLinkSampler.byPercentage(valid_graph, 0.9)\r\n train_graph = lp.RandomLinkSampler.byPercentage(test_graph, 0.7)\r\n\r\n # Training and
testing sets creation\r\n testing_set = lp.MissingLinksFinder(test_graph).findAtDistance(2)\r\n training_set = lp.MissingLinksFinder(train_graph).findAtDistance(2)\r\n\r\n # Label creation\r\n y_train = list(map(partial(assign_label, graph=test_graph), training_set))\r\n y_test = list(map(partial(assign_label, graph=valid_graph), testing_set))\r\n\r\n # Concatenation of labels with samples\r\n train = concatenate(training_set, y_train)\r\n test = concatenate(testing_set, y_test)\r\n trainingSet = train.nodes.values\r\n testingSet = test.nodes.values\r\n\r\n # Feature engineering\r\n trainLPs = [\r\n lp.CommonNeighborsIndex(train_graph), lp.JaccardIndex(train_graph),\r\n lp.AdamicAdarIndex(train_graph), lp.ResourceAllocationIndex(train_graph),\r\n lp.PreferentialAttachmentIndex(train_graph), lp.AdjustedRandIndex(train_graph),\r\n lp.NeighborhoodDistanceIndex(train_graph), lp.TotalNeighborsIndex(train_graph),\r\n lp.SameCommunityIndex(train_graph), lp.UDegreeIndex(train_graph),\r\n lp.VDegreeIndex(train_graph)\r\n ]\r\n\r\n testLPs = [\r\n lp.CommonNeighborsIndex(test_graph), lp.JaccardIndex(test_graph),\r\n lp.AdamicAdarIndex(test_graph), lp.ResourceAllocationIndex(test_graph),\r\n lp.PreferentialAttachmentIndex(test_graph), lp.AdjustedRandIndex(test_graph),\r\n lp.NeighborhoodDistanceIndex(test_graph), lp.TotalNeighborsIndex(test_graph),\r\n lp.SameCommunityIndex(test_graph), lp.UDegreeIndex(test_graph), lp.VDegreeIndex(test_graph)\r\n ]\r\n\r\n X_train = lp.getFeatures(trainingSet, *trainLPs)\r\n X_test = lp.getFeatures(testingSet, *testLPs)\r\n\r\n # Concatenate features with samples and labels\r\n features = ['CN', 'JC', 'AA', 'RA', 'PA', 'AR', 'ND', 'TN', 'SC', 'UD', 'VD']\r\n train_features = pd.DataFrame(X_train, columns=features)\r\n test_features = pd.DataFrame(X_test, columns=features)\r\n train = pd.concat([train, train_features], axis=1)\r\n test = pd.concat([test, test_features], axis=1)\r\n\r\n # Export files as csv\r\n train.to_csv('data/train.csv', sep=';', header=True, decimal='.', encoding='utf-8', index=False)\r\n test.to_csv('data/test.csv', sep=';', header=True, decimal='.', encoding='utf-8', index=False)", "title": "" } ]
3ffbcacc52783be322e74793a47343d7
Test the snowball stemmer for a standard sentence
[ { "docid": "4cc82bab7bc6d9f2da54a652f7ecf595", "score": "0.80717385", "text": "def test_apply_nltk_snowball_stemmer():\n stemmer = 'Snowball'\n expected_output = ['the', 'fox', 'was', 'quick', 'walk', 'by',\n 'the', 'seashor', 'in', 'the', 'morn', 'daylight.']\n tokens = tokenizer.whitespace_tokenize(PHRASE_1, stemmer)\n assert expected_output == tokens", "title": "" } ]
[ { "docid": "92efc9072ac1da607e9e9ee87c27dfe2", "score": "0.78030926", "text": "def test_stemming(self):\n\n s = \"Toko-Ekambi scores, assisted by Mendes!\"\n t = Tokenizer(stem=True)\n self.assertEqual([ \"toko\", \"ekambi\", \"score\", \"assist\", \"mend\" ], t.tokenize(s))", "title": "" }, { "docid": "8d9f7254fcde7c8d7f8482516c3178b8", "score": "0.7567415", "text": "def test_tokenize_verbs_with_stemming(self):\n\n text = \"I have no idea how we balance this but with a midfield with Pogba and Ndombele its curtains for the low blocks that once haunted us.\"\n t = Tokenizer(pos=[ 'VB', 'VBG', 'VBD', 'VBN', 'VBP', 'VBZ' ], stem=True)\n self.assertEqual([ 'have', 'balanc', 'haunt' ], t.tokenize(text))\n\n t = Tokenizer(pos=[ 'VB', 'VBG', 'VBD', 'VBN', 'VBP', 'VBZ' ], stem=True)\n text = \"Is Rojo really starting if he doesn't shoot from 40 yards\"\n self.assertEqual([ 'start', 'doe', 'shoot' ], t.tokenize(text))", "title": "" }, { "docid": "6b55b30ddb5f2583ae803f1cad805dba", "score": "0.72505116", "text": "def test_no_stemming(self):\n\n s = \"Toko-Ekambi scores, assisted by Mendes!\"\n t = Tokenizer(stem=False)\n self.assertEqual([ \"toko\", \"ekambi\", \"scores\", \"assisted\", \"mendes\" ], t.tokenize(s))", "title": "" }, { "docid": "a098f48e6ed404bfd93f57aa6a7acdd2", "score": "0.69843405", "text": "def stemming():\n \n new_sent = request.get_json()\n sent = new_sent['sentence']\n\n function = joblib.load('bahasa-engine.pkl')\n resultToken = function[0]['stemming'].stem(sent)\n \n return jsonify(createJSONStemsList(resultToken.split()))", "title": "" }, { "docid": "8a21c1ce4c32e7c7522f58e241199f6c", "score": "0.6861778", "text": "def snowball_stem_question(question):\n \"\"\"More accurate than Porter Stemmer and ignores stopwords\"\"\"\n stemmer = SnowballStemmer(\"english\", ignore_stopwords=True)\n stemmed_question = []\n \n [stemmed_question.append(stemmer.stem(w)) for w in question]\n\n return stemmed_question", "title": "" }, { "docid": "e702deb55ab5811d501907630327cadd", "score": "0.6783675", "text": "def stem_text(text, stemmer='snowball'):\n #text = remove_inside_braces(text) The function remove_inside_braces does not exist. 
We need to see what it does\n tokens = word_tokenize(text)\n if stemmer == 'snowball':\n text_stem = \" \".join([sb_stem.stem(w) for w in tokens])\n else:\n text_stem = \" \".join([pt_stem.stem(w) for w in tokens])\n\n return text_stem", "title": "" }, { "docid": "6bdffb1b78e11ee97d608ddc723cd38f", "score": "0.6772592", "text": "def preprocess_document(self, text):\n ss = SnowballStemmer(\"english\")\n tokenized = word_tokenize(text)\n tokenized = list(filter(lambda x: x not in STOPWORDS, tokenized))\n tokenized = list(set(tokenized))\n tokenized = list(map(lambda x: x.lower(), tokenized))\n lemmatized = list(\n map(lambda x: WordNetLemmatizer().lemmatize(x, pos=\"v\"), tokenized)\n )\n stemmed = list(map(lambda x: ss.stem(x), lemmatized))\n zipped_list = list(zip(stemmed, tokenized))\n for zip_elem in zipped_list:\n (stem, token) = zip_elem\n if (stem != token) and (stem not in self.stemmed_dict.keys()):\n self.stemmed_dict[stem] = [str(token)]\n return stemmed", "title": "" }, { "docid": "f5f907d33469d22166908e576650edb0", "score": "0.6769848", "text": "def test_apply_nltk_porter_stemmer():\n stemmer = 'Porter'\n expected_output = ['the', 'fox', 'wa', 'quickli', 'walk', 'by',\n 'the', 'seashor', 'in', 'the', 'morn', 'daylight.']\n tokens = tokenizer.whitespace_tokenize(PHRASE_1, stemmer)\n assert expected_output == tokens", "title": "" }, { "docid": "a138d8ee2adc1d8ced7ca07d9ff62cee", "score": "0.6737699", "text": "def stemmatize_words_(self) -> None:\n\n stemmer = SnowballStemmer(\"portuguese\")\n self.stems = [stemmer.stem(word) for word in self.tokens]\n self.unique_stems = list(\n set([stemmer.stem(word) for word in self.unique_tokens])\n )", "title": "" }, { "docid": "821eb6d5cd97d3750416c326926445f3", "score": "0.6698263", "text": "def noun_stem (s): \n if s in identical_plurals:\n return s\n if s[-3:] == 'men':\n s = s[:-3]+'man'\n else:\n s = verb_stem(s)\n return s", "title": "" }, { "docid": "3245c0a13f58f4d7845584e0b59d846a", "score": "0.6684386", "text": "def noun_stem (s): \n # add code here\n if (s in unchanging_plurals):\n return s\n if (s.endswith('men')):\n return s[:-2] + 'an'\n else:\n return verb_stem(s)", "title": "" }, { "docid": "7d2c0f8e4cf104477e5eae95e6b9d27b", "score": "0.6655804", "text": "def verb_stem(s):\n ret = \"\"\n exceptionsDict = {\n \"has\": \"have\",\n \"does\" : \"do\"\n } # Should exceptions be ignored? 
https://piazza.com/class/jkuzor9eypxov?cid=240\n if not re.match(\".*(a|e|i|o|u|s|x|y|z|ch|sh)s\", s): # Rule 1\n ret = s[:-1]\n elif re.match(\".*(a|e|i|o|u)ys\", s): # Rule 2\n ret = s[:-1]\n elif len(s) >= 5 and re.match(\".*ies\", s) and not re.match(\".*(a|e|i|o|u)ies\", s): # Rule 3\n ret = (s[:-3] + \"y\")\n elif re.match(\".*ies\", s) : # Rule 4\n ret = s[:-1]\n elif re.match(\".*(o|x|ch|sh|ss|zz)es\", s): # Rule 5\n ret = s[:-2]\n elif re.match(\".*(se|ze)s\", s) and not re.match(\".*(sse|zze)s\", s): # Rule 6\n ret = s[:-1]\n elif re.match(\".*es\",s) and not re.match(\".*(i|o|s|x|z|ch|sh)es\",s):\n ret = s[:-1]\n else:\n pass\n if not s in brown_vbz and not ret in brown_vb:\n ret = \"\"\n if s in exceptionsDict:\n ret = exceptionsDict[s]\n return ret", "title": "" }, { "docid": "515805a98296d54ec647eea30913a980", "score": "0.65731335", "text": "def test_nouns_only_one_sentence(self):\n\n text = \"I have no idea how we balance this but with a midfield with Pogba and Ndombele its curtains for the low blocks that once haunted us.\"\n t = Tokenizer(pos=[ 'NN', 'NNS', 'NNP', 'NNPS' ])\n self.assertEqual([ 'idea', 'midfield', 'Pogba', 'Ndombele', 'curtains', 'blocks' ], t._pos(text))\n\n t = Tokenizer(pos=[ 'NN', 'NNS', 'NNP', 'NNPS' ])\n text = \"Is Rojo really starting if he doesn't shoot from 40 yards\"\n self.assertEqual([ 'rojo', 'yard' ], t.tokenize(text))", "title": "" }, { "docid": "c25fd002b37f16ef5afb034a0fe4522d", "score": "0.6568531", "text": "def stem_sentence(sentence):\n words = sentence.split()\n stemmed_words = [stem(w) for w in words]\n stemmed_sentence = ' '.join(stemmed_words)\n return stemmed_sentence", "title": "" }, { "docid": "c29e252b3bf4a13f21bd95c5b0b264fe", "score": "0.6533578", "text": "def stem_words(words, fixed_words=[], verbose=False):\n stemmer = SnowballStemmer('english')\n stems = []\n for word in words:\n if \"@\" in word or word in fixed_words:\n stem = word\n else:\n stem = stemmer.stem(word)\n stems.append(stem)\n if verbose:\n print(stems)\n return stems", "title": "" }, { "docid": "0ead4f923aefed2a3a130b487be6dea7", "score": "0.65070546", "text": "def embers_stem(x):\n x = x.lower()\n if isinstance(x, unicode) == False:\n x = x.decode('utf-8', 'ignore')\n try:\n stemmer = SnowballStemmer('spanish')\n x1 = FeatureCountVectorizer.preprocess_unicode_text(x, stemmer.stem)\n if (x1 == ''):\n x1 = x\n # print x1\n stemmer = SnowballStemmer('english')\n x2 = FeatureCountVectorizer.preprocess_unicode_text(x, stemmer.stem)\n if (x2 == ''):\n x2 = x\n # print x2\n stemmer = SnowballStemmer('portuguese')\n x3 = FeatureCountVectorizer.preprocess_unicode_text(x, stemmer.stem)\n if (x3 == ''):\n x3 = x\n # print x3\n # print 'success'\n return min(x1, x2, x3, key=lambda x: len(x))\n except:\n return x", "title": "" }, { "docid": "0e432665f6ec12eab377cb3e8b7160de", "score": "0.6497004", "text": "def no_plural_stemmer(word):\n word = word.lower()\n if word.endswith('s') and not (word in PROTECTED_WORDS\n or word.endswith('sis')):\n stemmed_word = stemmer.stem(word)\n if len(stemmed_word) == len(word) - 1:\n word = stemmed_word\n return word", "title": "" }, { "docid": "fa314c46924f9324d372432ceef8d82f", "score": "0.6489065", "text": "def stem(self) -> str:", "title": "" }, { "docid": "27558e3a5b72708c39190444d6259722", "score": "0.6463934", "text": "def stem(self, s):\n return s", "title": "" }, { "docid": "bb818196e4382752ad8ff652fb63c689", "score": "0.6447286", "text": "def test_stemming_cache(self):\n\n s = \"Toko-Ekambi scores, assisted by Mendes!\"\n t = 
Tokenizer(stem=True)\n start = time.time()\n self.assertEqual([ \"toko\", \"ekambi\", \"score\", \"assist\", \"mend\" ], t.tokenize(s))\n elapsed = time.time() - start\n start = time.time()\n self.assertEqual({ 'toko', 'ekambi', 'scores', 'assisted', 'mendes' }, set(t.stem_cache.keys()))\n self.assertEqual('toko', t.stem_cache.get('toko'))\n self.assertEqual('ekambi', t.stem_cache.get('ekambi'))\n self.assertEqual('score', t.stem_cache.get('scores'))\n self.assertEqual('assist', t.stem_cache.get('assisted'))\n self.assertEqual('mend', t.stem_cache.get('mendes'))\n t.tokenize(s)\n self.assertLess(time.time() - start, elapsed)", "title": "" }, { "docid": "d04bb42fb461f886f33d317a9fc1278f", "score": "0.64285135", "text": "def stemmer(word):\n stemmer = Stemmer()\n return stemmer.stem(BeautifulSoup(word, \"html.parser\").text)", "title": "" }, { "docid": "6bb62ba46b9fd92b31ded57f17929326", "score": "0.64221615", "text": "def stematize(sentence):\n stop_words = set(stopwords.words(\"english\"))\n word_tokens = word_tokenize(sentence)\n\n filtered_sentence = []\n for w in word_tokens:\n if w not in stop_words:\n filtered_sentence.append(w)\n stemmed = []\n for w in filtered_sentence:\n stemmed.append(sno.stem(w))\n\n return stemmed", "title": "" }, { "docid": "5d6fe237ccb93539dddbc99bdb0256a8", "score": "0.6410119", "text": "def statement_stemmer(stmt):\n stmt_stems = []\n\n #only stem statements with strings (used in statement_corpus() below)\n if type(stmt) is float:\n return\n \n #stmt_text_str = rev_text.encode('utf-8') ##deprecated.\n stmt_text_str = stmt.lower()\n \n \n sentence_list = nltk.sent_tokenize(stmt_text_str)\n \n for sent in sentence_list:\n word_list = nltk.word_tokenize(sent)\n for word in word_list: #compare with WordNet Corpus\n wn_word = wordnet.morphy(word)\n if wn_word is not None:\n wn_stem = PorterStemmer().stem_word(wn_word)\n stmt_stems.append(wn_stem)\n return stmt_stems", "title": "" }, { "docid": "c2d6edc8127abf001fcc3f3450a44060", "score": "0.6395337", "text": "def porter_stem(tokenized_text: []) -> []:\n if tokenized_text is not None:\n if len(tokenized_text) is not 0:\n ps = PorterStemmer()\n\n # array containing no stop words\n preprocessed = [ps.stem(plural) for plural in tokenized_text]\n\n if len(preprocessed) is not 0:\n return preprocessed\n else:\n return None\n else:\n return None\n else:\n return None", "title": "" }, { "docid": "1c61b54ccc6a35ac55de54094f445039", "score": "0.63898325", "text": "def pre_proc(self,text):\n\t\tfinal_text=[]\n\t\tlancas=LancasterStemmer()\n\t\ttext=text.split(\"\\n\")\n\t\tfor line in text:\n\t\t\tsent=line.split(\".\")\n\t\t\tfor i in sent:\n\t\t\t\ttemp_sent=i.split()\n\t\t\t\tfinal_sent=[]\n\t\t\t\tfor word in temp_sent:\n\t\t\t\t\tif word not in stopwords.words():\t#checks if not a stop word\n\t\t\t\t\t\tfinal_sent.append(lancas.stem(word))\t#stems the word\n\t\t\t\tfinal_text.append(final_sent)\t\t\t\t\t\n\t\tif len(final_text)==1: final_text=final_text[0]\n\t\treturn final_text", "title": "" }, { "docid": "5345352cedfe296e55d5c81f45b4424f", "score": "0.6385435", "text": "def test_middle_high_german_stemmer(self):\n stemmed = middle_high_german_stemmer(\"Man lūte dā zem münster nāch gewoneheit\")\n target = ['man', 'lut', 'dâ', 'zem', 'munst', 'nâch', 'gewoneheit']\n\n self.assertEqual(stemmed, target)", "title": "" }, { "docid": "a6f8451dd53546df949c18d481c8f4f0", "score": "0.63839865", "text": "def stemming():\n # from nltk.stem.snowball import SnowballStemmer --> can also be used\n from nltk.stem.porter import 
PorterStemmer\n plurals = ['die', 'died', 'dying', 'dies', 'died',\n 'responsive', 'responsivity', 'unresponsive']\n stemmer = PorterStemmer()\n singles = [stemmer.stem(single) for single in plurals]\n print(\"plural:\", plurals)\n print(\"singles:\", singles)\n singles = []\n for single in plurals:\n singles.append(stemmer.stem(single))", "title": "" }, { "docid": "7733791843af111cc169c44e004ed82f", "score": "0.63709253", "text": "def stemming(li):\n stemmer = SnowballStemmer(\"english\", ignore_stopwords=True)\n for i in li:\n for j in range(len(i)):\n i[j] = str(stemmer.stem(i[j]))\n print 'Words stemmed.'", "title": "" }, { "docid": "7efda73e79fbe31e3c520d9091f4b1fd", "score": "0.6369623", "text": "def stem(self, sentence):\n return [self.stemmer.stem(x) for x in sentence]", "title": "" }, { "docid": "7e6fd7252fcad50fc1642b06db89f950", "score": "0.63488054", "text": "def my_stem(word):\n if word in do_not_stem_list:\n return word\n if word in my_stem_ref:\n return my_stem_ref[word]\n else:\n return ps.stem(word)", "title": "" }, { "docid": "6c19e5ca8cd3bb57e93858a999bc4e0a", "score": "0.63368195", "text": "def stem_training_data(stemming_data=str):\n porter = nltk.PorterStemmer()\n stemming_data = stemming_data.lower().split()\n m2 = map(lambda x: porter.stem(x), stemming_data)\n return ' '.join(m2)", "title": "" }, { "docid": "ac5d8a04eb78f0fc314b778ce0177661", "score": "0.63215375", "text": "def stem(self, token: str):\n return self._stemmer.stem(token)", "title": "" }, { "docid": "0ec0314047e7a380c936532651d258b5", "score": "0.6290733", "text": "def StemTokenize(text: str):\r\n lancaster = LancasterStemmer()\r\n stems = [lancaster.stem(word) for word in word_tokenize(text)]\r\n stems = list(filter(lambda token: len(token) > 3, stems))\r\n return stems", "title": "" }, { "docid": "94f3fb93193b82e423853d746dfcbf5f", "score": "0.6289811", "text": "def stem_words(tokens):\n print(\" \")\n print(\"-------- Stemming -------\")\n stemmer = PorterStemmer()\n stems = []\n for w in tokens:\n stems.append(stemmer.stem(w))\n return stems", "title": "" }, { "docid": "c2f5d01b31185414a08629263d76ce89", "score": "0.62744", "text": "def stem(word):\n\treturn stemmer.stem(word.lower())", "title": "" }, { "docid": "0567fdbb1edacd0fc0c99bdb7f9056c4", "score": "0.62672144", "text": "def stemming(self, tokens):\n for i in range(len(tokens)):\n tokens[i] = self.stemmer.stem(tokens[i])\n return tokens", "title": "" }, { "docid": "e6986e4602dd3c820eeedc9e8b26b20c", "score": "0.62636244", "text": "def tokenize_and_preprocess_bystatement(stng):\n\n neg_tokens, sentences = add_negation_remove_propnouns(stng) #Adds negation, removes Proper Nouns\n translator = str.maketrans('', '', string.punctuation)\n stripped = [w.translate(translator) for w in neg_tokens] #Removes remaining punctuation\n words = [word for word in stripped if word.isalpha()] #Removes non-alphabetic words\n stop_words = set(stopwords.words('english'))\n stop_words_withneg = []\n for i in stop_words: #Add stopwords and negation of stopwords\n stop_words_withneg.append(i)\n stop_words_withneg.append('not'+i)\n words = [w for w in words if not w in stop_words_withneg] #Remove stopwords\n porter = PorterStemmer()\n stemmed = [porter.stem(word) for word in words] #Stem words\n return stemmed", "title": "" }, { "docid": "a0c8cad56b3c6ddfc18303484dba137c", "score": "0.62424606", "text": "def execute(self):\n return [self.stemmer.stem(token).lower() for token in self.tokens]", "title": "" }, { "docid": "1a9fd4f1a481adbf34b44167bcf181c4", "score": 
"0.6233352", "text": "def test_nouns_proper(self):\n\n text = \"Tanguy Ndombele told Jose Mourinho he never wants to play for him again following a clash earlier this week\"\n t = Tokenizer(pos=[ 'NN', 'NNS', 'NNP', 'NNPS' ])\n self.assertEqual([ 'Tanguy', 'Ndombele', 'Jose', 'Mourinho', 'clash', 'week' ], t._pos(text))", "title": "" }, { "docid": "f6d08d7bf9918600ba26660e073b72ab", "score": "0.622889", "text": "def stem_term(self, text):\n for i in range(len(text)):\n text[i]=self.stemmer.stem(text[i])\n return text", "title": "" }, { "docid": "19adbfd085ffd1c550e82d7c991cdcbc", "score": "0.6228068", "text": "def stem_text(frame):\n for col in ['employer', 'loan title']:\n frame.ix[frame[col].isnull(), col] = 'nan'\n frame[col] = frame[col].map(stem_sentence)", "title": "" }, { "docid": "e8a875eddebe29bb183a7b7c43689497", "score": "0.622457", "text": "def test_word_normalization(self):\n\n t = Tokenizer(normalize_words=True, character_normalization_count=2, stem=False, min_length=2)\n\n s = \"YYYYYEEESSSSSSSSS OOXXXXXXXXXXXX!!!\"\n self.assertEqual([ \"yes\", \"ox\" ], t.tokenize(s))\n\n s = \"GOOOOOOOOOOOOOOAAAAALL!!!\"\n self.assertEqual([ \"goal\" ], t.tokenize(s))", "title": "" }, { "docid": "d2725611a013260680cf9abe7455ac99", "score": "0.62123394", "text": "def doc_preproc(self, doc):\n stemmer = SnowballStemmer(\"english\")\n processed = []\n tokens = gensim.utils.simple_preprocess(doc)\n for token in tokens:\n if token not in gensim.parsing.preprocessing.STOPWORDS and len(token) > 3:\n processed.append(stemmer.stem(WordNetLemmatizer().lemmatize(token, pos='v')))\n return processed", "title": "" }, { "docid": "11d9034235cc3027ed634cc26ca1d507", "score": "0.6188769", "text": "def stem_doc(self, document):\n processed_doc = \"\"\n for sent in sent_tokenize(document):\n lemmatized_sent = \"\"\n for token in wordpunct_tokenize(sent):\n lemmatized_token = self.stemmer.stem(token)\n lemmatized_sent += lemmatized_token + ' '\n processed_doc += lemmatized_sent.strip()\n\n return processed_doc", "title": "" }, { "docid": "c43323208010de895b2e9bc04963ff89", "score": "0.61826605", "text": "def __init__():\n\n word = \"\" # buffer for word to be stemmed\n k = 0\n k0 = 0\n offset = 0 # j is a general offset into the string", "title": "" }, { "docid": "e3a4c5569876bddc246cab73da80b3e0", "score": "0.6182376", "text": "def stemmatize_words(self) -> None:\n stemmer = rslp.RSLPStemmer()\n self.stems = [stemmer.stem(word) for word in self.tokens]\n self.unique_stems = list(\n set([stemmer.stem(word) for word in self.unique_tokens])\n )", "title": "" }, { "docid": "b91c618fb992c4295fcbdfa5c7663807", "score": "0.61728567", "text": "def stem_words(inStr):\n\treturn stem_text(inStr)", "title": "" }, { "docid": "0fe39e969a8569319d7d7cfec24ab2da", "score": "0.6168769", "text": "def stem(text):\n ps = nltk.porter.PorterStemmer()\n text = ' '.join([ps.stem(word) for word in text.split()])\n return text", "title": "" }, { "docid": "2d072f66b7a97712ae0e8086e4ab640f", "score": "0.61627257", "text": "def parseOutText(all_text):\n\n # from nltk.stem import SnowballStemmer\n # stemmer = SnowballStemmer('turkish')\n\n words = \"\"\n ### remove punctuation\n text_string = all_text.translate(string.maketrans(\"\", \"\"), string.punctuation)\n ### project part 2: comment out the line below\n #words = text_string\n\n ### split the text string into individual words, stem each word,\n ### and append the stemmed word to words (make sure there's a single\n ### space between each stemmed word)\n #text_string = 
text_string.replace('\\n', '') \n wordList = text_string.split()\n wordList = [i for i in wordList if not hasNumbers(i) and not 'aha' in i]\n wordList = filter(None, wordList)\n \n # wordList = [stemmer.stem(word) for word in wordList]\n\n words = ' '.join(wordList) \n return words", "title": "" }, { "docid": "9c0ccc9723b835c8792a8209e30470ed", "score": "0.61520416", "text": "def test_run():\n\n text = \"The first time you see The Second Renaissance it may look boring. Look at it at least twice and definitely watch part 2. It will change your view of the matrix. Are the human people the ones who started the war? Is AI a bad thing?\"\n print(\"--- Sample text ---\", text, sep=\"\\n\")\n \n sentences = sent_tokenize(text)\n print(\"\\n--- Sentences ---\")\n print(sentences)\n \n print(\"\\n--- Words ---\")\n for sent in sentences:\n print(sent)\n print(word_tokenize(sent))\n print() # blank line for readability", "title": "" }, { "docid": "e6ab1bd87b01e601e57edfed6d7e5c7a", "score": "0.61464256", "text": "def __word_stemming(str_input):\n result = []\n ps = PorterStemmer()\n\n for word in str_input:\n result.append(ps.stem(word))\n\n return result", "title": "" }, { "docid": "29f0d1cad2e82575f6d8e7a7ee5bd235", "score": "0.6139751", "text": "def stem(w):\n stemmer = PorterStemmer()\n try:\n return stemmer.stem(w)\n except UnicodeDecodeError:\n return w", "title": "" }, { "docid": "b8fc95916ae4d554082f0188141d0b4b", "score": "0.61373067", "text": "def test_nouns_only_multiple_sentence(self):\n\n text = \"Night Call is now out on Xbox One and Nintendo Switch! We're so proud to see the game there and hope you'll enjoy your ride in Paris.\"\n t = Tokenizer(pos=[ 'NN', 'NNS', 'NNP', 'NNPS' ])\n self.assertEqual([ 'Night', 'Call', 'Xbox', 'One', 'Nintendo', 'Switch', 'game', 'ride', 'Paris' ], t._pos(text))", "title": "" }, { "docid": "e8e755b5691270d53c04e0fbc7158c8c", "score": "0.6125966", "text": "def test_no_stopwords(self):\n\n s = \"Gelson Martins tries to shove the referee.\"\n t = Tokenizer(stem=False)\n self.assertEqual([ 'gelson', 'martins', 'tries', 'shove', 'the', 'referee' ], t.tokenize(s))", "title": "" }, { "docid": "5af1c0c772410367758f60045e4edff2", "score": "0.6122596", "text": "def stem(word, stemmer=PORTER, **kwargs):\n if stemmer == PORTER:\n return _stemmer.stem(decode_utf8(word).lower(), **kwargs)\n if stemmer == LEMMA:\n if word.__class__.__name__ == \"Word\":\n if word.lemma is not None:\n return word.lemma\n if word.pos == \"NNS\":\n return singularize(word.string.lower())\n if word.pos.startswith(\"VB\"):\n return conjugate(word.string.lower(), \"infinitive\") or word\n return singularize(word)\n return word", "title": "" }, { "docid": "5abf187ae8d70b23a5fc74e0ec402f5c", "score": "0.61198366", "text": "def features(tokens, index):\n \n # init the stemmer\n stemmer = SnowballStemmer('english')\n \n # Pad the sequence with placeholders\n tokens = [('[START2]', '[START2]'), ('[START1]', '[START1]')] + list(tokens) + [('[END1]', '[END1]'), ('[END2]', '[END2]')]\n # history = ['[START2]', '[START1]'] + list(history)\n \n # shift the index with 2, to accommodate the padding\n index += 2\n \n word, pos = tokens[index]\n prevword, prevpos = tokens[index - 1]\n prevprevword, prevprevpos = tokens[index - 2]\n nextword, nextpos = tokens[index + 1]\n nextnextword, nextnextpos = tokens[index + 2]\n # previob = history[index - 1]\n previob = ''\n contains_dash = '-' in word\n contains_dot = '.' 
in word\n allascii = all([True for c in word if c in string.ascii_lowercase])\n \n allcaps = word == word.capitalize()\n capitalized = word[0] in string.ascii_uppercase\n \n prevallcaps = prevword == prevword.capitalize()\n prevcapitalized = prevword[0] in string.ascii_uppercase\n \n nextallcaps = nextword == nextword.capitalize()\n nextcapitalized = nextword[0] in string.ascii_uppercase\n \n return {\n 'word': word,\n 'lemma': stemmer.stem(word),\n 'pos': pos,\n 'all-ascii': allascii,\n \n 'next-word': nextword,\n 'next-lemma': stemmer.stem(nextword),\n 'next-pos': nextpos,\n \n 'next-next-word': nextnextword,\n 'nextnextpos': nextnextpos,\n \n 'prev-word': prevword,\n 'prev-lemma': stemmer.stem(prevword),\n 'prev-pos': prevpos,\n \n 'prev-prev-word': prevprevword,\n 'prev-prev-pos': prevprevpos,\n \n 'prev-iob': previob,\n \n 'contains-dash': contains_dash,\n 'contains-dot': contains_dot,\n \n 'all-caps': allcaps,\n 'capitalized': capitalized,\n \n 'prev-all-caps': prevallcaps,\n 'prev-capitalized': prevcapitalized,\n \n 'next-all-caps': nextallcaps,\n 'next-capitalized': nextcapitalized,\n }", "title": "" }, { "docid": "5c4f4ba08d42f4cb3b6bbaff4bd59956", "score": "0.6103472", "text": "def test_tokenize_nouns_only(self):\n\n text = \"I've gone through many revisions of this area, but I think this might be the one.\"\n t = Tokenizer(pos=[ 'NN', 'NNS', 'NNP', 'NNPS' ])\n self.assertEqual([ 'taxi', 'day', 'age' ], t.tokenize('Taxis can really go everywhere in this day and age'))", "title": "" }, { "docid": "1a9b1701affdff72f232fa97f30eb856", "score": "0.610148", "text": "def test_set_stems_separately(self):\n self.stemedSmallFont.vstem = 120\n self.stemedSmallFont.hstem = 50", "title": "" }, { "docid": "2197840e65dc6335f82bbc3198e0e5ee", "score": "0.6086208", "text": "def stem(tokens):\n return [stemmer.stem(t) for t in tokens if t.isalpha()]", "title": "" }, { "docid": "1949bdb716aa4ed569440ce800a190fa", "score": "0.60719466", "text": "def execute(self):\r\n\r\n #step 1 - 5\r\n self.start_stemming_process()\r\n\r\n #step 6\r\n if self.dictionary.contains(self.current_word):\r\n self.result = self.current_word\r\n else:\r\n self.result = self.original_word", "title": "" }, { "docid": "0760fdf40787f4287daa8323f6e51a01", "score": "0.6066294", "text": "def noun_stem (s):\n stem = ''\n if s in unchanging_plurals_list:\n stem = s\n elif re.match(r'^[a-zA-Z]*men$', s.lower()):\n stem = s[:-2] + 'an'\n elif re.match(r'^[a-z]*(?![sxyz(ch)(sh)aeiou])[a-z]s$', s.lower()):\n stem = s[:-1]\n elif re.match(r'^[a-z]*[aeiou]ys$', s.lower()):\n stem = s[:-1]\n elif len(s) > 4 and re.match(r'^[a-z]*(?![aeiou])[a-z]ies$', s.lower()):\n stem = s[:-3] + 'y'\n elif len(s) == 4 and re.match(r'(?![aeiou])[a-z]ies$', s.lower()):\n stem = s[:-1]\n elif re.match(r'^[a-z]*[o|x|(ch)|(sh)|(ss)|(zz)]es$', s.lower()):\n stem = s[:-2]\n elif re.match(r'^[a-z]*(?![s])[a-z]ses$', s.lower()) or re.match(r'^[a-z]*(?![z])[a-z]zes$', s.lower()):\n stem = s[:-1]\n elif re.match(r'^[a-z]*(?![iosxz(ch)(sh)])[a-z]es$', s.lower()):\n stem = s[:-1]\n else:\n stem = \"\"\n\n return stem", "title": "" }, { "docid": "2c55e1cec76ade2a18d11d7f363fd458", "score": "0.606569", "text": "def _check_stems(self, s, infls): \n\t\tmatch_stems = []\n\n\t\t# For each of the inflections that is a match, strip the inflection from the end of the word\n\t\t# and look up the stripped word (w) in the stems\n\t\tfor infl in infls:\n\t\t\tw = re.sub ( infl['ending'] + \"$\", \"\", s )\n\n\t\t\tfor stem in self.stems:\n\t\t\t\tif w == stem['orth']:
\n\n\t\t\t\t\t# If the inflection and stem identify as the same part of speech\n\t\t\t\t\tif (\n\t\t\t\t\t\t\tinfl['pos'] == stem['pos']\n\t\t\t\t\t\tor (\n\t\t\t\t\t\t\t\tinfl['pos'] in [\"VPAR\", \"V\"]\n\t\t\t\t\t\t\tand stem['pos'] in [\"VPAR\", \"V\"]\n\t\t\t\t\t\t\t)\n\t\t\t\t\t\t):\n\n\t\t\t\t\t\t# Ensure the inflections apply to the correct stem decl/conj/etc\n\t\t\t\t\t\tif infl['n'][0] == stem['n'][0]:\n\t\t\t\t\t\t\tis_in_match_stems = False \n\n\t\t\t\t\t\t\t# If this stem is already in the match_stems list, add infl to that stem\n\t\t\t\t\t\t\tfor i, mst in enumerate(match_stems):\n\t\t\t\t\t\t\t\tif stem == mst['st']:\n\t\t\t\t\t\t\t\t\tmatch_stems[i]['infls'].append( infl )\n\t\t\t\t\t\t\t\t\tis_in_match_stems = True\n\n\t\t\t\t\t\t\tif not is_in_match_stems:\n\t\t\t\t\t\t\t\tmatch_stems.append({ 'st':stem, 'infls':[infl] })\n\n\t\t# While we're working out the kinks in the word form taxonomies\t\n\t\tif len(match_stems) == 0:\n\t\t\t# and look up the stripped word (w) in the stems\n\t\t\tfor infl in infls:\n\t\t\t\tw = re.sub ( infl['ending'] + \"$\", \"\", s )\n\n\t\t\t\tfor stem in self.stems:\n\t\t\t\t\tif w == stem['orth']: \n\n\t\t\t\t\t\t# If the inflection and stem identify as the same part of speech\n\t\t\t\t\t\tif (\n\t\t\t\t\t\t\t\tinfl['pos'] == stem['pos']\n\t\t\t\t\t\t\tor (\n\t\t\t\t\t\t\t\t\tinfl['pos'] in [\"VPAR\", \"V\"]\n\t\t\t\t\t\t\t\tand stem['pos'] in [\"VPAR\", \"V\"]\n\t\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\t):\n\t\t\t\t\t\t\t# Ensure it's the base form \n\t\t\t\t\t\t\tif infl['n'][0] == 0:\n\t\t\t\t\t\t\t\tis_in_match_stems = False \n\n\t\t\t\t\t\t\t\t# If this stem is already in the match_stems list, add infl to that stem\n\t\t\t\t\t\t\t\tfor i, mst in enumerate(match_stems):\n\t\t\t\t\t\t\t\t\tif stem == mst['st']:\n\t\t\t\t\t\t\t\t\t\tmatch_stems[i]['infls'].append( infl )\n\t\t\t\t\t\t\t\t\t\tis_in_match_stems = True\n\n\t\t\t\t\t\t\t\tif not is_in_match_stems:\n\t\t\t\t\t\t\t\t\tmatch_stems.append({ 'st':stem, 'infls':[infl] })\n\n\n\n\t\treturn match_stems", "title": "" }, { "docid": "b75dafcc4ee40446c3014ba6245275dd", "score": "0.60482186", "text": "def stemmer_fun_stop(sentence,stopwords):\n token_words=word_tokenize(sentence)\n stem_sentence=[]\n for word in token_words:\n if word not in stopwords:\n stem_sentence.append(word)\n stem_sentence.append(\" \") \n return \"\".join(stem_sentence)", "title": "" }, { "docid": "64476b7056ca88e099fd819d1c67c0df", "score": "0.6045204", "text": "def stemmer(content):\n ps = nltk.PorterStemmer()\n\n bodyStems = []\n\n specialStems = []\n\n specialWhitelist = ['h1', 'h2', 'h3', 'h4', 'h5', 'h6',\n 'strong',\n 'title',\n 'b',\n 'i',\n 'em'\n ]\n\n bodyWhitelist = ['html',\n 'span',\n 'p',\n 'a'\n 'ul', 'li', 'ol',\n 'small'\n ]\n\n bodyOutput = ''\n specialOutput = ''\n\n soup = BeautifulSoup(content, 'lxml')\n text = soup.find_all(text = True)\n\n for t in text:\n # filters special tags we want\n if t.parent.name in specialWhitelist:\n\n specialOutput += '{} '.format(t)\n # filters 'body' tags we want\n elif t.parent.name in bodyWhitelist:\n\n bodyOutput += '{} '.format(t) # appends the text\n\n\n\n specialWords = nltk.word_tokenize(specialOutput)\n bodyWords = nltk.word_tokenize(bodyOutput)\n\n for sw in specialWords:\n # Append stems from special Tags\n if (sw.isascii()):\n specialStems.append(ps.stem(sw))\n for bw in bodyWords:\n # Append stems from body Tags\n if (bw.isascii()):\n bodyStems.append(ps.stem(bw))\n\n\n\n return specialStems, bodyStems", "title": "" }, { "docid": 
"ead20abcb4897f4e7e9e241e43bc2395", "score": "0.60418934", "text": "def stemming(tokens):\n stemmer = PorterStemmer()\n return [stemmer.stem(token) for token in tokens]", "title": "" }, { "docid": "cbfb9f599d191fa0754dfcb4aa18778b", "score": "0.60339904", "text": "def stem_sentences(sentences):\n # swap digits for 0\n sentences = [re.sub(_DIGIT_RE, \"0\", sentence) for sentence in sentences]\n # make lower case\n sentences = [sentence.lower() for sentence in sentences]\n\n return sentences", "title": "" }, { "docid": "c5696c9d43b49661efe48d6240baa0d0", "score": "0.6031612", "text": "def text_analysis(jarvis, s):\n sentence = jarvis.input(\"Write down the text: \")\n tokens = nltk.word_tokenize(sentence)\n tagged = nltk.pos_tag(tokens)\n entities = nltk.chunk.ne_chunk(tagged)\n print('Analysis result: ')\n print(entities)", "title": "" }, { "docid": "a09683c1269ea53f4f70487623eb1f40", "score": "0.6018044", "text": "def stem(text):\n ps = nltk.PorterStemmer()\n\n stemmed_text = []\n for sentence in text:\n stemmed_sentence = []\n for word in sentence:\n stemmed_sentence.append(ps.stem(word.lower()))\n stemmed_text.append(stemmed_sentence)\n\n return stemmed_text", "title": "" }, { "docid": "d4fdb83d2b8da3b34e39d1aeb5f59af1", "score": "0.6008122", "text": "def test_normalize_and_tokenize(self):\n rouge = PythonRouge(use_porter_stemmer=False, remove_stopwords=False)\n original = 'Xu Wenli, Wang Youchai, and Qin Yongmin, leading dissidents and '\\\n 'prominent members of the China-Democracy-Party, were found guilty of subversion ' \\\n 'and sentenced to 13, 11, and 12 years in prison, respectively.'\n expected = 'xu wenli wang youchai and qin yongmin leading dissidents and ' \\\n 'prominent members of the china democracy party were found guilty of subversion ' \\\n 'and sentenced to 13 11 and 12 years in prison respectively'.split()\n actual = rouge.normalize_and_tokenize_sentence(original)\n assert expected == actual\n\n rouge = PythonRouge(use_porter_stemmer=True, remove_stopwords=False)\n expected = 'xu wenli wang youchai and qin yongmin lead dissid and promin '\\\n 'member of the china democraci parti be find guilti of subvers and sentenc '\\\n 'to 13 11 and 12 year in prison respect'.split()\n actual = rouge.normalize_and_tokenize_sentence(original)\n assert expected == actual\n\n rouge = PythonRouge(use_porter_stemmer=False, remove_stopwords=True)\n expected = 'xu wenli wang youchai qin yongmin leading dissidents prominent '\\\n 'members china democracy party found guilty subversion sentenced 13 11 '\\\n '12 years prison'.split()\n actual = rouge.normalize_and_tokenize_sentence(original)\n assert expected == actual\n\n rouge = PythonRouge(use_porter_stemmer=True, remove_stopwords=True)\n expected = 'xu wenli wang youchai qin yongmin lead dissid promin member china '\\\n 'democraci parti find guilti subvers sentenc 13 11 12 year prison'.split()\n actual = rouge.normalize_and_tokenize_sentence(original)\n assert expected == actual", "title": "" }, { "docid": "29a74d507f2cdf287324a0d4ddbb4979", "score": "0.6003309", "text": "def preprocess(document: str, stem: Optional[bool] = True) -> str:\n # change sentence to lower case\n document = document.lower()\n # tokenize into words\n words = word_tokenize(document)\n # remove stop words\n words = [word for word in words if word not in stopwords.words(\"english\")]\n\n if stem:\n words = [stemmer.stem(word) for word in words] # apply stemming if stem is True\n else:\n words = [\n word_lemmatizer.lemmatize(word, pos=\"v\") for word in words\n ] # 
apply lemmatization if not stem\n\n # join words to make sentence\n documents = \" \".join(words)\n\n return documents", "title": "" }, { "docid": "1c8a947dbe3ff2fe7276d2e6f0273380", "score": "0.5997507", "text": "def stem_sentence(self, arabic_sentence: str) -> str:\n arabic_words = arabic_sentence.split(\" \")\n arabic_stem_words = [self.stemmer.stem(word) for word in arabic_words]\n return \" \".join(arabic_stem_words)", "title": "" }, { "docid": "b76a82e75ec8d556c1161b7cf57362c8", "score": "0.5984263", "text": "def process_text(text, stem=True):\n #text = text.translate(None, string.punctuation)\n text = text.translate(string.punctuation)\n #print(\"text\", text)\n tokens = word_tokenize(text)\n #print(\"process_text tokens\", tokens)\n \n if stem:\n stemmer = PorterStemmer()\n tokens = [stemmer.stem(t) for t in tokens]\n #print(\"stemmer tokens:\", tokens)\n\n return tokens", "title": "" }, { "docid": "aa4023e2be0e36972cc24b28959fcf2c", "score": "0.5976794", "text": "def GetSpecialStem(self):\r\n # Because both the \"future\" and \"conditional\" tenses have very similar irregulars,\r\n # we create a function that both those functions can use\r\n\r\n irregulars = {\"saber\":\"sabr\", \\\r\n \"poner\":\"pondr\", \\\r\n \"poder\":\"podr\", \\\r\n \"salir\":\"saldr\", \\\r\n \"tener\":\"tendr\", \\\r\n \"venir\":\"vendr\", \\\r\n \"hacer\":\"har\", \\\r\n \"querer\":\"querr\", \\\r\n \"decir\":\"dir\", \\\r\n \"haber\":\"habr\"}\r\n\r\n if irregulars.has_key(self.infinitive):\r\n return irregulars[self.infinitive]\r\n else:\r\n return -1", "title": "" }, { "docid": "44590f2c41fd524a9639e3b6519d7f62", "score": "0.5970283", "text": "def stemmatize_nonstopping_words_(self) -> None:\n stemmer = SnowballStemmer(\"portuguese\")\n self.nonstopping_stems = [stemmer.stem(word) for word in self.reduced_tokens]\n self.unique_nonstopping_stems = list(set(self.nonstopping_stems))", "title": "" }, { "docid": "0f31537fe4bd14c2870ed98adf7752dc", "score": "0.59642446", "text": "def normalize_text(text): \n # split into words\n tokens = nltk.tokenize.word_tokenize(text,language='spanish', preserve_line=False)\n # convert to lower case\n tokens = [w.lower() for w in tokens] \n # remove punctuation from each word\n table = str.maketrans('', '', string.punctuation)\n stripped = [w.translate(table) for w in tokens] \n # remove remaining tokens that are not alphabetic\n words = [word for word in stripped if word.isalpha()] \n # stop word and remove accent\n def strip_accents(s):\n return ''.join(c for c in unicodedata.normalize('NFD', s) if unicodedata.category(c) != 'Mn')\n stop_words = set(spanish_stopwords)\n words = [strip_accents(w) for w in words if not w in stop_words] \n stemmer = SnowballStemmer(\"spanish\")\n out = \"\"\n for word in words:\n out += stemmer.stem(word)+\" \" \n return out", "title": "" }, { "docid": "25c04d8b1009abb00af4e0903ffe7d16", "score": "0.594771", "text": "def stem(word: str) -> str:\n word = word.lower().strip()\n return stemmer.stem(word)", "title": "" }, { "docid": "81162bbf8ba7e64d6743f6290913a81c", "score": "0.5944887", "text": "def features(tokens, index, history):\r\n \r\n # init the stemmer\r\n stemmer = SnowballStemmer('english')\r\n \r\n # Pad the sequence with placeholders\r\n tokens = [('[START2]', '[START2]'), ('[START1]', '[START1]')] + list(tokens) + [('[END1]', '[END1]'), ('[END2]', '[END2]')]\r\n history = ['[START2]', '[START1]'] + list(history)\r\n \r\n # shift the index with 2, to accommodate the padding\r\n index += 2\r\n \r\n word, pos =
tokens[index]\r\n \r\n prevword, prevpos = tokens[index - 1]\r\n prevprevword, prevprevpos = tokens[index - 2]\r\n nextword, nextpos = tokens[index + 1]\r\n nextnextword, nextnextpos = tokens[index + 2]\r\n previob = history[index - 1]\r\n contains_dash = '-' in word\r\n contains_dot = '.' in word\r\n isnumber = is_number(word)\r\n allascii = all([True for c in word if c in string.ascii_lowercase])\r\n \r\n allcaps = word == word.capitalize()\r\n capitalized = word[0] in string.ascii_uppercase\r\n \r\n prevallcaps = prevword == prevword.capitalize()\r\n prevcapitalized = prevword[0] in string.ascii_uppercase\r\n \r\n nextallcaps = nextword == nextword.capitalize()\r\n nextcapitalized = nextword[0] in string.ascii_uppercase\r\n \r\n return {\r\n 'word': word,\r\n 'lemma': stemmer.stem(word),\r\n 'pos': pos,\r\n 'all-ascii': allascii,\r\n 'number': isnumber,\r\n \r\n 'next-word': nextword,\r\n 'next-lemma': stemmer.stem(nextword),\r\n 'next-pos': nextpos,\r\n \r\n 'next-next-word': nextnextword,\r\n 'nextnextpos': nextnextpos,\r\n \r\n 'prev-word': prevword,\r\n 'prev-lemma': stemmer.stem(prevword),\r\n 'prev-pos': prevpos,\r\n \r\n 'prev-prev-word': prevprevword,\r\n 'prev-prev-pos': prevprevpos,\r\n \r\n 'prev-iob': previob,\r\n \r\n 'contains-dash': contains_dash,\r\n 'contains-dot': contains_dot,\r\n \r\n 'all-caps': allcaps,\r\n 'capitalized': capitalized,\r\n \r\n 'prev-all-caps': prevallcaps,\r\n 'prev-capitalized': prevcapitalized,\r\n \r\n 'next-all-caps': nextallcaps,\r\n 'next-capitalized': nextcapitalized,\r\n }", "title": "" }, { "docid": "47c17c5a1d2942bc2e867ef637b77218", "score": "0.59402734", "text": "def stem_word(word):\n porter_stemmer = PorterStemmer()\n return porter_stemmer.stem(word)", "title": "" }, { "docid": "636f2ac96b7a470c1c5be4c2efb7be72", "score": "0.5936818", "text": "def __stem_words(self, words):\n if isinstance(words, str):\n words = words.split(' ')\n\n stemmer = LancasterStemmer()\n stems = []\n for word in words:\n stem = stemmer.stem(word)\n stems.append(stem)\n return stems", "title": "" }, { "docid": "1f12f67c4886d71464f64c361716d1f2", "score": "0.5935774", "text": "def test_stopwords_nltk_beginning(self):\n\n s = \"The Premier League clubs have spent 291M on agent fees\"\n t = Tokenizer(stem=False, stopwords=list(stopwords.words(\"english\")))\n self.assertEqual([ 'premier', 'league', 'clubs', 'spent', '291m', 'agent', 'fees' ], t.tokenize(s))", "title": "" }, { "docid": "7445dfec65c157657a357d4fdff8abb7", "score": "0.59283185", "text": "def process_text(text,stem=True):\n stop_words = set(stopwords.words('english'))\n \n text = text.translate(\"None\")\n text = re.sub('can\\'t', 'can not', text)\n text = re.sub('n\\'t', ' not', text)\n text = re.sub(' da ', ' directaccess ', text)\n text = re.sub('direct access', 'directaccess', text)\n text = re.sub(r'[^\\w]', ' ', text)\n\n tokens = word_tokenize(text)\n punctuation = re.compile(r'[-.?!,\":;()|0-9]')\n\n tokens = [punctuation.sub(\"\",w) for w in tokens]\n tokens = [w for w in tokens if not w in stop_words]\n tokens = [w for w in tokens if not w in stopwords_custom]\n tokens = [i for i in tokens if i not in string.punctuation]\n\n counter = 0\n## for i in tokens:\n## if i == 'da':\n## tokens[counter] = 'directaccess'\n## counter += 1 \n\n if stem:\n stemmer = SnowballStemmer(\"english\")\n tokens = [stemmer.stem(t) for t in tokens]\n\n tokens = [w for w in tokens if not w in special_handling_words]\n \n return tokens", "title": "" }, { "docid": "557ca9b24d7cb05101fd7f163a53b6c4", "score": 
"0.5919503", "text": "def test_no_word_normalization(self):\n\n s = \"YYYYYEEESSSSSSSSS OOXXXXXXXXXXXX!!!\"\n t = Tokenizer(normalize_words=False, character_normalization_count=2, stem=False, min_length=2)\n self.assertEqual([ \"yyyyyeeesssssssss\", \"ooxxxxxxxxxxxx\" ], t.tokenize(s))", "title": "" }, { "docid": "c15227a6c26f10ffcf15b5d77cdcc51c", "score": "0.59156567", "text": "def stem(word: str) -> str:\n return porter_stemmer.stem(word.lower())", "title": "" }, { "docid": "960ef7cfdd5aa721a0aadb9a9e8c3324", "score": "0.5912455", "text": "def test_define_textface_for_labeling_stem(): # ***Incomplete test\n ##########################\n # Arrange.\n clade_name = \"clade_name\"\n\n ##########################\n # Act.\n #x = define_textface_for_labeling_stem(clade_name)\n\n ##########################\n # Assert.\n assert True == True # ***Temporary.", "title": "" }, { "docid": "9d28c072f36d6a1158a8554e761f931a", "score": "0.59115344", "text": "def stem_term(self, token):\n return self.stemmer.stem(token)", "title": "" }, { "docid": "b0aef70286e3ea8f37b8eb2b8077f036", "score": "0.5910213", "text": "def stem(\n word: str, exceptions: Dict[str, str] = dict(), rem_umlauts: bool = True\n) -> str:\n\n word = normalize_middle_high_german(\n word, to_lower_all=False, to_lower_beginning=True\n )\n\n if word in exceptions:\n return exceptions[word]\n\n if word[0].isupper() is True or word in STOPS:\n return word\n\n return _stem_helper(word, rem_umlaut=rem_umlauts)", "title": "" }, { "docid": "6b69e72abc90ef9c4ce7bc41ada94796", "score": "0.59098035", "text": "def _check_stems(self, s, infls): \n\t\tmatch_stems = []\n\n\t\t# For each of the inflections that is a match, strip the inflection from the end of the word\n\t\t# and look up the stripped word (w) in the stems\n\t\tfor infl in infls:\n\t\t\tw = re.sub ( infl['ending'] + \"$\", \"\", s )\n\n\t\t\tfor stem in self.stems:\n\t\t\t\tif w == stem['orth']: \n\n\t\t\t\t\t# If the inflection and stem identify as the same part of speech\n\t\t\t\t\tif (\n\t\t\t\t\t\t\tinfl['pos'] == stem['pos']\n\t\t\t\t\t\tor (\n\t\t\t\t\t\t\t\tinfl['pos'] == \"VPAR\"\n\t\t\t\t\t\t\tand stem['pos'] == \"V\"\n\t\t\t\t\t\t\t)\n\t\t\t\t\t\t):\n\n\t\t\t\t\t\t# Ensure the inflections apply to the correct stem decl/conj/etc\n\t\t\t\t\t\tif infl['n'][0] == stem['n'][0]:\n\t\t\t\t\t\t\tis_in_match_stems = False \n\n\t\t\t\t\t\t\t# If this stem is already in the match_stems list, add infl to that stem (if not already an infl in that stem list)\n\t\t\t\t\t\t\tfor i, mst in enumerate(match_stems):\n\t\t\t\t\t\t\t\tif stem == mst['st']:\n\t\t\t\t\t\t\t\t\tis_in_match_stems = True\n\n\t\t\t\t\t\t\t\t\t# So the matches a stem in the match_stems. Is it unique to that stem's infls. If so, append it to that stem's infls. 
\n\t\t\t\t\t\t\t\t\tis_in_stem_infls = False\n\t\t\t\t\t\t\t\t\tfor stem_infl in mst['infls']:\n\t\t\t\t\t\t\t\t\t\tif stem_infl['form'] == infl['form']:\n\t\t\t\t\t\t\t\t\t\t\tis_in_stem_infls = True\n\t\t\t\t\t\t\t\t\t\t\t# we found a match, stop looking\n\t\t\t\t\t\t\t\t\t\t\tbreak\n\n\t\t\t\t\t\t\t\t\tif not is_in_stem_infls:\n\t\t\t\t\t\t\t\t\t\tmst['infls'].append( infl )\n\n\n\n\t\t\t\t\t\t\tif not is_in_match_stems:\n\t\t\t\t\t\t\t\tmatch_stems.append({ 'st':stem, 'infls':[infl] })\n\n\n\t\treturn match_stems", "title": "" }, { "docid": "76a10b1172b68fa990ef47eff7ecda76", "score": "0.5909802", "text": "def clean_sentence(document):\n # convert to lowercase\n document = document.lower()\n # remove stopword\n document = [word for word in word_tokenize(document) if word not in STOPLIST]\n # remove punctuation\n document = [word for word in document if word.isalpha()]\n # stem words\n return \" \".join(stem_doc(document))", "title": "" }, { "docid": "93f8c0cd834aa9af060a21ace888488e", "score": "0.5899036", "text": "def find_stem(arr):\n # Determine size of the array\n n = len(arr)\n\n # Take first word from array\n # as reference\n s = arr[0]\n ll = len(s)\n\n res = \"\"\n for i in range(ll):\n for j in range(i + 1, ll + 1):\n # generating all possible substrings of our ref string arr[0] i.e s\n stem = s[i:j]\n k = 1\n for k in range(1, n):\n # Check if the generated stem is common to to all words\n if stem not in arr[k]:\n break\n\n # If current substring is present in all strings and its length is\n # greater than current result\n if k + 1 == n and len(res) < len(stem):\n res = stem\n\n return res", "title": "" }, { "docid": "9ea806d44518a631c95cf27a9f6d98ce", "score": "0.5893373", "text": "def stemming(tweet_list):\n return [PorterStemmer().stem(word) for word in tweet_list]", "title": "" }, { "docid": "e0408bbc2a34add09daf4b87d713ca98", "score": "0.5889198", "text": "def set_stemming(self, v):\n self.use_stemming = v", "title": "" }, { "docid": "9171e41e0bc924e628540ecf09d9b3ea", "score": "0.5888226", "text": "def test_sentences(self):\r\n used_inx = []\r\n sentences = []\r\n for translation in self.__wordbook:\r\n if translation.class_type == \"s\":\r\n sentences.append(translation) # only append sentences\r\n for i in range(5):\r\n if i >= len(sentences):\r\n break\r\n word_index = r.randint(0, len(sentences) - 1)\r\n while word_index in used_inx:\r\n word_index = r.randint(0, len(sentences) - 1)\r\n used_inx.append(word_index)\r\n translation = sentences[word_index]\r\n sentence = translation.reverse[0].split(\" \")\r\n random = r.randint(0, len(sentence) - 1)\r\n missing_word = sentence[random]\r\n sentence[random] = \" \" + \"_\" * len(missing_word) + \" \"\r\n print(\" \".join(sentence), \"(\" + \" \".join(translation.forward) + \")\")\r\n self.fill_in_sentence(translation, missing_word)", "title": "" }, { "docid": "4d2f89bdc0fa87473b8db121f6d13747", "score": "0.5882565", "text": "def stem(self, word):\n word = word.lower()\n\n r1 = self._r1_scandinavian(word, self.__vowels)\n\n # STEP 1\n for suffix in self.__step1_suffixes:\n if r1.endswith(suffix):\n if suffix in (\"erte\", \"ert\"):\n word = \"\".join((word[:-len(suffix)], \"er\"))\n r1 = \"\".join((r1[:-len(suffix)], \"er\"))\n\n elif suffix == \"s\":\n if (word[-2] in self.__s_ending or\n (word[-2] == \"k\" and word[-3] not in self.__vowels)):\n word = word[:-1]\n r1 = r1[:-1]\n else:\n word = word[:-len(suffix)]\n r1 = r1[:-len(suffix)]\n break\n\n # STEP 2\n for suffix in self.__step2_suffixes:\n if 
r1.endswith(suffix):\n word = word[:-1]\n r1 = r1[:-1]\n break\n\n # STEP 3\n for suffix in self.__step3_suffixes:\n if r1.endswith(suffix):\n word = word[:-len(suffix)]\n break\n\n return word", "title": "" }, { "docid": "2a8f8031d77a678b0b21abc86166fff0", "score": "0.58703095", "text": "def test_middle_high_german_stemmer_dictionary(self):\n exception_dic = {\"biuget\": \"biegen\"}\n stemmed = middle_high_german_stemmer(\"swaȥ kriuchet unde fliuget und bein zer erden biuget\", rem_umlauts=False,\n exceptions=exception_dic)\n target = ['swaȥ', 'kriuchet', 'unde', 'fliuget', 'und', 'bein', 'zer', 'erden', 'biegen']\n\n self.assertEqual(stemmed, target)", "title": "" }, { "docid": "cea19ad829d56584054cda7a7fe4aed8", "score": "0.58670235", "text": "def test_sentence_tokenizer_latin_punkt(self):\n target = [\n \"O di inmortales!\",\n \"ubinam gentium sumus?\",\n \"in qua urbe vivimus?\",\n \"quam rem publicam habemus?\",\n \"Hic, hic sunt in nostro numero, patres conscripti, in hoc orbis terrae sanctissimo gravissimoque consilio, qui de nostro omnium interitu, qui de huius urbis atque adeo de orbis terrarum exitio cogitent!\",\n \"Hos ego video consul et de re publica sententiam rogo et, quos ferro trucidari oportebat, eos nondum voce volnero!\",\n \"Fuisti igitur apud Laecam illa nocte, Catilina, distribuisti partes Italiae, statuisti, quo quemque proficisci placeret, delegisti, quos Romae relinqueres, quos tecum educeres, discripsisti urbis partes ad incendia, confirmasti te ipsum iam esse exiturum, dixisti paulum tibi esse etiam nunc morae, quod ego viverem.\",\n ] # pylint: disable=line-too-long\n tokenizer = LatinPunktSentenceTokenizer()\n tokenized_sentences = tokenizer.tokenize(self.latin_text)\n self.assertEqual(tokenized_sentences, target)", "title": "" }, { "docid": "1b5be6ac643a2c93f3f8726f2c074392", "score": "0.5841694", "text": "def test_sentence_tokenizer_sanskrit(self):\n text = \"\"\"श्री भगवानुवाच भूय एव महाबाहो श्रृणु मे परमं वचः। यत्तेऽहं प्रीयमाणाय वक्ष्यामि हितकाम्यया।।\nन मे विदुः सुरगणाः प्रभवं न महर्षयः। अहमादिर्हि देवानां महर्षीणां च सर्वशः।।\"\"\"\n target = [\n \"श्री भगवानुवाच भूय एव महाबाहो श्रृणु मे परमं वचः।\",\n \"यत्तेऽहं प्रीयमाणाय वक्ष्यामि हितकाम्यया।।\",\n \"न मे विदुः सुरगणाः प्रभवं न महर्षयः।\",\n \"अहमादिर्हि देवानां महर्षीणां च सर्वशः।।\",\n ]\n tokenizer = SanskritRegexSentenceTokenizer()\n tokenized_sentences = tokenizer.tokenize(text)\n self.assertEqual(tokenized_sentences, target)", "title": "" }, { "docid": "7124fc55e2233f88b2f4e87380a46bd4", "score": "0.58339745", "text": "def book_stemming(words_list):\n porter = PorterStemmer()\n for i in range(len(words_list)):\n words_list[i] = porter.stem(words_list[i])\n return words_list", "title": "" }, { "docid": "565e996bad415955833ce2065d383250", "score": "0.5819575", "text": "def test_case_folding(self):\n\n s = \"#BREAKING Nine illegal miners killed by rival workers in S. 
Africa say police\"\n t = Tokenizer(case_fold=True, stem=False)\n self.assertEqual([ \"breaking\", \"nine\", \"illegal\", 'miners', 'killed', 'rival', 'workers', 'africa', 'say', 'police' ], t.tokenize(s))", "title": "" }, { "docid": "3806a2976bca9ed225a919cd0da42cba", "score": "0.581834", "text": "def stem_words(words):\r\n stemmer = LancasterStemmer()\r\n stems = []\r\n for word in words:\r\n stem = stemmer.stem(word)\r\n stems.append(stem)\r\n return stems", "title": "" }, { "docid": "787bf0fd93d334cc5fc792ff587acc6b", "score": "0.5815122", "text": "def do_stemmer(df, stop_language='spanish'):\n ## Como nos llegan tickets en dos idiomas añadimos las palabras de ambos idiomas\n stop = get_stop_words(stop_language) + get_stop_words('english')\n ## Añdimos nuestras propias palabras\n stop += [\"buenas\", \"buenos\" ,\"cid\", \"dias\", \"gracias\", \"hola\",\"mucho\",\"mucha\" ,\"poder\",\"proyecto\",\"please\" ,\"saludo\",\"tardes\",\"www\",\"habia\"]\n stop += ['ahora',\n 'algun',\n 'alguna',\n 'amanecia interrumpio',\n 'amanecia interrumpio relato',\n 'amanecia interrumpio relato habian',\n 'amanecia interrumpio relato habian dado',\n 'aquel',\n 'asi',\n 'aun',\n 'cada',\n 'vez',\n 'mas',\n 'cualquier',\n 'cosa',\n 'cuanto',\n 'dado',\n 'darse',\n 'debe',\n 'debia',\n 'despues',\n 'dia noche',\n 'dia siguiente',\n 'diez años',\n 'diez mil',\n 'dijo',\n 'dijo',\n 'dio',\n 'habia',\n 'mas',\n 'podia',\n 'podian',\n 'mismo',\n 'si',\n 'tal',\n 'tan',\n 'puede',\n 'pueden ser',\n 'pues',\n 'puso',\n 'toda',\n 'todas',\n 'vease tambien',\n 'primer lugar',\n 'varias',\n 'dos',\n 'largo',\n 'hacia'\n 'uno','una','unos','una','aquella','aquello','aquel',\n 'hace',\n 'muchas',\n 'mucho',\n 'muchos',\n 'mucha',\n 'pueden',\n 'puedo',\n 'unas',\n 'abrio puerta',\n 'arriba abajo',\n 'aqui alla',\n 'habian',\n 'doña','don','señor','señora','hizo','quedo',\n 'fuerza sino', \n 'quedo perplejo',\n 'parece haber',\n 'parece ser',\n 'parecia haber',\n 'mayor parte',\n 'mañana siguiente',\n 'media hora',\n 'hoy dia',\n 'iba ser',\n 'iii pag',\n 'haber hecho',\n 'habria podido',\n 'hacer cosas',\n 'hacia arriba',\n 'hacia atras',\n 'hacia puerta',\n 'hacia tiempo',\n 'decir verdad',\n 'dejo caer',\n 'demasiado tarde',\n 'derecha izquierda',\n 'di cuenta',\n 'dia anterior',\n 'dia noche',\n 'dia siguiente',\n 'casi siempre',\n 'cierto dia',\n 'cierto modo',\n 'cinco años',\n 'aqui alla',\n 'arriba abajo',\n 'aunque solo',\n 'año nuevo',\n 'años edad',\n 'buena parte',\n 'ninguna parte',\n 'noche anterior',\n 'noche dia',\n 'nunca visto',\n 'partido comunista',\n 'podria haber',\n 'podria ser',\n 'press cambridge',\n 'primer lugar',\n 'quiere decir',\n 'quiero decir',\n 'sentido comun',\n 'seria mejor',\n 'tras haber',\n 'tres años',\n 'tres cuatro',\n 'tres meses',\n 'voz alta',\n 'voz baja',\n ]\n stop_words_generated_tokens = ['abajo', 'abrio', 'alla', 'alta', 'amanecia', 'anterior', 'aqui', 'aren', 'arriba', 'atras', 'aunque', 'año', 'años', 'baja', 'buena', 'caer', 'cambridge', 'can', 'casi', 'cierto', 'cinco', 'comun', 'cosas', 'couldn', 'cuatro', 'cuenta', 'decir', 'dejo', 'demasiado', 'di', 'dia', 'didn', 'diez', 'doesn', 'edad', 'haber', 'habria', 'hacer', 'hacia', 'hadn', 'hasn', 'haven', 'hecho', 'hora', 'hoy', 'iba', 'iii', 'isn', 'let', 'll', 'lugar', 'mayor', 'mañana', 'media', 'mejor', 'meses', 'modo', 'mustn', 'ninguna', 'noche', 'nuevo', 'nunca', 'pag', 'parece', 'parecia', 'parte', 'partido', 'podido', 'podria', 'puerta', 'quiere', 'quiero', 're', 'relato', 'sentido', 'ser', 'seria', 'shan', 
'shouldn', 'siempre', 'siguiente', 'sino', 'solo', 'tambien', 'tarde', 'tiempo', 'tras', 'tres', 've', 'vease', 'visto', 'wasn', 'weren', 'won', 'wouldn']\n stop += stop_words_generated_tokens\n ps = SpanishStemmer()\n\n a=[]\n df[\"stem\"]=\"n\"\n for i, row in df.iterrows():\n a.append( ps.stem(row[\"text\"]).replace('fuerza sino', '').replace('acceder', 'acceso').replace('user', 'usuario').replace('access', 'acceso').replace('usuarios', 'usuario').replace('abrio puerta','').replace('acto seguido','')\n \n )\n df[\"stem\"]= a \n return df,stop", "title": "" } ]
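
Several of the stemming passages above wrap NLTK stemmers (PorterStemmer, LancasterStemmer, SnowballStemmer) behind small helpers. A minimal, self-contained usage sketch of that shared pattern follows; the word list is illustrative, not taken from any passage:

from nltk.stem import PorterStemmer

stemmer = PorterStemmer()
words = ["miners", "killed", "running"]
# Porter stemming maps inflected forms onto a common stem;
# the list above stems to ['miner', 'kill', 'run'].
stems = [stemmer.stem(word) for word in words]
print(stems)
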
eae8cfaa42b7253926e1f91971dd93f0
Translates the 'ChannelState' header's value into an int, setting it to `None` if coercion fails.
[ { "docid": "2513640b7b2e6a1c6c5cfd6d18d0dbd1", "score": "0.5043914", "text": "def process(self):\n (headers, data) = _Event.process(self)\n generic_transforms.to_int(headers, ('ChannelState',), None)\n return (headers, data)", "title": "" } ]
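
The positive passage above delegates the coercion to a generic_transforms.to_int helper that is not shown. A minimal sketch of what such a helper plausibly does, assuming it mutates the headers mapping in place; the real signature and error handling are not given in this record:

def to_int(headers, keys, default):
    # Hypothetical reconstruction: coerce each named header value to
    # int in place, substituting `default` when the value is missing
    # or cannot be converted.
    for key in keys:
        try:
            headers[key] = int(headers[key])
        except (KeyError, TypeError, ValueError):
            headers[key] = default
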
[ { "docid": "287a079864609786d6c5a8a05404306f", "score": "0.547437", "text": "def native_value(self) -> float | int | None:\n if self._is_counter:\n return float(self._channel.get_counter_state())\n return float(self._channel.get_state())", "title": "" }, { "docid": "6ec397624e497f27179664a8a1250315", "score": "0.54621285", "text": "def state_to_int(self,state):\n\t\t\n\t\treturn self._state_to_int(state)", "title": "" }, { "docid": "2a2df7acf429558fece16d3b28be4587", "score": "0.52378726", "text": "def _coerce_state_from_string(self, value: Union[int, str]) -> str:\n try:\n return self.STATE_MAP[int(value)]\n except KeyError:\n _LOGGER.error('Unknown state: %s', value)\n return STATE_UNKNOWN", "title": "" }, { "docid": "c4036a34d1296a6f5736fc6e183bb3aa", "score": "0.5211634", "text": "def StateFromInt(idmState):\n keys = [i for key, value in IDMStates.items() if value == idmState]\n if len(keys) != 1:\n return \"\"\n else:\n return keys[0]", "title": "" }, { "docid": "543b1b6f9448809f5764f577350bc152", "score": "0.50898165", "text": "def maybe_int(value):\n if value is None:\n return 0\n else:\n return int(value)", "title": "" }, { "docid": "54d378f1d98a0483b1b29d5b14539533", "score": "0.50564903", "text": "def parse_integer(self):\n if self.is_integer():\n return int(self.parse_word())\n else:\n return None", "title": "" }, { "docid": "5db53926d97ada8754989718e9aeeb1c", "score": "0.5050677", "text": "def IntFromState(idmState):\n return IDMStates[idmState]", "title": "" }, { "docid": "21764aa3999b2ff26d3ddc105968f69c", "score": "0.5020893", "text": "def convert(self, value):\n if value is None and self._skip_none:\n return value\n try:\n return int(value)\n except (ValueError, TypeError):\n raise ConvertError('Value (%s) is not a number e.g. 1, ${1}' % str(value))", "title": "" }, { "docid": "5d8086500b0e08336f1fb31e462559fa", "score": "0.5015194", "text": "def _maybe_int(value):\n if value is None:\n return 0\n else:\n return int(value)", "title": "" }, { "docid": "d76fe619db190267632ebb4e85aefcc3", "score": "0.5006031", "text": "def _parse_int(self, integer):\n try:\n return int(integer)\n except (ValueError, TypeError):\n return None", "title": "" }, { "docid": "84e0a829c85d52f828877606c01d1f10", "score": "0.5003577", "text": "def interpret_header(self, response, header):\n out = response.headers.get(header)\n if out is not None:\n return int(out)\n return out", "title": "" }, { "docid": "c93bda6530751dbb6739f842d11e8370", "score": "0.4999251", "text": "def toInt(self):\n return None", "title": "" }, { "docid": "62cb12458501bb404b0b2e83d36b53da", "score": "0.4896106", "text": "def _get_int(self, val):\n if val is None:\n return 0\n else:\n return int(val)", "title": "" }, { "docid": "cb2a8dc83b93b4e8003439763b2379e7", "score": "0.48716787", "text": "def attempt_int_conv(self,val):\n try:\n return int(val)\n except ValueError:\n return val", "title": "" }, { "docid": "62746b36a22adbce63333ed7768d500c", "score": "0.48682228", "text": "def _status_int__get(self):\n return int(self.status.split()[0])", "title": "" }, { "docid": "0835445634c838cd0f4cb5fb3195e77f", "score": "0.4851545", "text": "def as_int(value) -> int | None:\n if value is None:\n return None\n\n return int(value)", "title": "" }, { "docid": "e10d6ef9893e9401ee91520299ea7ce9", "score": "0.48430055", "text": "def load(self, raw_value):\r\n\t\ttry:\r\n\t\t\tvalue = int(raw_value)\r\n\t\texcept:\r\n\t\t\tvalue = None\r\n\t\treturn value", "title": "" }, { "docid": "86c61e79f5273714a65d0a189e5ce9d5", "score": "0.48063537", 
"text": "def _parse_int(self, raw: Optional[str]) -> Optional[int]:\n raw = self._parse_string(raw)\n if raw is None:\n return raw\n return int(raw)", "title": "" }, { "docid": "a682a288bffa6bbd1d8521b7a38bf22c", "score": "0.48043934", "text": "def parse_hexinteger(self):\n if self.is_hexinteger():\n return int(self.parse_word(), 16)\n else:\n return None", "title": "" }, { "docid": "075072606e7a404e7ba05ef3a843bbdd", "score": "0.47822532", "text": "def cast(self, x):\r\n if x.lower() == \"null\":\r\n return None\r\n else:\r\n return int(x)", "title": "" }, { "docid": "70a8f6404142ba1927302a7d5577547d", "score": "0.47710904", "text": "def state_to_int(self, state):\n nidx = []\n #value -> number\n for i in range(4):\n val = state[0, i]\n ni = np.digitize(val, self.stateBins[i])\n nidx.append(ni)\n\n #numbers -> String -> Int\n return int(''.join(str(x) for x in nidx))", "title": "" }, { "docid": "2804736d78297c1cf08b2da742938469", "score": "0.4765518", "text": "def _convert_raw_valve_to_int(raw):\n if str(raw).lower() == \"c\":\n n = 7\n elif str(raw).lower() == \"v\":\n n = 8\n else:\n n = convert_raw_to_int(raw)\n return n", "title": "" }, { "docid": "1e050782df1e47768c69faa1a4e7da53", "score": "0.4762956", "text": "def convert_value(self, type, payload):\n relevant = payload[22:26]\n if type == \"uint8\":\n return int.from_bytes(relevant, byteorder=\"little\", signed=False)\n elif type == \"int32\":\n return int.from_bytes(relevant, byteorder=\"little\", signed=True)\n elif type == \"uint32\":\n return int.from_bytes(relevant, byteorder=\"little\", signed=False)\n print(\"oops\", type)\n return int.from_bytes(relevant, byteorder=\"little\", signed=True)", "title": "" }, { "docid": "4ef36e06b179fbba9b75ae3040fd16ab", "score": "0.47449985", "text": "def numerize_state(state):\n numerized = 1 if state[2] else 0\n return (state[0], state[1], numerized)", "title": "" }, { "docid": "da0b69f0485b387f48e60a3ac1f86007", "score": "0.47092506", "text": "def get_state_int(db, key):\n if key in db:\n return int(db[key])\n else:\n return 0", "title": "" }, { "docid": "ee4606417b5f5073fbe91e2dbf0dbe80", "score": "0.4705646", "text": "def to_int(value, skip_none=False):\n return ToInt(skip_none=skip_none).convert(value)", "title": "" }, { "docid": "0b628f8e12ae007db3401c5d3209d027", "score": "0.47002482", "text": "def read_nullable_int32(self, field_name: str) -> typing.Optional[int]:", "title": "" }, { "docid": "92fbea525ee9057a87e934a11c2d69a4", "score": "0.4691564", "text": "def __get_boiler_state_number(self, state_name):\n states = {\n 'Preparation': 1, 'Heating up': 2, 'Pre-heating': 3, 'Ignition': 4, 'Heating': 5,\n 'Cleaning': -1, 'Shutdown wait': -2, 'Shutdown wait 1': -3, 'Shutdown wait 2': -4,\n 'Fault': -5\n }\n try:\n state_number = states[state_name]\n except KeyError:\n state_number = 0\n return state_number", "title": "" }, { "docid": "13326e8061895adc7a9d35385a7e481e", "score": "0.4690885", "text": "def convert_to_intc(paramname):\n if not isinstance(paramname, cty.c_int):\n try:\n retv0 = int(paramname)\n except ValueError:\n raise XFormsTypeError(\"Provided parameter '%s' is of %s, \"\n \"but an 'int'/'c_int' type should be used.\" % \\\n (paramname, type(paramname)))\n retv = cty.c_int(retv0)\n return retv\n else:\n return paramname", "title": "" }, { "docid": "ee424f9951a00e755247a2b258d676b3", "score": "0.46897933", "text": "def get_channel(ccc: int) -> int:\n return ccc & 0xF", "title": "" }, { "docid": "7a7015135a2e3fb3d78696c036f2afcc", "score": "0.46831268", "text": "def 
_get_truth_and_convert_to_number(x):\n\n truth_value = x.get('truth')\n\n return int(Truth.to_int.get(truth_value))", "title": "" }, { "docid": "3821eb2ce1264477ee58f228aa68e003", "score": "0.46786115", "text": "def color_to_int(c):\n color_to_int = {\"b\": BLACK , \"w\": WHITE, \"e\": EMPTY, \n \"BORDER\": BORDER}\n return color_to_int[c]", "title": "" }, { "docid": "03a07468746e4925eff346e2c387c2fd", "score": "0.4675935", "text": "def parse_int(v: Any) -> Union[int, None]:\n try:\n return int(v)\n except ValueError:\n return None", "title": "" }, { "docid": "93cf693e4c0e7a985e272852ddcf0a21", "score": "0.46395797", "text": "def read_nullable_int16(self, field_name: str) -> typing.Optional[int]:", "title": "" }, { "docid": "5d26e88997ff1510b6bee7d04d1842af", "score": "0.46317142", "text": "def parseenum(self, payload, partial=False):\n res = self.parsetlv('\\x0a', payload, partial)\n if partial:\n return (ord(res[0]), res[1])\n else:\n return ord(res[0])", "title": "" }, { "docid": "65ab41ab588b055a0ac5b6a7e0bd6375", "score": "0.46245044", "text": "def convert_string_to_int_or_none(string: str) -> Optional[int]:\n if not string:\n return None\n return int(string.replace(\",\", \"\"))", "title": "" }, { "docid": "f6860866053618014f3d3fc61a56f1e2", "score": "0.4582383", "text": "def tryToInt(self, value):\n temp_int = 0\n try:\n temp_int = int(value)\n except ValueError:\n temp_int = 0\n return temp_int", "title": "" }, { "docid": "5ff1dce85c993f54481abb141a84fcfb", "score": "0.458035", "text": "def int_or_none(value):\n try:\n result = int(value)\n except (ValueError, TypeError):\n result = None\n return result", "title": "" }, { "docid": "570cfd2a9217bf17bfb2cc3b1a2fb7f5", "score": "0.4566313", "text": "def convert_clearances(clearance: str) -> int:\n c = {'SC': 3, 'DV': 4, 'CTC': 2, 'BPSS': 1}\n return c[clearance]", "title": "" }, { "docid": "0e946078367d42539f734bea60ccabe6", "score": "0.45633447", "text": "def church_to_int(n):\n \"*** YOUR CODE HERE ***\"", "title": "" }, { "docid": "fb12b05d2977fc7ada9105471cbe3824", "score": "0.4555117", "text": "def handler_from_int(value):\n return handler_from_bit_length(value.bit_length())", "title": "" }, { "docid": "970cc308fd31f79ce233336428929014", "score": "0.45535022", "text": "def to_int(self):\n return int(self.value)", "title": "" }, { "docid": "20f0ac02a684852b7278231e3885f3ea", "score": "0.45505667", "text": "def _status_decoder(status_int):\n status_dict = {1: 'Green',\n 2: 'Red',\n 3: 'Success',\n 4: 'Failed'\n }\n return status_dict[status_int]", "title": "" }, { "docid": "7f9cdfec9048a7a435ab3c428c72c22f", "score": "0.45052633", "text": "def asIntOrNone(v):\n return asIntOrDefault(v)", "title": "" }, { "docid": "71bbe4c2d5bb5d05ca148358c9dd7eb6", "score": "0.4494229", "text": "def __extract_int(cls, header, location):\n\n try:\n return int(cls.__extract_field(header, location))\n except ValueError:\n raise Ai1wmError('invalid header field')", "title": "" }, { "docid": "598f4ed1e8d4f0620962ef01d1054bd2", "score": "0.44865847", "text": "def int(val: 'Optional[b_int]' = None) -> PythonValue:\n return PythonValue(Int(val))", "title": "" }, { "docid": "30d31c154379e94efea822fb96bf2190", "score": "0.44746062", "text": "def _get_channel_index(self, channel_name):\n channel_names = [channel[\"name\"] for channel in self._header[\"channels\"]]\n try:\n channel_index = channel_names.index(channel_name)\n except ValueError:\n channel_index = None\n\n return channel_index", "title": "" }, { "docid": "e96c1b73adb1e9b7e423af4b15f86470", 
"score": "0.447083", "text": "def get_channel_id(self, channel: ServiceChannel) -> int:\n\n # Core and Ack are fixed, don't need mapping\n if channel == ServiceChannel.Core:\n return self.CHANNEL_CORE\n elif channel == ServiceChannel.Ack:\n return self.CHANNEL_ACK\n\n if channel not in self._channel_mapping:\n raise ChannelError(\n f\"Channel ID not found for ServiceChannel: {channel}\"\n )\n return self._channel_mapping[channel]", "title": "" }, { "docid": "1290f4dac3277faa3c077e1aaca44fc5", "score": "0.44676653", "text": "def get_int(self, key: str) -> int:\n try:\n data = int(value.decode(\"utf-8\"))\n except Exception:\n data = 0\n return data", "title": "" }, { "docid": "e5342bfbe220d17e2811bc2025944787", "score": "0.44650385", "text": "def getInteger(self):\r\n if self.isInteger():\r\n return self.value\r\n else:\r\n return None", "title": "" }, { "docid": "5ed7fb87f22be3102ac219834eb3de85", "score": "0.4464581", "text": "def __parsenumber__(self, value):\n value = str(value)\n if value[:2] == '0x':\n if not value[2:].translate(None, string.digits):\n return int(value, 16)\n\n elif value[:1] == '0':\n if not value.translate(None, string.digits):\n return int(value, 8)\n\n else:\n return int(value)\n\n raise ValueError('Invalid number {0}'.format(value))", "title": "" }, { "docid": "4632c31a1f97a9e365549aab51577db1", "score": "0.44598037", "text": "def str_to_status(self, value):\n if type(value) == int:\n return value\n elif value.upper() == 'OPEN':\n return self.opened\n elif value.upper() == 'CLOSED':\n return self.closed\n elif value.upper() == 'ACTIVE':\n return self.active", "title": "" }, { "docid": "476627e9e92064b21817a3eff3a62704", "score": "0.44582298", "text": "def test_local_int_value(redis_dict_factory):\n redis_dict = redis_dict_factory(re_md_channel_name=\"test_local_int_value\")\n redis_dict[\"local_int\"] = -1\n assert redis_dict[\"local_int\"] == -1", "title": "" }, { "docid": "ab1e2e9d2f0953b2b06830075de16cef", "score": "0.4455113", "text": "def state(self):\n state_value = getattr(self._node, STICK_API[self.sensor_id][ATTR_STATE])\n if state_value is not None:\n return float(round(state_value, 3))\n return None", "title": "" }, { "docid": "5ceac05ace7f5ce69ed4712896e5d76d", "score": "0.44533315", "text": "def convert_back(self, formatted_value: typing.Optional[str]) -> typing.Optional[int]:\n formatted_value = re.sub(\"[+-](?!\\d)|(?<=\\.)\\w*|[^-+0-9]\", \"\", formatted_value) if self.__fuzzy and formatted_value else None\n if formatted_value:\n return int(formatted_value)\n else:\n return None if self.__pass_none else 0", "title": "" }, { "docid": "861edac3e99e6a4db6ec48c4f552ac59", "score": "0.44506353", "text": "def read_int32(self, field_name: str) -> int:", "title": "" }, { "docid": "e90bdbbaaf42ec1fda76ae56541777ca", "score": "0.4447079", "text": "def track_num_to_int(track_num_str):\n\n if track_num_str == '':\n return -1\n\n if '/' in track_num_str:\n track_num_str = re.sub('(/(.*)$)', '', track_num_str)\n\n try:\n track_num = int(track_num_str)\n except ValueError:\n track_num = -1\n\n return track_num", "title": "" }, { "docid": "c38a409f725464a1226700a001839271", "score": "0.44417208", "text": "def intOrNone(val):\n if val is None:\n return None\n else:\n return int(val)", "title": "" }, { "docid": "f5dbd5fd6d9941cc41ca7fe6761d06db", "score": "0.44415003", "text": "def write_nullable_int32(self, field_name: str, value: typing.Optional[int]) -> None:", "title": "" }, { "docid": "68ddceb4688781de2b44a00abc37e6e8", "score": "0.44255853", "text": "def 
read_compact_uint(self) -> int:\n value = self.read_uint8()\n if value <= 252:\n return value\n elif value == 0xfd:\n return self.read_uint16()\n elif value == 0xfe:\n return self.read_uint32()\n elif value == 0xff:\n return self.read_uint64()", "title": "" }, { "docid": "3b762c0a994262285b3b45f6f567f259", "score": "0.44074324", "text": "def _int_or_none(str):\n try:\n retval = int(str)\n except ValueError:\n retval = ND_INT\n\n if retval == -9999:\n retval = ND_INT\n\n return retval", "title": "" }, { "docid": "894bc21525a35deea834376c47d1d00f", "score": "0.43989322", "text": "def convert_to_uintc(paramname):\n if not isinstance(paramname, cty.c_int):\n try:\n retv0 = int(paramname)\n except ValueError:\n raise XFormsTypeError(\"Provided parameter '%s' is of %s, \"\n \"but an 'int_pos'/'c_uint' type should be used.\" % \\\n (paramname, type(paramname)))\n else:\n retv = cty.c_uint(retv0)\n return retv\n else:\n return paramname", "title": "" }, { "docid": "040efac86d8c0835639c9c14e874bde0", "score": "0.4391081", "text": "def process(self):\n (headers, data) = _Event.process(self)\n \n try:\n (h, m, s) = (int(v) for v in headers['Duration'].split(':'))\n headers['Duration'] = s + m * 60 + h * 60 * 60\n except Exception:\n headers['Duration'] = -1\n \n generic_transforms.to_int(headers, ('ChannelState',), None)\n \n return (headers, data)", "title": "" }, { "docid": "d4e0e5ad9d6e9a5bb3ee31e3cd685fd8", "score": "0.4391043", "text": "def safe_int(val):\n try:\n return int(val)\n except ValueError:\n return None", "title": "" }, { "docid": "1d20b0a54d540c784263a7c18c3a1970", "score": "0.43879238", "text": "def _freq_to_channel(self, freq):\n ret = None\n freq_dict = {'2.412 GHz': 1, '2.417 GHz': 2, '2.422 GHz': 3,\n '2.427 GHz': 4, '2.432 GHz': 5, '2.437 GHz': 6,\n '2.442 GHz': 7, '2.447 GHz': 8, '2.452 GHz': 9,\n '2.457 GHz': 10, '2.462 GHz': 11, '2.467 GHz': 12,\n '2.472 GHz': 13, '2.484 GHz': 14 }\n try:\n ret = freq_dict[freq]\n except KeyError:\n print \"Couldn't determine channel number for frequency: \" + str(freq)\n return ret", "title": "" }, { "docid": "518e005dafb59a9ac941a43dbbcbf90c", "score": "0.43753976", "text": "def _int_from_json(value, field):\n if _not_null(value, field):\n return int(value)", "title": "" }, { "docid": "103fe8041c0adeb4c089f5e7eca82273", "score": "0.4372268", "text": "def toInt(key):\n try:\n v = int(options[key])\n except:\n print(\"Warning: --%s must be a valid integer value!\" % (key))\n sys.exit(1)\n\n return v", "title": "" }, { "docid": "42b9be73deb2863400e7128a8d2625d9", "score": "0.43630415", "text": "def _parse_int(node, key):\n element = node.find(key)\n if element is not None:\n return int(element.text)\n else:\n return None", "title": "" }, { "docid": "4bce4417bb0d1d1be412a75a57169471", "score": "0.43605906", "text": "def as_int(self) -> int:\n if self.the_id == \"bad-entity-id\":\n return -1\n return int(self.the_id)", "title": "" }, { "docid": "d448d0648699d8b6a25cfb899b9f2e05", "score": "0.43600205", "text": "def read_int(self) -> int:\n raise NotImplementedError()", "title": "" }, { "docid": "0ef51838196134693fae50188dd9ec75", "score": "0.43523064", "text": "def get_int(self, section, prop):\n value_string = self.get(section, prop)\n if value_string == \"\":\n return None\n try:\n value_int = int(value_string)\n # we could also try to handle port name\n # strings (ie, 'http') here with getservbyname\n except (ValueError, TypeError):\n raise ValueError(\n \"Section: %s, Property: %s - Integer value expected\"\n % (section, prop))\n return 
value_int", "title": "" }, { "docid": "66e701ee3a19b9b3a143757b88fb3908", "score": "0.43521556", "text": "def _toInt(self, input):\r\n try: iret = int(input)\r\n except: return None\r\n else: return iret", "title": "" }, { "docid": "421326d2da5c70ba3ba8341e7ab108d8", "score": "0.43514058", "text": "def to_int(self) -> int:\r\n try:\r\n return int(self.string)\r\n except ValueError:\r\n raise CannotConvertToError(\"int\")", "title": "" }, { "docid": "c8438c1f9a7126344fbc3462ad1a8b61", "score": "0.43506628", "text": "def nullint(value: t.Optional[t.Any]) -> t.Optional[int]:\n return int(value) if value else None", "title": "" }, { "docid": "85ef2ca6ff36104172f85fcb11b1f72c", "score": "0.43489364", "text": "def parse_int(value, *, default=None):\n try:\n return int(value)\n except ValueError:\n return default", "title": "" }, { "docid": "9147dd666452946f7486163b7c58a933", "score": "0.4348282", "text": "def _clean(channel):\n return map(lambda x: int(x.real), channel)", "title": "" }, { "docid": "e7f3989c64a3c6f5e59dbebd68902b48", "score": "0.4343338", "text": "def to_positive_int(value: str) -> Union[int, None]:\n value = re.sub(r\"[^\\d./]\", \"\", value) if value else \"\"\n value = re.sub(r\"\\.$\", \"\", value)\n try:\n return int(value)\n except ValueError:\n return None", "title": "" }, { "docid": "e054f9fc543e5aff52096d3053012e7e", "score": "0.43416163", "text": "def __init__(self, state):\n if isinstance(state, int):\n if state is EventState.Unknown0:\n self.__state = EventState.Unknown0\n elif state is EventState.NotTransmitted:\n self.__state = EventState.NotTransmitted\n elif state is EventState.Unknown2:\n self.__state = EventState.Unknown2\n elif state is EventState.Unknown3:\n self.__state = EventState.Unknown3\n elif state is EventState.Completed:\n self.__state = EventState.Completed\n elif state is EventState.Failed:\n self.__state = EventState.Failed\n elif state is EventState.Unknown:\n self.__state = EventState.Unknown\n else:\n raise ValueError(\"Unknown state init \" + str(state))\n elif isinstance(state, str):\n # more states are missing\n if state == \"NOT_TRANSMITTED\":\n self.__state = EventState.NotTransmitted\n elif state == \"TRANSMITTED\":\n self.__state = EventState.Transmitted\n elif state == \"IN_PROGRESS\":\n self.__state = EventState.InProgress \n elif state == \"COMPLETED\":\n self.__state = EventState.Completed\n elif state == \"FAILED\":\n self.__state = EventState.Failed\n else:\n raise ValueError(\"Unknown state init '\" + state + \"'\")\n else:\n raise ValueError(\n \"EventState init can only be called with int or str.\")", "title": "" }, { "docid": "05a1ebb40be0e34ad86d5cf97d680d37", "score": "0.43400937", "text": "def convert_state_code_to_num(state_code):\n state_abbr_dict = [(\"01\", \"AL\"), (\"02\", \"AK\"), (\"04\", \"AZ\"), (\"05\", \"AR\"),\n (\"06\", \"CA\"), (\"08\", \"CO\"), (\"09\", \"CT\"), (\"10\", \"DE\"),\n (\"11\", \"DC\"), (\"12\", \"FL\"), (\"13\", \"GA\"), (\"15\", \"HI\"),\n (\"16\", \"ID\"), (\"17\", \"IL\"), (\"18\", \"IN\"), (\"19\", \"IA\"),\n (\"20\", \"KS\"), (\"21\", \"KY\"), (\"22\", \"LA\"), (\"23\", \"ME\"),\n (\"24\", \"MD\"), (\"25\", \"MA\"), (\"26\", \"MI\"), (\"27\", \"MN\"),\n (\"28\", \"MS\"), (\"29\", \"MO\"), (\"30\", \"MT\"), (\"31\", \"NE\"),\n (\"32\", \"NV\"), (\"33\", \"NH\"), (\"34\", \"NJ\"), (\"35\", \"NM\"),\n (\"36\", \"NY\"), (\"37\", \"NC\"), (\"38\", \"ND\"), (\"39\", \"OH\"),\n (\"40\", \"OK\"), (\"41\", \"OR\"), (\"42\", \"PA\"), (\"44\", \"RI\"),\n (\"45\", \"SC\"), (\"46\", \"SD\"), (\"47\", 
\"TN\"), (\"48\", \"TX\"),\n (\"49\", \"UT\"), (\"50\", \"VT\"), (\"51\", \"VA\"), (\"53\", \"WA\"),\n (\"54\", \"WV\"), (\"55\", \"WI\"), (\"56\", \"WY\"), (\"72\", \"PR\")]\n\n state_num = \"*\"\n for tup in state_abbr_dict:\n if tup[1] == state_code.upper():\n state_num = tup[0]\n\n return state_num", "title": "" }, { "docid": "80372b2082dd300d8297ac084cbeb67a", "score": "0.43346402", "text": "def json_to_int(self, number_string, default=True):\n if number_string.isdigit():\n return int(number_string)\n if default:\n return 0\n return None", "title": "" }, { "docid": "56a47e3ad1dd09d6d8825c9bd6165744", "score": "0.43339217", "text": "def _decode_port_status(cls, reason):\n\n return cls._port_status_codes.get(reason, \"UNKNOWN\")", "title": "" }, { "docid": "3e63358a58ef0e3bafdbfdd2acb1b708", "score": "0.43317416", "text": "def parse_int(value):\n \n return int(value) if value.strip() != '' else 0", "title": "" }, { "docid": "b6f3efc623ddea682fe0e6c53186ae97", "score": "0.43244857", "text": "def convert(s):\n x = -1\n try:\n x = int(s)\n print(\"Convertion succeeded! x=\",x)\n except (ValueError,TypeError) as e:\n print(\"Convertion error : {}\".format(str(e)))\n return x", "title": "" }, { "docid": "6d97077b118a1059dcfd9de020fcf90e", "score": "0.43179882", "text": "def update_state(self, value):\n\t\t_LOGGER.debug(\"update sensor %s with value %s\", self._type, value)\n\t\tif not self.ready:\n\t\t\treturn\n\n\t\tif value.isdigit():\n\t\t\tself._state = int(value)\n\t\telse:\n\t\t\tself._state = value\n\n\t\tself.async_schedule_update_ha_state()", "title": "" }, { "docid": "c2d256b4de1352374109dfb6e969c5fe", "score": "0.4317513", "text": "def process_state(self, state):\r\n state = tf.cast(state, tf.float32)\r\n state /= self.config.high\r\n\r\n return state", "title": "" }, { "docid": "cf2c0a5edb37352b2bbd84d4665df187", "score": "0.4317495", "text": "def ret_int(potential):\n try:\n return int(potential)\n except:\n return None", "title": "" }, { "docid": "2c3ddb249bd203d452c60651702f335a", "score": "0.4315744", "text": "def _unstructure_enum(self, obj: Enum) -> Any:\n return obj.value", "title": "" }, { "docid": "8ee32e1762665d6d40b667a455e2f704", "score": "0.43155596", "text": "def db_value(self, value) -> int:\n if value is not None:\n if type(value) != int:\n return int(value.replace(':', '').replace('.', '').replace('-', ''), 16)\n else:\n return value", "title": "" }, { "docid": "8ee32e1762665d6d40b667a455e2f704", "score": "0.43155596", "text": "def db_value(self, value) -> int:\n if value is not None:\n if type(value) != int:\n return int(value.replace(':', '').replace('.', '').replace('-', ''), 16)\n else:\n return value", "title": "" }, { "docid": "cc778d2ad0c24bc683711b7038d86f01", "score": "0.43096808", "text": "def _missing_(cls, value: 'int') -> 'HIAlgorithm':\n if not (isinstance(value, int) and 0 <= value <= 65535):\n raise ValueError('%r is not a valid %s' % (value, cls.__name__))\n if 10 <= value <= 12:\n #: Unassigned\n return extend_enum(cls, 'Unassigned_%d' % value, value)\n if 14 <= value <= 65535:\n #: Unassigned\n return extend_enum(cls, 'Unassigned_%d' % value, value)\n return super()._missing_(value)", "title": "" }, { "docid": "11a62beef563d63614849383fc046bc6", "score": "0.43038574", "text": "def get_as_nullable_integer(self, key: str) -> Optional[int]:\n value = self.get(key)\n return IntegerConverter.to_nullable_integer(value)", "title": "" }, { "docid": "7b9e65ffde7bae4c0b7a8ba27ae9db15", "score": "0.43036366", "text": "def get_value(self):\n return 
int(self.protocol)", "title": "" }, { "docid": "90566879b437edfacc6c36e6b457b10c", "score": "0.42988098", "text": "def _set_decoder_state(self, position):\n bits = np.unpackbits(\n np.array([position], dtype=np.uint32).byteswap().view(np.uint8)\n )\n self.current_state = bits[-self.n:]", "title": "" }, { "docid": "8aef275244212d31a86a39b452ed9df4", "score": "0.42981523", "text": "def _parse_int(self, value):\n try:\n return int(value.replace(',', ''))\n except ValueError:\n # If we can't parse it for an int return the OG string\n return value", "title": "" }, { "docid": "23fc2fde0c22d58dc0a91fdc07105885", "score": "0.4289289", "text": "def _parse_counter(self, raw_counter):\n\t\tif not raw_counter.isdigit() or int(raw_counter) <= 0:\n\t\t\traise TypeError('Expecting non-zero, numeric value.')\n\n\t\tresult = int(raw_counter)\n\t\treturn result", "title": "" }, { "docid": "bcd68c0335169d1e97bb30aa2071371c", "score": "0.4289231", "text": "def channelToFloor(self, channel):\r\n if channel in INPUT.SENSORS:\r\n floor = INPUT.SENSORS.index(channel)+1\r\n type = INPUT.SENSORS\r\n\r\n elif channel in INPUT.UP_BUTTONS:\r\n floor = INPUT.UP_BUTTONS.index(channel)+1\r\n type = INPUT.UP_BUTTONS\r\n\r\n elif channel in INPUT.DOWN_BUTTONS:\r\n floor = INPUT.DOWN_BUTTONS.index(channel)+2 ##Because down-button does not exist.\r\n type = INPUT.DOWN_BUTTONS\r\n\r\n elif channel in INPUT.IN_BUTTONS:\r\n floor = INPUT.IN_BUTTONS.index(channel)+1\r\n type = INPUT.IN_BUTTONS\r\n\r\n elif channel in OUTPUT.UP_LIGHTS:\r\n floor = OUTPUT.UP_LIGHTS.index(channel)+1\r\n type = OUTPUT.UP_LIGHTS\r\n\r\n elif channel in OUTPUT.DOWN_LIGHTS:\r\n floor = OUTPUT.DOWN_LIGHTS.index(channel)+2 ##Because down-button does not exist.\r\n type = OUTPUT.DOWN_LIGHTS\r\n\r\n elif channel in OUTPUT.IN_LIGHTS:\r\n floor = OUTPUT.IN_LIGHTS.index(channel)+1\r\n type = OUTPUT.IN_LIGHTS\r\n\r\n elif channel in OUTPUT.FLOOR_LIGHTS:\r\n floor = OUTPUT.FLOOR_LIGHTS.index(channel)+1\r\n type = OUTPUT.FLOOR_LIGHTS\r\n\r\n else:\r\n raise WrongChannelException()\r\n return (floor,type)", "title": "" }, { "docid": "0b411e038726eabff8b729136db42d11", "score": "0.42786512", "text": "def get_int_or_none(value):\n if type(value) is str:\n return int(value)\n else:\n return value", "title": "" }, { "docid": "3957b149be15c6740112ff8e93e38498", "score": "0.42775536", "text": "def parse_int(value, *, default=None):\n\n try:\n return int(value)\n except ValueError:\n return default", "title": "" }, { "docid": "ba9f9e80c784b9e447fc06f743cc6673", "score": "0.42746192", "text": "async def _state_from_remote_value(self) -> None:\n if self.remote_value.value is not None:\n await self._set_internal_state(self.remote_value.value)", "title": "" } ]
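
One of the negatives above (the process method handling a 'Duration' header) normalizes an H:M:S string to seconds before applying the same ChannelState coercion. A standalone sketch of that conversion, with a hypothetical function name:

def duration_to_seconds(value):
    # Parse an "H:M:S" duration string into total seconds, falling
    # back to -1 on any parse failure, mirroring the passage above.
    try:
        h, m, s = (int(v) for v in value.split(':'))
        return s + m * 60 + h * 60 * 60
    except Exception:
        return -1
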
b49c68ba0c9455036b1440dd3ec0fe4b
Print the closest prime number larger than `number`.
[ { "docid": "62104e576ee10c707b481e00ad87ba49", "score": "0.66522646", "text": "def print_next_prime(number):\r\n index = number\r\n while True:\r\n index += 1\r\n if is_prime(index):\r\n print(index)", "title": "" } ]
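
The positive passage above calls an is_prime helper without defining it. One plausible trial-division implementation, given as an assumption since the original helper is not shown in this record:

def is_prime(n):
    # Trial division up to sqrt(n); adequate for the small inputs a
    # "print the next prime" helper typically sees.
    if n < 2:
        return False
    i = 2
    while i * i <= n:
        if n % i == 0:
            return False
        i += 1
    return True
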
[ { "docid": "6a2f3578406c458b1eb613a213a82f68", "score": "0.6859278", "text": "def main():\n\n number = int(input())\n if number > 1:\n least_factor = min(prime_factors(number))\n else:\n least_factor = 1\n\n if least_factor != number:\n print(number - number // least_factor)\n else:\n print(number - 1)", "title": "" }, { "docid": "e111805acbfe71b754a614f0ebfc6c5d", "score": "0.6648313", "text": "def print_next_prime(number):\n while True:\n number +=1\n if (is_prime(number) is True):\n print number\n break", "title": "" }, { "docid": "564bb42255be72db8a85f483581690ca", "score": "0.66018647", "text": "def ten_thousand_first_prime():\n # natural numbers, starting with 2\n n = 2\n # prime number counter\n count = 0\n # the newest prime number found\n largest = 0\n while count < 10001:\n if is_prime(n) is True:\n count += 1\n largest = n\n n += 1\n print(largest)", "title": "" }, { "docid": "4de1b8b9336341ded7606c7b87f06279", "score": "0.633975", "text": "def problem3():\n n = 600851475143\n largest = -1\n # brute force\n for x in range(1, int(n ** 0.5)):\n if n % x == 0 and is_prime(x):\n print 'largest is now {0}'.format(x)\n largest = x\n\n print largest", "title": "" }, { "docid": "74ef08e19cf58d957dabc8c69053cfb7", "score": "0.6109681", "text": "def make_primes(number):\n listofprimes = [2]\n starter = 3\n result = 2\n\n while starter < number:\n if all (starter%num for num in listofprimes if num <= sqrt(starter)) != 0:\n listofprimes.append(starter)\n result += starter\n starter += 2\n\n return listofprimes[-1], result", "title": "" }, { "docid": "d7480a06ce5124c6522fb3e1cdfaa8d7", "score": "0.60681033", "text": "def next_prime(num):\n is_new_prime= False\n for current_number in range(num, num*2): # next prime has to be between num and num*2\n is_new_prime = True\n for number in range(2, current_number -1): ## is_new_prime set to False if number can be divided within this loop\n if current_number % number == 0 : \n is_new_prime= False\n break;\n if is_new_prime == True:\n print(f\"{current_number} is the next prime after {num}\")\n return current_number", "title": "" }, { "docid": "7c6ff194789feba5cc38acd9c87839eb", "score": "0.6062941", "text": "def findNthPrime (n):\n candidate = 3\n counter = 1\n while (counter < n):\n if isPrime(candidate):\n counter += 1\n candidate += 2\n\n candidate -= 2\n print(candidate)", "title": "" }, { "docid": "7d920f8d869d96ab8d0fb9e99761eb6f", "score": "0.60466623", "text": "def main():\r\n \r\n i = 2\r\n p = 0\r\n n = 10001\r\n \r\n while True:\r\n if isPrime(i):\r\n p += 1\r\n if p == n:\r\n break\r\n i += 1\r\n \r\n print i\r\n return 0", "title": "" }, { "docid": "e1f6b1433f0c6bd443d4c4206ed6604c", "score": "0.6001692", "text": "def primefactor():\r\n\r\n primecheck = 2\r\n primeanswer = maxvalue\r\n while primecheck < primeanswer:\r\n if (primeanswer%primecheck == 0):\r\n primeanswer = primeanswer/primecheck\r\n print(primecheck)\r\n else:\r\n primecheck = primecheck + 1\r\n print(primecheck)", "title": "" }, { "docid": "49a5a72a699525b43f86dd63cbf0320a", "score": "0.5980322", "text": "def main(number=None):\n if number and number > 2:\n click.echo(True if rust_lib.is_prime(number) else False)\n else:\n click.echo(\"Please supply an integer argument greater than 2. 
The \"\n \"console script will tell you if it is a prime number\")", "title": "" }, { "docid": "d6662ef8299d80b712fd63b2debb377f", "score": "0.597868", "text": "def find_prime(n):\n\tfor i in primes[::-1]:\n\t\tif( n>= i and n%i==0):\n\t\t\treturn i", "title": "" }, { "docid": "d2c33df43358eed5cfc794f4f983847e", "score": "0.59655327", "text": "def is_prime2(num):\n\n num_sqrt=int(math.sqrt(num))+1\n if num%2==0:\n print(\"This is not a prime number\", num) \n else:\n for i in range(3, num_sqrt, 2):\n if num % i == 0:\n print(\"This is NOT a prime number\", num, i)\n break\n else:\n print(\"This is the prime number\", num)", "title": "" }, { "docid": "bcae04279a4cd3eebb67071653cd0dc9", "score": "0.5954269", "text": "def pollard_p_minus_1(n, limit=100000):\n p_list = sieves.primes(limit)\n pow_list = []\n\n for p in p_list:\n pk = p\n while pk <= limit:\n pow_list.append((pk, p))\n pk *= p\n\n pow_list.sort()\n c = 13\n\n for (count, (pk, p)) in enumerate(pow_list):\n c = pow(c, p, n)\n if count % 100 == 0:\n g = gcd(c - 1, n)\n if 1 < g < n:\n return g\n\n return gcd(c - 1, n)", "title": "" }, { "docid": "cef138e132fa58cdfba259298f501fc8", "score": "0.59527504", "text": "def compute():\n num = 9 # start with the first odd composite number\n primes = [2, 3, 5, 7]\n\n while True:\n if isPrime(num):\n primes.append(num)\n else:\n for i in primes:\n if math.sqrt((num - i) / 2) == int(math.sqrt((num - i) / 2)):\n break # follows goldbach conjucture!\n else:\n return num # we got our number that doesnt follow goldbach conjucture\n num += 2", "title": "" }, { "docid": "27767e56118794c50d47ad74126e2633", "score": "0.5911437", "text": "def num_theory(digits, width, num):\n # Only keep the digits to the right of the decimal point\n n = str(pi_expansion(digits, num)).split('.')[-1]\n # Loop through all the possible windows\n for i in window(n, width):\n # Check if the window is a prime number\n if prime(int(''.join(i))):\n # Return the first prime number\n return int(''.join(i))\n return 'No Prime'", "title": "" }, { "docid": "7184826567a27899b233decfb4121cb3", "score": "0.58670175", "text": "def check_number(numk):\r\n\r\n num_maxi = pow(2, int(numk))\r\n num_max = num_maxi - 1 # Max value of the range\r\n num_min = pow(2, int(numk) - 1) # Min value of the range\r\n # Number of primes in this range\r\n primes = (num_max / log(num_max)) - (num_min / log(num_min)) # use of prime function as in the slides\r\n primes = round(primes)\r\n\r\n rang = num_max - num_min # Selecting a random number in the above range\r\n\r\n loop = rang - primes + 1 # loop this many times no prime found\r\n\r\n number = random.randrange(num_min, num_max)\r\n i = 0\r\n run = miller_rabin(number)\r\n while run is False and i < loop + 1:\r\n number = random.randrange(num_min, num_max)\r\n run = miller_rabin(number)\r\n i += 1\r\n if run is True:\r\n print(number)\r\n else:\r\n print(\"No prime found\")", "title": "" }, { "docid": "3c90fe63f2e71ec1b41c0b1d3bcbbfcd", "score": "0.58264285", "text": "def primeNumWorker(x):\n endrange = int(x)+1\n for num in range(1,endrange):\n prime = True\n for i in range(2,num):\n if (num%i==0):\n prime = False\n if prime:\n print(num, sep=' ',end=' ',flush=True)\n print('\\n')\n return", "title": "" }, { "docid": "27490837271c470316140760cda0c040", "score": "0.57826966", "text": "def getPrime(n):\n\n p = getrandbits(n)\n while not isProbablePrime(p):\n p = getrandbits(n)\n return p", "title": "" }, { "docid": "889f8d019c226761156fe44941872355", "score": "0.57794464", "text": "def 
largest_prime_factor(input_num):\n\n # if input is less than 2, there are no prime factors less than 2, return None\n if input_num < 2:\n return None\n\n # set current lowest prime to lowest prime factor (2)\n curr_lpf = 2\n # loop while our current input is greater than our current lpf\n while input_num > curr_lpf:\n # if division results in whole number, divide input by curr_lpf and reset to 2\n if input_num % curr_lpf == 0:\n input_num = input_num // curr_lpf\n curr_lpf = 2\n # else move onto next largest factor\n else:\n curr_lpf += 1\n return curr_lpf", "title": "" }, { "docid": "ec5f153e625d794211bcb7b2264b6c2a", "score": "0.57505876", "text": "def getPrimesBelow(n):\n return getPrimesBetween(1, n)", "title": "" }, { "docid": "24baef2452ed40202798ae829de1f80f", "score": "0.57408494", "text": "def find_prime_one_by_one(num):\n if is_prime(num):\n print('Next prime number is :', num)\n return True\n else:\n # print('it is not prime')\n return False", "title": "" }, { "docid": "099ca29fa180e4c7bcfc1597619a3bc7", "score": "0.5719608", "text": "def HillPrime(self, p):\n if p < 0:\n return 2 * p + 1\n return (((1 + 5 * p ** 2) ** (1 / 2)) - ((10 * (p ** 2)) / (2 * (1 + 5 * p ** 2) ** (1 / 2)))) / (\n 1 + 5 * p ** 2)", "title": "" }, { "docid": "0c02876196a62da2432fbeec88764100", "score": "0.56811017", "text": "def sqrt(number):\n # return int(number ** 0.5)\n if number is None:\n return 0\n\n if number < 0:\n return 0\n\n if number == 0 or number == 1:\n return number\n\n nums = list(range(1, number))\n index = number // 2\n\n while (nums[index] * nums[index]) > number:\n index //= 2\n\n while (nums[index] * nums[index]) < number:\n index += 1\n\n if (nums[index] * nums[index]) == number:\n return nums[index]\n else:\n return nums[index - 1]", "title": "" }, { "docid": "3e6cb81701d6af7841f6ddc840a1c0d0", "score": "0.56700516", "text": "def display_primality(n):\r\n\r\n if is_prime(n):\r\n print(\"{0} is a prime number\".format(n))\r\n else:\r\n print(\"{0} is a composite number\".format(n))", "title": "" }, { "docid": "61abe21cb100bc5a1fc7432a73f1681a", "score": "0.56499684", "text": "def find_nt_factor(num):\n for i in range(2, math.ceil(math.sqrt(num)) + 1):\n if num % i == 0:\n return i\n return None", "title": "" }, { "docid": "912d874f497d7974f0127a305f265806", "score": "0.56368667", "text": "def problem3(n):\n for i in range(2,n):\n if n%i == 0 and is_prime(n/i):\n return n/i", "title": "" }, { "docid": "9a965bdf2092c2f4cccc401c40bf0bc9", "score": "0.56249297", "text": "def prime_method1(start, end):\n target, number = 0, range(start, end + 1)\n while number[target] < number[-1]:\n for each_num in number:\n if (each_num % number[target] == 0) and (\n number[target] < each_num):\n number.remove(each_num)\n target += 1\n return number", "title": "" }, { "docid": "6da8316615f7d5b2f3d4401864a21b33", "score": "0.5621598", "text": "def first_prime_over(n):\n for x in range(n+1, n**2):\n if is_prime(x):\n return x", "title": "" }, { "docid": "4fd112d751780adef43d851063810965", "score": "0.56192964", "text": "def largest_prime_factor(n):\n #Setting the largest_number as 2 and reducing the given_input_number n by 2.\n largest_number = 0\n while n % 2 == 0:\n largest_number = 2\n n = n/2\n \n #At this point you will have your n as odd, so we skip all the even test cases by haiving a steps of 2 in range.\n\n for i in range(3, int(math.sqrt(n)+1),2):\n while n % i == 0:\n largest_number = i\n n = n/i\n \n \n if n>2:\n largest_number = n\n \n return int(largest_number)", "title": "" }, { "docid": 
"2d6b2d41004c3c3487f4ed782e72f487", "score": "0.5618331", "text": "def sqrt(number):\r\n \r\n if number < 0:\r\n print(number, ' is invalid, must be greater than or equal to 0')\r\n return None\r\n\r\n if number < 1:\r\n return 0\r\n\r\n curr_num = number\r\n prev_num = None\r\n\r\n while True:\r\n curr_square = curr_num ** 2\r\n next_square = (curr_num+1)**2\r\n \r\n if curr_square == number or (curr_square < number and next_square > number):\r\n break\r\n elif curr_square > number:\r\n prev_num = curr_num\r\n curr_num = curr_num//2\r\n else:\r\n curr_num = (curr_num+prev_num)//2\r\n \r\n return curr_num", "title": "" }, { "docid": "9a045b537a1b17e67b818eb9ac9aa3a0", "score": "0.56178564", "text": "def is_prime(n):\n if n == 2:\n return True\n if n == 3:\n return True\n if n % 2 == 0:\n return False\n if n % 3 == 0:\n return False\n\n i = 5L\n w = 2L\n\n # print 'N', n\n while i * i <= long(math.sqrt(n) / 2):\n if n % i == 0:\n return False\n\n # print 'iw', i, w\n i += w\n w = 6 - w\n\n return True", "title": "" }, { "docid": "3853ba1be24996d07441ff01d58ca9e0", "score": "0.56138855", "text": "def primes_less_than_n(n):\n count = pari(n).primepi()\n return list(primes_first_n(count))", "title": "" }, { "docid": "10c5a1ff46e00d93e6cc7c64ff30bd63", "score": "0.55955034", "text": "def is_number_prime(number):\n\n \"\"\"\n This is the main logic behind reducing the numbers to check for as factors\n if N = a * b; where a<=b and a,b C (1, N)\n then, a * b >= a*a;\n which leads to => a*a <= N\n => a <= sqrt(N)\n Hence checking only till the square root of N \n \"\"\"\n upper_lim = Math.floor(Math.sqrt(number)) + 1\n is_prime = True if number != 1 else False\n\n for i in range(2, upper_lim):\n if number % i == 0:\n is_prime = False\n break\n # The moment there is a divisor of 'number', break the iteration, as the number is not prime\n\n return is_prime", "title": "" }, { "docid": "328346ce7c2972ce9e9953a5dcc2c655", "score": "0.55844384", "text": "def prime(cls, x):\n return x * (1 - x)", "title": "" }, { "docid": "3b31d6bb0f81896f7d44f26fee8de3ed", "score": "0.5567108", "text": "def prime_checker(number):\n for nums in range(2, number//2+1):\n if number % nums == 0:\n print(\"It's not a prime number.\")\n return\n print(\"It's a prime number.\")", "title": "" }, { "docid": "a485966588ee3246cfdd3c4a7f57c470", "score": "0.556179", "text": "def part2():\r\n h = 0\r\n for b in range(106500, 123500+1, 17):\r\n if not isPrime(b):\r\n h += 1\r\n return h", "title": "" }, { "docid": "dd96b5f920f3374f25f871b7567560db", "score": "0.55525845", "text": "def FindPrime(number):\n\n if number <= 1:\n return False\n\n for element in range(2, number):\n if number % element == 0:\n return False\n\n return True", "title": "" }, { "docid": "b62a43b02d0b68c84df917105ce5640d", "score": "0.55492604", "text": "def max_divi(number1, number2):\r\n minnum = abs(min(number1, number2))\r\n maxnum = abs(max(number1, number2))\r\n if minnum == 0 and maxnum == 0:\r\n print \"NONE\"\r\n elif minnum == 0:\r\n print maxnum\r\n elif maxnum % minnum == 0:\r\n print minnum\r\n else:\r\n maxnum = maxnum % minnum\r\n max_divi(maxnum, minnum)", "title": "" }, { "docid": "4f7f4b5ca3dfcea7c17b02fefbcd2e50", "score": "0.5546296", "text": "def primes(maximum_value, verbose=True):\n known_nonprimes = set()\n range_stop = int(maximum_value) + 1\n\n for cursor in range(2, range_stop):\n if cursor not in known_nonprimes:\n more_nonprimes = range(cursor ** 2, range_stop, cursor)\n known_nonprimes.update(more_nonprimes)\n\n if verbose:\n 
print('found prime: %12d' % cursor)\n\n yield cursor", "title": "" }, { "docid": "6d757194dbda67186f4ee6cdd6ef75c8", "score": "0.5536238", "text": "def pe007(n = 10001):\n \n # assume avg prime gap of 1/10\n gap = n // 10\n \n count = 0\n for p in primes(n*gap):\n count += 1\n if count == n:\n return p\n #print count, p", "title": "" }, { "docid": "22a8e3dff4d631399b7427382a00d734", "score": "0.55334574", "text": "def max_prime_palindrome(n):\n sorted_primes = sorted(primes(n), reverse=True)\n\n for i in sorted_primes:\n if is_palindrome(i):\n return i", "title": "" }, { "docid": "d6a58dce0b8b6e4ccbdbdbb0f2dd4485", "score": "0.55306405", "text": "def problem49():\n limit = 10000\n primes, primeset = util.primes(limit)\n\n for a in primes:\n permset = set([int(x) for x in util.permutations(str(a))])\n for b in util.permutations(str(a)):\n b = int(b)\n if b > a and b in primeset:\n c = b + (b-a)\n if c in primeset and c in permset and c != 8147:\n return \"{}{}{}\".format(a, b, c)", "title": "" }, { "docid": "e997919247a743f58f4953736bc83a1f", "score": "0.5511142", "text": "def prime(cls, x):\n return 1 - x * x", "title": "" }, { "docid": "0f51c3a1e2fcbdd5041deef0cc3d840f", "score": "0.55018204", "text": "def print_ans(N):\n q, mod = divmod(N,2)\n if(mod == 0):\n print(q - 1)\n else:\n print(q)", "title": "" }, { "docid": "9b55883589950f5cce4cb999f29f9a4f", "score": "0.5488143", "text": "def largest_sqrt(k):\n last_num = 1\n while last_num * last_num < last_num:\n last_num *= 2\n last_num /= last_num\n while last_num * last_num <= k:\n last_num += 1\n return last_num - 1", "title": "" }, { "docid": "f69b7ce81a929c84e464926807745c58", "score": "0.5457017", "text": "def isPrime(number):\r\n return 2 in [pow(2, number, number)]", "title": "" }, { "docid": "6cfe1d88453be67cd7cd1720d99a526a", "score": "0.5452028", "text": "def problem41():\n from random import randint\n\n def is_prime(n):\n if pow(2, n, n) != 2:\n return False\n for i in xrange(50):\n a = randint(2, n-1)\n if util.gcd(n, a) == 1 and pow(a, n-1, n) != 1:\n return False\n return True\n\n best = 2143\n for j in xrange(1, 10):\n for i in util.permutations(''.join(map(str, xrange(1, j+1)))):\n i = int(i)\n if i > best:\n if is_prime(i):\n best = i\n return best", "title": "" }, { "docid": "a242ff00d4ec6ee20c3493fbc5bc50d4", "score": "0.54391694", "text": "def getNextOddPrime(n):\n n += 2\n while True:\n if isPrime(n):\n return n\n n += 2", "title": "" }, { "docid": "b52d6c5668cd2416ce454cd1a9060ec9", "score": "0.5434158", "text": "def triangleNumber(limit):\n\n satisfied = False\n i = 0\n while not satisfied:\n triangle = int((i*(i+1)/2))\n inBetween = primeFactorize(triangle)\n needed = numOfDivisors(inBetween)\n if needed > 250:\n print(needed)\n if needed > limit:\n satisfied = True\n else:\n i += 1\n\n return triangle", "title": "" }, { "docid": "39c21b09c33e4a6e918e350f308b2297", "score": "0.542012", "text": "def eulerNum(n) :\n num = 0\n for i in range(1, n) :\n if isRelPrime(n, i) :\n num = num + 1\n \n return num", "title": "" }, { "docid": "57e8d4b1feca352982f0234295e387c9", "score": "0.5419281", "text": "def is_prime(number):\n if number <= 3:\n return number > 1\n if number % 2 == 0 or number % 3 == 0:\n return False\n i = 5\n while i * i <= number:\n if number % i == 0 or number % (i + 2) == 0:\n return False\n i = i + 6\n return True", "title": "" }, { "docid": "cdf6ec3dee288948654a8869bad4b794", "score": "0.54009336", "text": "def main():\n global primes\n noOfPrimes = 3\n sl = 2\n c = 9\n test = 
float(noOfPrimes)/(2*sl+1)\n while test >= 0.10:\n sl += 2\n for i in xrange(0,3):\n c += sl\n if primes.is_prime(c):\n noOfPrimes += 1\n c += sl\n test = float(noOfPrimes)/(2*sl+1)\n print sl + 1", "title": "" }, { "docid": "1f97b555b3d7eed1da7d5c0dd1549d61", "score": "0.5399384", "text": "def legendre(n):\n\tfor p in range(n*n+1, (n+1)*(n+1)):\n\t\tif (my_math.is_prime(p)):\n\t\t\treturn p\n\t\n\treturn 0", "title": "" }, { "docid": "a43ba2b5f34b071adee092a51ba6c8e9", "score": "0.5391948", "text": "def prime(number):\n prime_lst = []\n for num in range(2, number + 1):\n if num > 1:\n for i in range(2, num):\n if (num % i) == 0:\n break\n else:\n prime_lst.append(num)\n return prime_lst", "title": "" }, { "docid": "013ef36a6ef1ac81c047e66076ecfb9f", "score": "0.5388846", "text": "def test_is_prime(n, result):\n from is_prime import is_prime\n assert is_prime(n) == result", "title": "" }, { "docid": "6a4c0358c2487d9ae85a57585963470c", "score": "0.5386493", "text": "def solve3(lines):\n h = 0\n for b in range(108100, 125117, 17):\n if not is_prime(b):\n h += 1\n return h", "title": "" }, { "docid": "04d57dd34c3022f02993051bb7db192d", "score": "0.53776026", "text": "def getPrimesBelowN(max):\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n primes = []\n for i in xrange(max+1):\n if isPrime(i):\n primes.append(i)\n return primes", "title": "" }, { "docid": "1975d885c0557ec0c1adfeddbfdb8fc6", "score": "0.53738755", "text": "def simple_solution():\n primes = [2]\n\n def is_prime(candidate):\n for prime in primes:\n if candidate % prime == 0:\n return False\n\n for test in range(3, int(candidate/2)):\n if candidate % test == 0:\n return False\n\n return True\n\n candidate = 3\n while len(primes) < 10001:\n if is_prime(candidate):\n primes.append(candidate)\n\n candidate += 2\n\n return primes[len(primes)-1]", "title": "" }, { "docid": "beed9c9ed3f0c7f3b48391d636bf63cc", "score": "0.5371649", "text": "def PrimePi(n):\n ps = primes()\n p = ps.next()\n i = 0\n while prime(i) <= n:\n i += 1\n return i", "title": "" }, { "docid": "09500c6b2870bd565a131b65fccba72d", "score": "0.53510016", "text": "def consecutive_primes(num):", "title": "" }, { "docid": "74fad771222727ca9fe7150917ad3c52", "score": "0.5343944", "text": "def is_prime(number):\n if(number <= 1):\n return False\n\n if(number == 2):\n return True\n\n a = int(math.sqrt(number))+1\n divisor = 3\n\n if((number % 2) == 0):\n return False\n\n while(divisor < a):\n if((number % divisor) == 0):\n return False\n divisor += 2\n return True", "title": "" }, { "docid": "e287cc3906553a67a002d941da06a9f0", "score": "0.53292954", "text": "def main():\n num = input()\n epsilon = 0.01\n low = 0\n high = int(num)\n ans = (high + low) / 2.0\n while abs(ans ** 2 - int(num)) >= epsilon:\n if ans ** 2 < int(num):\n low = ans\n else:\n high = ans\n ans = (low + high) / 2.0\n print(ans)", "title": "" }, { "docid": "dda044f251368f01c89fa32ed7888ef8", "score": "0.5327504", "text": "def is_prime(number):\n prime = True\n for i in range(2, number):\n if number % i == 0:\n prime = False\n return prime", "title": "" }, { "docid": "61a7d9cf74cea02904d7f9c89798afda", "score": "0.5323784", "text": "def sqrt(number):\r\n if(number<0):\r\n return None\r\n low=0\r\n high=number\r\n mid=low+high//2\r\n return recu_sqrt(mid,number)", "title": "" }, { "docid": "c438aa16753abc8ed53c161099aa1133", "score": "0.53189784", "text": "def get_max_prime_factor(n):\n # Get list of prime numbers\n prime_numbers = [ i for i in range(n+1) if is_prime(i) == 
True ] \n max_prime_factor = 0\n \n for i in prime_numbers:\n # Check if number is divisible by prime number in list \n # and greater than previous prime factor\n if n % i == 0 and i > max_prime_factor:\n max_prime_factor = i\n return max_prime_factor", "title": "" }, { "docid": "b1946be4fedd87bcac6e9a52aa98caf1", "score": "0.5314849", "text": "def generatePrime():\n while(1):\n option = random.randrange(1000000, 10000000)\n # compare with prime under 100 to quicklu find out composite number\n judge = 0\n for i in range(len(prime_under_100)):\n if option%prime_under_100[i] == 0:\n break\n judge = i\n if judge < len(prime_under_100)-1:\n continue\n # miller-rabin test\n if isPrime(option) != 1:\n continue\n else:\n return option", "title": "" }, { "docid": "79995f76a10a88aa27968b44c962bf0a", "score": "0.53130805", "text": "def isPrime( n,checker):\r\n if n%checker==0 and n!=2 and n!=checker:\r\n \r\n return(0)\r\n else: \r\n if checker < math.sqrt(n): \r\n return isPrime(n,checker+1)", "title": "" }, { "docid": "dce6a2d8687a4ca2c05590f775c4df17", "score": "0.53071445", "text": "def primes_below(maxNumber):\n # This method was developed by Stack Overflow user Daniel G. (http://stackoverflow.com/users/207432/daniel-g)\n allNumbers = range(3, maxNumber+1, 2)\n for mIndex, number in enumerate(xrange(3, maxNumber+1, 2)):\n if allNumbers[mIndex] == 0:\n continue\n # now set all multiples to 0\n for index in xrange(mIndex+number, (maxNumber-3)/2+1, number):\n allNumbers[index] = 0\n return [2] + filter(lambda n: n!=0, allNumbers)", "title": "" }, { "docid": "e62051e54879eec22b74086ec5efd457", "score": "0.53060097", "text": "def problem50():\n limit = 1000000\n primes, primeset = util.primes(limit)\n\n best = (1, 1)\n for i in xrange(len(primes)):\n total = 0\n for j in xrange(i, len(primes)):\n total += primes[j]\n if total >= limit:\n break\n elif total in primeset and j-i > best[1]:\n best = total, j-i\n return best[0]", "title": "" }, { "docid": "12fa2ed5bbc1a6867148f1f538d867f9", "score": "0.5301002", "text": "def es_primo(num):\n try:\n if num >= 0:\n prime = True\n if num < 2:\n prime = False\n else:\n for x in range(2, num):\n resto = num % x\n if resto == 0:\n prime = False\n break\n return prime\n else:\n raise NoNumeroNatural()\n except NoNumeroNatural:\n print \"Is Prime Error: No es un numero valido\"\n except TypeError, error:\n print \"Is Prime Error: %s\" % error", "title": "" }, { "docid": "fff603d195fd6650d86b8d4d8fade046", "score": "0.53001827", "text": "def prime_number(num):\n num = abs(int(num))\n if num < 2:\n return False\n elif num == 2:\n return True\n elif not num & 1:\n return False\n for x in range(3, int(num**0.5) + 1, 2):\n if num % x == 0:\n return False\n return True", "title": "" }, { "docid": "1db322cf5cd91c90143cb8d7e10e9ca9", "score": "0.52976894", "text": "def solve(max_n=10**7):\r\n sqrt_max_n = sqrt(max_n)\r\n this_prime = prevprime(sqrt_max_n)\r\n\r\n tp = Pool(8)\r\n\r\n phi_info = tp.map(find_permutation_val_for_prime_wrapper,\r\n zip(reversed(list(sieve.primerange(2, this_prime))), repeat(max_n)))\r\n\r\n tp.close()\r\n return min(phi_info, key=lambda x: x[1] if x else float('inf'))[0]", "title": "" }, { "docid": "9ad1d8441e3e801048b5a9d7e8113578", "score": "0.5297198", "text": "def prime_num_up_to(n):\n\n list_of_primes = []\n\n for i in xrange(1, n + 1):\n i_is_prime = True\n\n for e in xrange(4, math.sqrt(i) + 1):\n if i % e == 0:\n i_is_prime = False\n\n if i_is_prime:\n list_of_primes.append(i)\n\n return list_of_primes", "title": "" }, { "docid": 
"51adf837ec05c5961cb223ac3eb3ad3d", "score": "0.5286219", "text": "def closest_power(number):\n \"\"\" Return the largest power of 2 less than n \"\"\"\n biggest_power = 1\n while biggest_power < number:\n biggest_power = biggest_power << 1\n return biggest_power >> 1", "title": "" }, { "docid": "3ad04457b61bbb0eeb1aff1cfb058f61", "score": "0.52731305", "text": "def greatest_factor(num: int) -> int:\r\n if num <= 1:\r\n return 1\r\n\r\n # factors found\r\n factors = []\r\n # primes, will add to list below\r\n primes = [2]\r\n\r\n # test 2 as factor, find how many times, add to list of factors\r\n num2 = dupli_factors(2, num)\r\n factors += [2]*num2\r\n\r\n if math.prod(factors) == num:\r\n return 2\r\n\r\n # one more special case:\r\n if num == 3:\r\n return 3\r\n\r\n # find next prime using latest list of primes, check if factor\r\n x = 3\r\n while x <= num:\r\n # nothing happens unless x is prime\r\n if not is_factor(primes, x):\r\n primes.append(x)\r\n\r\n # find how many times x is factor, add to list of factors\r\n numx = dupli_factors(x, num)\r\n factors += [x]*numx\r\n\r\n # check if all factors found\r\n if math.prod(factors) == num:\r\n return x\r\n\r\n # Check if unfactored portion of num is less than square of x\r\n # if so, the unfactored portion must be the highest prime factor\r\n if math.prod(factors):\r\n # If at least one factor has been found, find unfactored portion\r\n unfactored = int(num/math.prod(factors))\r\n else:\r\n # If no factors have been found, the unfactored portion is the number\r\n unfactored = num\r\n\r\n if unfactored < x**2:\r\n return unfactored\r\n\r\n x += 1", "title": "" }, { "docid": "21ff3cb2d662ec62388c01bae529a377", "score": "0.52718186", "text": "def prime_check(maxnum):\n\tbucket = range(int(math.sqrt(maxnum)) + 1)\n\tbucket[1] = 0\n\tfor i in bucket:\n\t\tif i <> 0:\n\t\t\tfor not_prime in xrange(2*i, int(math.sqrt(maxnum)) +1, i):\n\t\t\t\tbucket[not_prime] = 0\n\tbucket = filter(lambda x: x <> 0, bucket)\n\t\n\tdef isPrime(num):\n\t\tfor i in bucket:\n\t\t\tif num % i == 0:\n\t\t\t\treturn False\n\t\t\tif i > math.sqrt(num):\n\t\t\t\treturn True\n\t\treturn True\n\n\treturn isPrime", "title": "" }, { "docid": "a80bd38098149d8d1f7fedfb8d879265", "score": "0.5271264", "text": "def check_primality(number):\n logger.info(\"Testing weather {} is primality or not!\".format(number))\n\n for i in range(2, number - 1):\n if number % i == 0:\n logger.info(\"{} is not a primality! 
It can be divided by {}.\".format(number, i))\n break\n else:\n # negative numbers, 0, and 1 are not considered prime by definition\n if number <= 1:\n logger.info(\"{} is a not primality!\".format(number))\n else:\n logger.info(\"{} is a primality!\".format(number))", "title": "" }, { "docid": "71078618428bc9e3043e819029fe3502", "score": "0.5247564", "text": "def print_primes(n):\n primes_list = get_primes(n)\n mult_table = {}\n\n # Get the largest val for the formatting fields \n cell_size = len(str(primes_list[-1]**2)) \n\n print(\" %*s\"%(cell_size, \" \"), end=\" \")\n for i in range(len(primes_list)):\n print(\" %*s\"%(cell_size, primes_list[i]), end=' ')\n print(\" \")\n\n for i in range(len(primes_list)):\n print(\" %*s\"%(cell_size, primes_list[i]), end=' ')\n\n for j in range(len(primes_list)):\n val = 0\n if mult_table.get((i,j)):\n val = mult_table[(i,j)]\n elif mult_table.get((j,i)):\n val = mult_table[(j,i)]\n else:\n val = primes_list[i]*primes_list[j]\n mult_table[(j,i)] = val\n\n print(\" %*s\"%(cell_size, val), end=' ')\n\n print(\"\")", "title": "" }, { "docid": "c83f5a8a1ddda179a17dd1075233188b", "score": "0.52422535", "text": "def is_prime(number):\n if number == 2:\n return True\n elif number <= 1:\n return False\n else:\n for i in range(2, number):\n if number % i == 0:\n return False\n return True", "title": "" }, { "docid": "0b04150a32f4dff1804a491af5615608", "score": "0.524098", "text": "def isprime(n):\r\n if n == 2:\r\n return(True)\r\n if n == 3:\r\n return(True)\r\n if n % 2 == 0:\r\n return(2)\r\n if n % 3 == 0:\r\n return(3)\r\n\r\n i = 5\r\n w = 2\r\n\r\n while i * i <= n and i < 100:\r\n if n % i == 0:\r\n return(i)\r\n\r\n i += w\r\n w = 6 - w\r\n\r\n return(True)", "title": "" }, { "docid": "d60bf2c0d417030f2ee90e13f8e04480", "score": "0.5236364", "text": "def is_prime(number):\n if number == 2:\n return True\n for i in xrange(2, int(math.sqrt(number)) + 1):\n if number % i == 0:\n return False\n return True", "title": "" }, { "docid": "4914fb6302061a281e83d454973a083e", "score": "0.5231192", "text": "def main():\n factors = find_factors(N)\n\n for factor in factors:\n if is_prime(factor):\n prime_factors.add(factor)\n\n return sorted(prime_factors)[-1]", "title": "" }, { "docid": "764007e1ec9e9070e35eb64951b0db7a", "score": "0.52272874", "text": "def PrimeQ(n):\n return is_prime(n)", "title": "" }, { "docid": "98c439c318f66a9b8a28f46cb242ba3c", "score": "0.5227074", "text": "def primechecker(numbers):\n\tfor number in numbers:\n\t\tif number<2:\n\t\t\tprint (\"It is not prime number\")\n\t\telif number==2:\n\t\t\tprint (\"it is a prime number\")\n\t\telif number%2==0:\n\t\t\tprint (\"it is an even number but not a prime number\")\n\t\telif number%2:\n\t\t\tprint (\"it is an odd number but not a prime number\")\n\t\telse:\n\t\t print(\"it is a prime number\")\n\t\tprint(number)", "title": "" }, { "docid": "05077bb88249aba41d0342d3a825ac0b", "score": "0.5220804", "text": "def prime(my_option, my_option2, size):\r\n\r\n p = new_random_prime(size)\r\n\r\n click.echo(p)", "title": "" }, { "docid": "489b98b72fc8a563916aa8dd9639127f", "score": "0.52188146", "text": "def is_prime(number):\n if number < 2 :\n return False\n\n for i in range(2,number):\n if (number%i==0):\n return False\n else :\n return True", "title": "" }, { "docid": "0464e341f094f37179be0351af8cfb6f", "score": "0.52171224", "text": "def is_prime (number):\n return all(number % i for i in range(2, number))", "title": "" }, { "docid": "e4b9acbd558bc027295acad34f41a96c", "score": "0.5213132", 
"text": "def display_problem():\n return \"\\nFind the sum of all multiples of 3 or 5 below 1000.\\n\"", "title": "" }, { "docid": "4be0d09d8786c7f0f9c5e4fe0b4bc997", "score": "0.52121836", "text": "def get_nth_prime(n, max_prime=4100, safe=True):\n if n <= 100:\n first_100_primes = (\n 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61,\n 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137,\n 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199,\n 211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277,\n 281, 283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359,\n 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431, 433, 439,\n 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503, 509, 521,\n 523, 541, )\n #print(len(first_100_primes))\n nth_prime = first_100_primes[n - 1]\n else:\n if safe:\n primes = [num for num in range(2, max_prime) if is_prime(num)]\n nth_prime = primes[n]\n else:\n # This can run for a while... get it? while?\n nth_prime = get_nth_prime_bruteforce(n)\n return nth_prime", "title": "" }, { "docid": "9f7f5dcfcf9f33dd08120a7d4b1a544c", "score": "0.52069706", "text": "def problem60(): \n \"\"\"\n prime_map = mlib.prime_sieve(2*10**6)\n prime_list = mlib.prime_sieve(10**4, [])\n #print prime_list\n prime_list = [str(p) for p in prime_list]\n for i in range(1, len(prime_list)):\n if int(prime_list[1] + prime_list[i]) in prime_map:\n if int(prime_list[i] + prime_list[1]) in prime_map:\n print prime_list[i]\n \"\"\"\n pass", "title": "" }, { "docid": "385242ecc40a662892adf7e5e63a1de8", "score": "0.52064526", "text": "def problem49():\n start_n = 1491\n end_n = 9999\n\n prime_map = mlib.prime_sieve(10**5, output={})\n for n in range(start_n, end_n, 2):\n for step in range(2, (end_n-n)/2, 2):\n if (n in prime_map and n+step in prime_map and \n n+step*2 in prime_map):\n n1 = list(str(n))\n n2 = list(str(n+step))\n n3 = list(str(n+2*step))\n n1.sort()\n n2.sort()\n n3.sort()\n if n1 == n2 and n2 == n3:\n return str(n)+str(n+step)+str(n+2*step)", "title": "" }, { "docid": "3bdd33d1e22de4a52bb6f3d17dce47f9", "score": "0.5203756", "text": "def problem3():\n from math import floor, ceil, sqrt\n number = 600851475143\n\n def factor(num):\n for n in xrange(2, int(ceil(sqrt(num)))):\n if num % n == 0:\n return n, num / n\n return None, num\n\n f1, f2 = 1, number\n while f1 is not None:\n f1, f2 = factor(f2)\n return f2", "title": "" }, { "docid": "be89713161280bd52f14172a2bf01389", "score": "0.5203164", "text": "def largestEvenLTE(n):\n return int(n - (n % 2))", "title": "" }, { "docid": "99313739237c84ee890e0cad21851b43", "score": "0.5195397", "text": "def nearest_power_of_2(number):\n return int(pow(2, ceil(log(number, 2))))", "title": "" }, { "docid": "12e008c45872b96d0327c5665483c5e0", "score": "0.51945317", "text": "def is_prime(num):\n if num <= 3:\n return num > 1\n\n elif num % 2 == 0 or num % 3 == 0:\n return False\n\n increment_by = 6\n sq = sqrt(num)\n iterator = 5\n\n while iterator <= sq:\n if num % iterator == 0 or num % (iterator + 2) == 0:\n return False\n\n iterator += increment_by\n\n return True", "title": "" }, { "docid": "83f81d20d51ed7094432986edefee982", "score": "0.5193836", "text": "def n_prime(n: int) -> int:\n\n if n == 1:\n return 2\n\n to_test = 3\n prime_count = 1\n\n while prime_count <= n:\n if is_prime(to_test):\n prime_count += 1\n if prime_count == n:\n return to_test\n to_test += 2", "title": "" }, { "docid": "144aa0eaa0c7d57f22e2915d6e6df39a", "score": "0.519371", "text": "def 
largest_factor(n):\n from math import sqrt , floor\n n = abs(n) #takes care of negative numbers\n i, max_factor = floor(sqrt(n)),1\n while i < n:\n if(n%i == 0):\n max_factor = i\n i +=1\n return max_factor", "title": "" }, { "docid": "69e2e06645f86ea437a0f67a24b3d8db", "score": "0.5184875", "text": "def checkio(number):\r\n # Special case for one to keep the logic simpler (and zero, as an early out).\r\n if number <= 1:\r\n return number\r\n\r\n # Factor number into factors in the range [2, 9] (inclusive)\r\n factors = []\r\n for d in reversed(range(2, 10)):\r\n while number > 1 and not number % d:\r\n number //= d\r\n factors.append(d)\r\n\r\n # If the number has not been completely factored, it must have prime\r\n # factors larger than seven. In this case, it's impossible to factor\r\n # with digits, so return zero.\r\n if number > 1:\r\n return 0\r\n\r\n # Combine the factors back into a single integer.\r\n # Use reversed so that smaller digits come first.\r\n return functools.reduce(lambda a, b: 10 * a + b, reversed(factors))", "title": "" }, { "docid": "e8d22bebe9b6629a2c9538896e15600f", "score": "0.51832914", "text": "def _findBest(self, num):\n middle, num = num % 100, num//100\n top, num = num % 100, num//100\n left, num = num % 100, num//100\n right, num = num % 100, num//100\n bottom = num % 100\n d = {0:middle, 1:top, 2:left, 3:right, 4:bottom}\n return max(d, key=d.get)", "title": "" }, { "docid": "fbdfe87d85450dd325c8aecad4556677", "score": "0.5177789", "text": "def is_prime(num):\n if num == 2:\n return True\n if num < 2 or num % 2 == 0:\n return False\n for n in range(3, int(num**0.5)+2, 2):\n if num % n == 0:\n return False\n return True", "title": "" } ]
c7e2a524cd9adf6b89023ec0ed9c7d11
Remove the data directory path from filenames
[ { "docid": "ac198a68326fc904ff02d2da2f4406d2", "score": "0.7594078", "text": "def _remove_data_dir_path(self, inp=None):\n # import string\n # need to add a check in here to make sure data_dir path is actually in\n # the filename\n if inp is not None:\n split_str = os.path.join(self.data_path, '')\n return inp.apply(lambda x: x.split(split_str)[-1])\n\n #elif inp is not None:\n # \n # return inp.split(split_str)[-1]\n \n # match = os.path.join(self.data_path,'')\n # num = len(match)\n # return inp.apply(lambda x: x[num:])", "title": "" } ]
[ { "docid": "9ed69e657a7e06ac66df40d6f695f8aa", "score": "0.659365", "text": "def removePath(cls, filename):\n if filename is not None:\n filename = re.sub('^.*[/\\\\\\\\]', '', filename)\n\n return filename", "title": "" }, { "docid": "4bed96882207a7485d70f66061e3cc3a", "score": "0.64608777", "text": "def delete_loc_files(self):\n filenames = os.listdir(\"./data\")\n for f in filenames:\n os.remove(f)", "title": "" }, { "docid": "b2eeb844849d0f319c2012f5b957eda9", "score": "0.6420631", "text": "def __get_datafile_name(filename):\n return Path(__file__).parent / \"data/\" + filename", "title": "" }, { "docid": "c841db7b2abf5123dc453cbed40acb42", "score": "0.64035183", "text": "def _clean_off_path_if_needed(file_name_string) :\n \n return os.path.basename(file_name_string)", "title": "" }, { "docid": "d751df0850f6c449ae6f098bf080da15", "score": "0.6349965", "text": "def rm_data_file(file_name: str) -> None:\n file_path = _PROJECT_DIR / file_name\n rm_file(file_path)", "title": "" }, { "docid": "44341157d7ec1ea5d9dbcb2a99a0518a", "score": "0.6332306", "text": "def data_filename(self):\n return os.path.basename(self.data_path)", "title": "" }, { "docid": "1d56442a6cb217abecea0d2f99d6d9d9", "score": "0.6319476", "text": "def data_path(name):\n return path.join(DATA_DIR, name)", "title": "" }, { "docid": "45d0c2bf5c0c159a8481efd265d9dae3", "score": "0.6319422", "text": "def strip_path(path):\n filename = path[path.rfind(\"/\")+1:]\n return filename", "title": "" }, { "docid": "20507a77ab5315d3eaccfa963893f3a0", "score": "0.6319025", "text": "def clean_dir_name(name):\n return name.replace('/', '_')", "title": "" }, { "docid": "84b25148d98af0f32547d1169ec11af7", "score": "0.6237159", "text": "def clean_file_name(filename):\n if '/' in filename:\n filename = filename.split('/')[-1]\n return filename", "title": "" }, { "docid": "ea90dc3ac25b7cc74104e5b0c03e1ae1", "score": "0.6189689", "text": "def strip_filenames(descriptor):\n print(f\"strip filename from {str(descriptor.location)}\")\n if descriptor._field_data.has(descriptor, 'filename'): # lint-amnesty, pylint: disable=protected-access\n descriptor._field_data.delete(descriptor, 'filename') # lint-amnesty, pylint: disable=protected-access\n\n if hasattr(descriptor, 'xml_attributes'):\n if 'filename' in descriptor.xml_attributes:\n del descriptor.xml_attributes['filename']\n\n for child in descriptor.get_children():\n strip_filenames(child)\n\n descriptor.save()", "title": "" }, { "docid": "18099a01969567a8a07ddba64d69e708", "score": "0.61822116", "text": "def _clean_file_path(cls, path, basedir=None):\n\n if basedir is not None:\n path = os.path.relpath(path, basedir)\n\n return path.replace('\\\\', '/')", "title": "" }, { "docid": "1dfce3da52c9a9ea320c1864f42d0060", "score": "0.61817914", "text": "def strip_path(full_path):\n\n return os.path.basename(full_path)", "title": "" }, { "docid": "a09ad49e1e9ba72e04afdb50ac7998c7", "score": "0.6135805", "text": "def strip_dotslash_prefix(fname: str) -> str:\n return fname[2:] if fname.startswith(\"./\") else fname", "title": "" }, { "docid": "b22ba36041aa736dde5acf7587dd2c52", "score": "0.6128602", "text": "def cleanUpDataDir(tempDir, appName):\n import glob\n dataDir = os.path.join(os.path.dirname(__file__), '..', 'data')\n os.remove(os.path.join(dataDir, '{}.tar.gz'.format(appName)))\n for dataFile in glob.glob(os.path.join(tempDir, '*.data')):\n os.remove(dataFile)\n if os.path.isdir(os.path.join(tempDir, 'benchmark')):\n rmtree(os.path.join(tempDir, 'benchmark'))", "title": "" }, { "docid": 
"370134237333cbf70150a585d6a349fa", "score": "0.61257213", "text": "def full_filename(self, filename):\n if sep in filename:\n return filename\n else:\n return join(self.data_directory(), filename)", "title": "" }, { "docid": "370134237333cbf70150a585d6a349fa", "score": "0.61257213", "text": "def full_filename(self, filename):\n if sep in filename:\n return filename\n else:\n return join(self.data_directory(), filename)", "title": "" }, { "docid": "370134237333cbf70150a585d6a349fa", "score": "0.61257213", "text": "def full_filename(self, filename):\n if sep in filename:\n return filename\n else:\n return join(self.data_directory(), filename)", "title": "" }, { "docid": "61cd43d9b12e74e36780bb740fffd3ea", "score": "0.6122677", "text": "def remove_data_file(self, identifier, destination):\n filename = join(destination, '%s.json' % str(identifier))\n utils.remove_file_or_dir(filename)", "title": "" }, { "docid": "14e7497c9afb416bf7877d5fba2f2a67", "score": "0.60973686", "text": "def restructure_dataset(main_dir):\n\n for loc in sorted(os.listdir(main_dir)):\n if 'DS_Store' not in loc:\n loc_path = os.path.join(main_dir, loc)\n for year in sorted(os.listdir(loc_path)):\n old_path = os.path.join(loc_path, year)\n new_path = loc_path + \"_\" + year\n shutil.move(old_path, new_path)\n\n delete_empty_folders(main_dir)", "title": "" }, { "docid": "4c277d10d86626c82d967d30566bf463", "score": "0.6095823", "text": "def _remove_path_and_metadata(self, file_list, path=None):\n if path is None:\n path = self.path\n results = []\n for file_name in file_list:\n # remove path\n if file_name.startswith(path):\n file_name = file_name[len(path):]\n if file_name[0] == SEPARATOR:\n file_name = file_name[1:]\n # remove .meta.json files\n if file_name.endswith(METADATA_SUFFIX):\n continue\n else:\n results.append(file_name)\n return results", "title": "" }, { "docid": "1589f18c4e37e8cf84f7610c2d6aa4ba", "score": "0.6091854", "text": "def get_data_path(data_dir, data_fn):\n return '{}/{}'.format(data_dir, data_fn)", "title": "" }, { "docid": "97e77d632f3a532a259a4834082f41af", "score": "0.60664123", "text": "def clean_dir(dir_path='../data/raw/'):\n # Files/dirs to remove\n try:\n mac_crap = dir_path + '__MACOSX/'\n # Remove the __MACOSX/ subdirectory\n shutil.rmtree(mac_crap)\n except:\n pass\n # Remove .DS_store\n try:\n ds_store = dir_path + '.DS_Store'\n os.remove(ds_store)\n except:\n pass\n \n # Replace files not ending with csv with csv\n files = os.listdir(dir_path)\n to_replace = [dir_path + file for file in files if not file.endswith('.csv')]\n for file in to_replace:\n cleaned = file + '.csv'\n os.rename(file, cleaned)\n \n # Renames a small number of csv files not formatted like the others in consistent format\n to_format = [dir_path + file for file in files if '_' in file]\n for file in to_format:\n if 'ipynb' not in file:\n formatted = to_format[0].replace('_', '-')\n os.rename(file, formatted)\n pass", "title": "" }, { "docid": "9bc93e4c59db86b8711ac903d2edb159", "score": "0.60656464", "text": "def get_data_filename(filename):\n here = os.path.abspath(os.path.dirname(__file__))\n return os.path.join(here, 'data', filename)", "title": "" }, { "docid": "b76e3285d4a8636e03c1ae6262a254fd", "score": "0.6055473", "text": "def only_path(self):\n return self.file_path_and_name.replace(self.file_name, \"\")", "title": "" }, { "docid": "5a478c3122f00d41141b3dd5ed126212", "score": "0.60480285", "text": "def delete_in_local(self, data_name: str) -> None:\n for local_file_path in 
 self.list_local_file_paths(data_name):\n os.remove(local_file_path)", "title": "" }, { "docid": "c007b5cc5598cf5cde3cc60e4dc85976", "score": "0.60065097", "text": "def getPdsFileName(filename,datadir):\n for f in os.listdir(datadir): \n if re.match(filename, f, re.IGNORECASE):\n realfilename = f \n return os.path.join(datadir,realfilename)", "title": "" }, { "docid": "d29ebbd2452138a89e2d6252dc1a0968", "score": "0.59988415", "text": "def rm(choose):\n preserv = os.listdir(\".\")\n print preserv\n if choose:\n for f in preserv:\n if f.startswith(\"dataset\") and os.path.isdir(f):\n continue\n else:\n call(\"rm -r %s\" % f, shell = True)", "title": "" }, { "docid": "721ddc037bf19d368a742b93a0025f21", "score": "0.5979687", "text": "def clean_up(filename):\n pattern = re.compile(filename + r'\\.*[0-9]*')\n for fname in os.listdir(os.getcwd()):\n if re.search(pattern, fname):\n os.remove(os.path.join(os.getcwd(), fname))", "title": "" }, { "docid": "4ba47fa70b999a1d8178ac75d53fc643", "score": "0.5969337", "text": "def _data_path(self, file_name):\n prefix = self.text_hash[:3]\n suffix = self.text_hash[3:]\n\n # Form the file path.\n return os.path.join(self.corpus, prefix, suffix, file_name)", "title": "" }, { "docid": "275a3032e6c5ec06f9ab9231f13a3a17", "score": "0.5946913", "text": "def removeSTDfiles(stddir):\n for file in os.listdir(stddir):\n if file.startswith(name): os.unlink(\"%s/%s\"%(stddir, file))", "title": "" }, { "docid": "52a24fb2b33442044c1fff0ade0eafcd", "score": "0.5941517", "text": "def clean_up(dir_with_tests = \".\", postfix = \".json\"):\n for name in os.listdir(dir_with_tests):\n if name.endswith(postfix): \n file_or_dir_name = os.path.join(dir_with_tests, name)\n # we should process only files\n if os.path.isfile(file_or_dir_name):\n os.remove(file_or_dir_name)", "title": "" }, { "docid": "1b751098987268026dfd0529ae415e5f", "score": "0.59385693", "text": "def _make_data_path(*data_dir_files: str) -> PathLike:\n data_path = os.path.join(*data_dir_files)\n return data_path", "title": "" }, { "docid": "fe20c50de7cfb0e4d4292a3fd078c43e", "score": "0.59330004", "text": "def data_dir_path(self):\n return os.path.realpath(self.__dest_dirname)", "title": "" }, { "docid": "f80857abe3191f73b8d22c50452be51a", "score": "0.59280396", "text": "def clean_filename(pathin):\n folder, filename = os.path.split(pathin)\n fno, ext = os.path.splitext(filename)\n out = re.sub('\\W','_', fno)\n out = re.sub('_+', '_', out)\n if out[-1] == '_':\n out = out[0:-1]\n return os.path.join(folder, out+ext)", "title": "" }, { "docid": "6b1a75ba8e6eea6296425b27abe615d7", "score": "0.59265715", "text": "def pathToData(self, filename):\n global VTK_DATA_PATHS\n if not filename:\n return VTK_DATA_PATHS\n for path in VTK_DATA_PATHS:\n if filename == os.path.split(path)[-1]:\n return path\n return filename", "title": "" }, { "docid": "b30995b6c859d7627c9e80ed7de19bf7", "score": "0.59216624", "text": "def _clean_filename(self, filename):\n \n #get basename remove dir (dir could be kept as it is a relevant info)\n dir_name, the_basename = os.path.split(filename)\n \n the_basename, the_metadata = self._clean_dwd_filename(the_basename)\n \n #remove suffix\n name, ext = os.path.splitext(the_basename)\n \n if ext.lower() in ['.tmp','.temp']:\n return (dir_name, name, the_metadata)\n else:\n return
(dir_name, the_basename, the_metadata )", "title": "" }, { "docid": "99b09e31d9ea505353be534448840a22", "score": "0.5920828", "text": "def get_data_directory():\n # get directory names\n app_directory=os.path.dirname(os.path.realpath(__file__))\n data_directory=\"{0}/{1}\".format(app_directory,\"../data/2.x_extended_info/\")\n\n return data_directory", "title": "" }, { "docid": "9486107787aaeeabb95cceaa7dee2b3a", "score": "0.59195733", "text": "def removeDirectory(self, path):", "title": "" }, { "docid": "0a043455632f5591e7b85e2a6700222f", "score": "0.59187055", "text": "def shorten_filename(self, fn):\r\n if fn.startswith(os.getcwd() + '/'):\r\n fn = fn[len(os.getcwd())+1:]\r\n return fn", "title": "" }, { "docid": "f3edf39f7982899aec0656c0be279009", "score": "0.59158796", "text": "def get_datafile_path(self, filename):\n return os.path.join(os.path.dirname(__file__), \"data\", filename)", "title": "" }, { "docid": "38fef691f42df01a9d94f486be57ea0c", "score": "0.5915075", "text": "def loadAndCleanData(dataDirPath):\n #load data from files\n for fileName in os.listdir(dataDirPath):\n print('Cleaning File: ', fileName)\n logging.debug(dataDirPath + fileName)\n lines = list(open(dataDirPath + fileName).readlines())\n lines = [s.strip() for s in lines]\n\n cleanAndWriteListToFile(fileName, lines)\n return", "title": "" }, { "docid": "63705a8dbf8c49351960959bc49fd6bd", "score": "0.59071326", "text": "def fix_paths(self, source_dir):\n\n filenames = re.findall('filename=\"([^\"]+)\"', self.content)\n\n for filename in filenames:\n if not os.path.isfile(filename):\n corrected_filename = PathFixer.find(\n os.path.basename(filename),\n source_dir)\n self.content = self.content.replace(\n filename,\n corrected_filename)", "title": "" }, { "docid": "12bfff03503319af05c494499ab0ad4c", "score": "0.59070873", "text": "def _data_path(filename: str) -> str:\n return os.path.join(os.getenv(U_FIN_DATA_BASE, 'data'), filename)", "title": "" }, { "docid": "57be03d0e32a38ba41a3881ddb37b50a", "score": "0.59034014", "text": "def get_data_path(self, filename):\n current_file_dir = os.path.dirname(__file__)\n return os.path.join(current_file_dir, \"..\", \"data\", filename)", "title": "" }, { "docid": "ca3d6c24c5f13d88b4365d92f3b68b6d", "score": "0.59013915", "text": "def get_data_path(self, *rel_path):\n data_dir = os.path.join(self.data_path, *rel_path)\n return os.path.normpath(data_dir)", "title": "" }, { "docid": "d71a6e473fb8847ece51624399a211bf", "score": "0.59012985", "text": "def StripPathToFile( path ):\n head, tail = ntpath.split(path)\n return tail or ntpath.basename(head)", "title": "" }, { "docid": "2a5fcf60d5c7185d5e8107ec4a7732e0", "score": "0.58860403", "text": "def cleanup(args, data_filename, model_filename):\n # TODO: clean up hierarchy file\n if not args.cleanup:\n return\n for filename in [data_filename, model_filename]:\n if filename is not None and os.path.exists(filename):\n os.remove(filename)", "title": "" }, { "docid": "d0eda751dea05585629f7145e613d6ed", "score": "0.58801067", "text": "def cleanupServiceFiles(name):\n for filename in os.listdir(SERVICE_FILES_DIR):\n if filename.startswith(name):\n path = os.path.join(SERVICE_FILES_DIR, filename)\n os.remove(path)", "title": "" }, { "docid": "8afa39140a73288c95071093ecff4177", "score": "0.5877645", "text": "def changeFilepath(imgpath):\n path = imgpath\n if(\"dashboard_data\" not in imgpath):\n path = imgpath.replace(\"dashboard\", \"dashboard_data\")\n return path", "title": "" }, { "docid": "15aecec1ed5139e22328a37ce11eeca3", "score": 
"0.5869039", "text": "def data_path(path_name):\n return path_name", "title": "" }, { "docid": "5a6842859e23e9c37bf2dbe7d2e81476", "score": "0.58663523", "text": "def remove(filename, space):", "title": "" }, { "docid": "1d300f1594075036c5e65a1da3456098", "score": "0.586168", "text": "def flatten_data(path):\n for root, subdirs, files in os.walk(path):\n for file in files:\n if(file != '.DS_Store'):\n shutil.move(root + '/' + file, path)", "title": "" }, { "docid": "4d794b7672e5a7ab25f9886a54654d34", "score": "0.58450025", "text": "def get_filenames(is_training, data_dir):\n cwd = os.getcwd()\n if is_training:\n return cwd + data_dir + \"train.tfrecords\"\n else:\n return cwd + data_dir + \"test.tfrecords\"", "title": "" }, { "docid": "01b929558d336485d43856359763ba0e", "score": "0.58370596", "text": "def _clean_path(self, path):\n if path.startswith(\"/\"):\n path = path[1:]\n return path", "title": "" }, { "docid": "a761e0b3fb1cc4b57731a041605b0a6f", "score": "0.5828664", "text": "def data_dir() -> str:\n this_dir = os.path.dirname(os.path.realpath(__file__))\n return os.path.join(this_dir, 'data')", "title": "" }, { "docid": "8973ba36514e372f105eb58a8b09fea9", "score": "0.58285326", "text": "def data_dir():\n parent = os.path.dirname(__file__)\n return os.path.join(parent, \"data\")", "title": "" }, { "docid": "4dc6097c71d6f05689bd3b7f16766add", "score": "0.58281606", "text": "def get_data_fname(**_):\n return get_data_dir() + data_file", "title": "" }, { "docid": "417fa258b097622f68bf2a4d65033ba4", "score": "0.58226585", "text": "def remove_data(writer: UFOWriter, filename: str) -> None:\n ...", "title": "" }, { "docid": "c0a7183513f3b63f2181b797a59194ab", "score": "0.58224434", "text": "def parse_file_name(data_file, remove_extension=False):\n file_name = str(data_file.encode('ascii', 'ignore')).strip().split('/')[-1]\n if remove_extension:\n file_name = strip_extension(file_name)\n\n return file_name", "title": "" }, { "docid": "db50eac6f9a5f6c508ee5f492bc3ed39", "score": "0.5820092", "text": "def files_for_cleanup(self):\n return [\n \"FlavRegList\",\n \"pwg*.top\",\n \"pwgseeds.dat\",\n \"pwgcounters*.dat\",\n \"pwgubsigma.dat\",\n \"pwhg_checklimits\"\n ]", "title": "" }, { "docid": "3d135c95a06b00a0e6fbde31912b0ac8", "score": "0.5810013", "text": "def safefilename(filename):\r\n for i in \"\\\\/:*?\\\"<>|$\":\r\n filename=filename.replace(i,\"_\")\r\n return filename", "title": "" }, { "docid": "10d3b0dc1f75fbc6f996d2e324dc3f11", "score": "0.580541", "text": "def default_dataset_root() -> str:\n return str(Path(__file__).resolve().parent / 'data')", "title": "" }, { "docid": "896ce5557b33d3b3469e7bbfb23e2998", "score": "0.5800133", "text": "def strip(self,fname):\n tname = os.path.basename(fname)\n tname = tname.split(\".\")\n tname[-1] = \"\"\n tname = \".\".join(tname)\n return tname", "title": "" }, { "docid": "0a9d0a9b13f949c4b60648a9b849d866", "score": "0.5798558", "text": "def cleanPath(self, filepath):\n\n if filepath.startswith(self.librarypath):\n # strip local part od the path\n filepath = filepath[len(self.librarypath) :]\n if filepath.startswith(RAWURL):\n filepath = filepath[len(RAWURL) :]\n filepath = filepath.replace(\"\\\\\", \"/\")\n if filepath.startswith(\"/\"):\n filepath = filepath[1:]\n filepath = urllib.parse.quote(filepath)\n return filepath", "title": "" }, { "docid": "dce96d0aa8c4867eb3095a8c696f1ab9", "score": "0.57909006", "text": "def _data_file_path(path):\n data_dir = current_app.config.get('DATA_DIR', '')\n return os.path.join(data_dir, path)", 
"title": "" }, { "docid": "6cb5012ffc1c0b216d99caa3a9201977", "score": "0.57908183", "text": "def fileroot(self):\n # https://stackoverflow.com/questions/2235173/\n # https://stackoverflow.com/a/2235762/5006\n fileroot, ext = os.path.splitext(self.name)\n return fileroot", "title": "" }, { "docid": "98a595a128aba59432a0aaf43013833e", "score": "0.57880735", "text": "def data_path(filename):\n data_dir = os.path.join(os.path.dirname(__file__), 'data')\n return os.path.join(data_dir, filename)", "title": "" }, { "docid": "1acfc870b59294f22084381303531cab", "score": "0.57834476", "text": "def get_data_folder() -> Path:\n return (pathlib.Path(__file__).parent.parent / \"data\")", "title": "" }, { "docid": "da72f95c499a9ef6da2b3dea3a57b546", "score": "0.57805556", "text": "def set_path(self):\n self.data_path = os.path.split(self.get_filepath())[0]", "title": "" }, { "docid": "9efda9c353b48084626fa4bc7e9fbf74", "score": "0.5779246", "text": "def get_data_file_name(file_name: str = \"q1.dat\") -> str:\n full_path = os.path.realpath(__file__)\n return os.path.dirname(full_path) + \"/\" + file_name", "title": "" }, { "docid": "fd8e01ed56821b9a5458a61a67fa0368", "score": "0.57770276", "text": "def remove_first_fwd_slash_of_file_path(apps, schema_editor):\n ProjectFiles = apps.get_model('polaaar', 'ProjectFiles')\n for pf in ProjectFiles.objects.all():\n path = pf.files.name\n if path.split('/')[0] == '':\n pf.files = path[1:]\n pf.save()\n else:\n pass", "title": "" }, { "docid": "7292f1e8f16b282a962552f816899eea", "score": "0.57766354", "text": "def dataset_root(self, dataset_id):\n return os.path.join(self.data_root, dataset_id.replace(\".\", \"/\"))", "title": "" }, { "docid": "e429bb5a04751a8455adff90391a1578", "score": "0.57731307", "text": "def clean_filename(name):\n\n return name.translate(r\"___________________________________________+_-._0123456789______@ABCDEFGHIJKLMNOPQRSTUVWXYZ______abcdefghijklmnopqrstuvwxyz_____________________________________________________________________________________________________________________________________\").replace('..', '__')[:64]", "title": "" }, { "docid": "06a61d2b358f80fdfe853a728171e831", "score": "0.5768795", "text": "def normalize_file_name(fn):\n return fn.replace('\\\\', '/')", "title": "" }, { "docid": "ef4ebe2151e9a01e1327b796e58e502f", "score": "0.576774", "text": "def clean_filename(src):\n src_dir, src_file = os.path.split(src)\n clean_src_file = re.sub('[\\(\\)\\s]', '', src_file)\n clean_src_path = os.path.join(src_dir, clean_src_file)\n try:\n os.rename(src, clean_src_path)\n except Exception as e:\n print(\"[ERROR] error renaming file: {}\".format(e))\n return clean_src_path", "title": "" }, { "docid": "9460ee26ca8134e5cc2f38c881fc4383", "score": "0.5764178", "text": "def get_data_path():\n dataset_dir = abspath(dirname(__file__))\n data_dir = join(dataset_dir, 'data')\n return data_dir", "title": "" }, { "docid": "54467da06dac7a2451f87d0dd90dc09a", "score": "0.5751561", "text": "def _get_file_path(filename=\"\"):\n\n return os.path.join(data_path, filename)", "title": "" }, { "docid": "cbc5fe32df31025ee2766599442c363b", "score": "0.57510847", "text": "def prep_dataset():\n\tfor root, dirs, files in os.walk('dataset'):\n\t\tprep_dataset = root.replace(\"dataset\", \"prep_dataset\")\n\t\tif not os.path.exists(prep_dataset):\n\t\t\tos.makedirs(prep_dataset)\n\n\t\tfor file in files:\n\t\t\t#if \"gitignore\" not in file:\n\t\t\t\tpath = os.path.join(root, file)\n\t\t\t\ttext = extract_text(path)\n\t\t\t\tif 
path.endswith(\".html\"):\n\t\t\t\t\t\tnew_path = \".\".join(path.split(\".\")[:-1])\n\t\t\t\telse:\n\t\t\t\t\t\tnew_path = path\n\t\t\t\tnew_path = new_path.replace(\"dataset\", \"prep_dataset\")+\".txt\"\n\t\t\t\twith open(new_path, 'w', encoding=\"utf-8\") as f:\n\t\t\t\t\tf.write(text)", "title": "" }, { "docid": "68989413fdad16ab24350c33c903e312", "score": "0.57435465", "text": "def strip_test_directory_prefix(file_in):\n prefix = libastyle.get_test_directory(True)\n start = len(prefix)\n if start > len(file_in):\n start = 0\n return file_in[start:]", "title": "" }, { "docid": "9021f2b5434ac1e32b9abccb8784fb6a", "score": "0.57344925", "text": "def remove_data(self):\n mysql_dirs = ['/var/lib/mysql', '/var/lib/mysql-files']\n if self._is_maria():\n mysql_dirs += ['/var/lib/mariadb']\n\n for cur_dir in mysql_dirs:\n if not os.path.exists(cur_dir):\n continue\n\n backup_dir = util.safe_new_dir(cur_dir)\n shutil.move(cur_dir, backup_dir)", "title": "" }, { "docid": "b940777a74673d3293475aba63ec2f29", "score": "0.5730443", "text": "def stripPath(pathList):\n return [os.path.basename(x) for x in pathList]", "title": "" }, { "docid": "d73fe23e50429eb69db7f7024b74312a", "score": "0.57302535", "text": "def sanitize_directory_path(uri: str) -> str:\n for i in [\"@\", \"/\", \":\"]:\n uri = uri.replace(i, \"_\")\n return uri", "title": "" }, { "docid": "e1b6cf7af4ea577158fba5dd184ab70f", "score": "0.57292086", "text": "def RemoveFile(filename):", "title": "" }, { "docid": "70233acaeeb77511bff386198a11fe65", "score": "0.5726718", "text": "def cleanWorkSpace():\n for fileName in glob.glob('*.zip') + glob.glob('*.csv'):\n os.remove(fileName)", "title": "" }, { "docid": "88860062813709c4d58afb9f5a6a3a85", "score": "0.57232606", "text": "def remove_files() -> None:\n for path in fnmatch.filter(os.listdir('.'), \"plams_workdir*\"):\n shutil.rmtree(path)\n for ext in (\"hdf5\", \"db\", \"lock\"):\n name = f\"cache.{ext}\"\n if os.path.exists(name):\n os.remove(name)", "title": "" }, { "docid": "dba8e20a689a976d2ee8fb98b724671b", "score": "0.57201856", "text": "def remove_data_file(self, data_file: DataFile):\n self.datafiles.remove(data_file)", "title": "" }, { "docid": "ce64cc410a648f529653cf68140c777e", "score": "0.5713314", "text": "def data_dir():\n return pathlib.Path('./data')", "title": "" }, { "docid": "e347eb993b60a118cd3254840c699b4a", "score": "0.570961", "text": "def getdatafilenames(self):\n dprefix, obsf = self.classify_files()\n filelist = glob.glob(self.dirname + '/' + dprefix + '*')\n filelist = [os.path.basename(x) for x in filelist]\n filelist.sort()\n return filelist", "title": "" }, { "docid": "119af94a363004c2bff21c4ed20bec73", "score": "0.5705964", "text": "def datafile(filename):\n return os.path.join(DATA_DIR, filename)", "title": "" }, { "docid": "c1b3f86d7072cac6ec8b35be64f43ec1", "score": "0.5703985", "text": "def add_testdata_prefix(filename):\n testdata_dir = os.path.join(os.path.dirname(__file__), 'testdata')\n return os.path.join(testdata_dir, filename)", "title": "" }, { "docid": "4dcbff4e1e4be0ad7e42a7a30f72edfb", "score": "0.5703782", "text": "def clean(self):\n for root, _, filenames in os.walk(self.outdir):\n for filename in fnmatch.filter(filenames, '*{}'.format(WORKING_EXTENSION)):\n os.remove(os.path.join(root, filename))", "title": "" }, { "docid": "8c28a522ce508eb7d6ff285d0d3dbc64", "score": "0.570303", "text": "def clean_files(files):\n\n return list(filter(lambda x: x[0] != '.', files))", "title": "" }, { "docid": "1c8a2b0aa4331ac985f56d7d66dbb896", "score": 
"0.56994605", "text": "def _get_prepped_data_file_names(self):\n if self._gcp_implementation:\n cursor = self._gcp_bucket.list_blobs(prefix=self.data_version_dir)\n all_files = [x.name for x in cursor]\n all_files = [x.split('/')[-1] for x in all_files]\n else:\n all_files = os.listdir(self.data_version_dir)\n data_files = [x for x in all_files if x.find('_data.csv') > -1]\n if 'market_index_data.csv' in data_files:\n data_files.remove('market_index_data.csv')\n self._prepped_data_files = data_files\n self._prepped_data_files.sort()", "title": "" }, { "docid": "dba9fa8d581a9a0f12eb02b2ea05566c", "score": "0.5698608", "text": "def dir_test_data():\n current_file_dir = os.dirname(__file__)\n return os.path_join(current_file_dir, \"data\")", "title": "" }, { "docid": "7beefc7e7b3a48282199ec9693ccd34b", "score": "0.56940156", "text": "def del_file_from_path(path):\n if path.find(\"/\") == -1:\n return \"\"\n\n path = path.split(\"/\")\n\n if path[-1].find(\".\"):\n del path[-1]\n\n if len(path) == 1 and not path[0]:\n return \"/\"\n\n return \"/\".join(path)", "title": "" }, { "docid": "821ba2e0d39afe7cfa4c42ba891e6969", "score": "0.5693015", "text": "def datafile(filename: str) -> Path:\n if Path(filename).exists():\n return filename\n PYMOL_TESTING_ROOT = 2\n # pymol-testing root\n pymol_test_dir = Path(__file__).parents[PYMOL_TESTING_ROOT]\n return pymol_test_dir.joinpath('data', filename)", "title": "" }, { "docid": "7a1febb860835102361a1349ba9cdcad", "score": "0.5686637", "text": "def get_data_path():\n cur_file_path = realpath(__file__)\n return dirname(cur_file_path)", "title": "" }, { "docid": "0363ccad29fb67b866720fa373e0db90", "score": "0.5684355", "text": "def _mutate_filename(self, file, source):\n stem = source.getinfo(file).stem\n suffixes = ''.join(source.getinfo(file).suffixes)\n counter = 1\n path = f'{self.dst_folder}/{stem}_{counter}{suffixes}'\n while self.service.exists(path):\n counter += 1\n path = f'{self.dst_folder}/{stem}_{counter}{suffixes}'\n return path", "title": "" } ]
903dac3023dc69dc9ed9e9cd1fcc9de7
Get an ITK image from the provided vtkImageData object. This image can be passed to ITK filters.
[ { "docid": "e2e17b5d2cc45c12d57af73581bd71cd", "score": "0.76533484", "text": "def convert_vtk_to_itk_image(vtk_image_data):\n\n # Save the VTKGlue optimization for later\n #------------------------------------------\n #itk_import = itk.VTKImageToImageFilter[image_type].New()\n #itk_import.SetInput(vtk_image_data)\n #itk_import.Update()\n #itk_image = itk_import.GetOutput()\n #itk_image.DisconnectPipeline()\n #------------------------------------------\n import itk\n\n # See if we need to cast to a wrapped type in ITK.\n src_type = vtk_image_data.GetScalarType()\n dst_type = vtk_cast_map()[src_type]\n if src_type != dst_type:\n import vtk\n caster = vtk.vtkImageCast()\n caster.SetOutputScalarType(dst_type)\n caster.ClampOverflowOn()\n caster.SetInputData(vtk_image_data)\n caster.Update()\n vtk_image_data = caster.GetOutput()\n\n array = get_array(vtk_image_data)\n\n image_type = _get_itk_image_type(vtk_image_data)\n itk_converter = itk.PyBuffer[image_type]\n itk_image = itk_converter.GetImageFromArray(array)\n\n return itk_image", "title": "" } ]
[ { "docid": "e176320f3b5143b9c18fcba06ba1327c", "score": "0.67150855", "text": "def _get_itk_image_type(vtk_image_data):\n image_type = None\n\n # Get the scalars\n pd = vtk_image_data.GetPointData()\n scalars = pd.GetScalars()\n\n vtk_class_name = scalars.GetClassName()\n\n try:\n image_type = vtk_itk_type_map()[vtk_class_name]\n except KeyError:\n raise Exception('No ITK type known for %s' % vtk_class_name)\n\n return image_type", "title": "" }, { "docid": "94f9a62d3f6e22889d98e6391ba8f869", "score": "0.6112409", "text": "def image_by_data_id(self, data_id):\n img = self._butler.get(self._imagedataset_type, dataId=data_id)\n return img", "title": "" }, { "docid": "52cb8d5f89fedb621f6b27d88440fa8a", "score": "0.6085374", "text": "def vtkImageDataToWxImage(data, sliceNumber = -1, startpos = None, endpos = None, ctf = None):\n\tif sliceNumber >= 0:\n\t\tdata = getSlice(data, sliceNumber, startpos, endpos)\n\tif ctf != None:\n\t\tdata = imageDataTo3Component(data, ctf)\n\texporter = vtk.vtkImageExport()\n\tdata.SetUpdateExtent(data.GetWholeExtent())\n\tdata.Update()\n\texporter.SetInputConnection(data.GetProducerPort())\n\tdataMemorySize = exporter.GetDataMemorySize()\n\tformatString = \"%ds\" % dataMemorySize\n\tstructString = struct.pack(formatString, \"\")\n\texporter.SetExportVoidPointer(structString)\n\texporter.Export()\n\twidth, height = data.GetDimensions()[0:2]\n\timage = wx.EmptyImage(width, height)\n\timage.SetData(structString)\n\treturn image", "title": "" }, { "docid": "5169c3214da0b108f8148bc32956a702", "score": "0.60587776", "text": "def add_vtk_array_from_itk_image(itk_image_data, vtk_image_data, name):\n\n itk_output_image_type = type(itk_image_data)\n\n # Save the VTKGlue optimization for later\n #------------------------------------------\n # Export the ITK image to a VTK image. 
No copying should take place.\n #export_filter = itk.ImageToVTKImageFilter[itk_output_image_type].New()\n #export_filter.SetInput(itk_image_data)\n #export_filter.Update()\n\n # Get scalars from the temporary image and copy them to the data set\n #result_image = export_filter.GetOutput()\n #filter_array = result_image.GetPointData().GetArray(0)\n\n # Make a new instance of the array that will stick around after this\n # filters in this script are garbage collected\n #new_array = filter_array.NewInstance()\n #new_array.DeepCopy(filter_array) # Should be able to shallow copy?\n #new_array.SetName(name)\n\n # Set a new point data array in the dataset\n #vtk_image_data.GetPointData().AddArray(new_array)\n #------------------------------------------\n import itk\n result = itk.PyBuffer[\n itk_output_image_type].GetArrayFromImage(itk_image_data)\n set_array(vtk_image_data, result)", "title": "" }, { "docid": "b707075bc49b4decbc288d43ce22f1b0", "score": "0.5546133", "text": "def getImageData(image):\r\n\r\n # Get the numpy array data based on the input object type\r\n if isinstance(image, np.ndarray):\r\n data = image\r\n elif hasattr(image, \"getMaskedImage\"):\r\n data = image.getMaskedImage().getImage().getArray() \r\n elif hasattr(image, \"getImage\"):\r\n data = image.getImage().getArray() \r\n else:\r\n data = image.getArray()\r\n\r\n # Return the data in numpy array\r\n return data", "title": "" }, { "docid": "872ed33c11dc918d7357e10782c29540", "score": "0.54868585", "text": "def _parse_data(self, data):\n inferrable_types = (np.ndarray, torch.Tensor, sitk.SimpleITK.Image)\n if isinstance(self.data, inferrable_types):\n image_type = infer_image_type(data)\n if image_type.is_2d:\n if image_type.is_vector:\n return VectorImage2D(data)\n return ScalarImage2D(data)\n return ScalarImage3D(data)\n elif isinstance(self.data, Image):\n return Image\n else:\n return data # dict or Sample entry is not an Image", "title": "" }, { "docid": "dc90e8ba81710d27d2e49d5d6939618d", "score": "0.5466823", "text": "def getAsImage(self):\n\t\tfilter = vtk.vtkWindowToImageFilter()\n\t\tfilter.SetInput(self.renWin)\n\t\tfilter.Update()\n\t\treturn filter.GetOutput()", "title": "" }, { "docid": "f604aa9ab30cdaf91e0b1f7d955c835d", "score": "0.5270071", "text": "def get_image(self, dataset_name, id):\n img_o = self.get_image_object(dataset_name, id)\n return img_o.image", "title": "" }, { "docid": "77cad436e36913b097e644c7a3d1bc75", "score": "0.519754", "text": "def get_image ( self, object, trait, index ):\r\n return self._result_for( 'get_image', object, trait, index )", "title": "" }, { "docid": "325d155648325cd1ee02c215e5f34f87", "score": "0.5183275", "text": "def vtkImageDataToPreviewBitmap(dataunit, timepoint, color, width = 0, height = 0, getvtkImage = 0): \n\timagedata = dataunit.getMIP(timepoint, None, small = 1, noColor = 1)\n\tvtkImg = imagedata\n\n\tif not color:\n\t\tcolor = dataunit.getColorTransferFunction()\n\t\n\tctf = getColorTransferFunction(color)\n\tminval,maxval = ctf.GetRange()\n\timax = imagedata.GetScalarRange()[1]\n\tif maxval > imax:\n\t\tstep = float((maxval-minval) / imax)\n\t\tctf2 = vtk.vtkColorTransferFunction()\n\t\tLogging.info(\"Creating CTF in range %d, %d with steps %d\"%(int(minval), int(maxval), int(step)))\n\t\tfor i in range(int(minval), int(maxval), int(step)):\n\t\t\tred, green, blue = ctf.GetColor(i)\n\t\t\tctf2.AddRGBPoint(i / step, red, green, blue)\n\t\tctf = ctf2\n\tmaptocolor = 
vtk.vtkImageMapToColors()\n\tmaptocolor.SetInputConnection(imagedata.GetProducerPort())\n\tmaptocolor.SetLookupTable(ctf)\n\tmaptocolor.SetOutputFormatToRGB()\n\tmaptocolor.Update()\n\timagedata = maptocolor.GetOutput()\n\t\n\timage = vtkImageDataToWxImage(imagedata)\n\txSize, ySize = image.GetWidth(), image.GetHeight()\n\tif not width and height:\n\t\taspect = float(xSize) / ySize\n\t\twidth = aspect * height\n\tif not height and width:\n\t\taspect = float(ySize) / xSize\n\t\theight = aspect * width\n\tif not width and not height:\n\t\twidth = height = 64\n\timage.Rescale(width, height)\n\t\n\tbitmap = image.ConvertToBitmap()\n\tret = [bitmap]\n\tif getvtkImage:\n\t\tret.append(vtkImg)\n\t\treturn ret\n\treturn bitmap", "title": "" }, { "docid": "4b2ce0382aed403fd289e505250c124c", "score": "0.5154381", "text": "def get_image_object(self, dataset_name, id):\n img_o = ImageModel.from_database_and_key(dataset_name, id)\n return img_o", "title": "" }, { "docid": "372a933a19b7e7ec9456bd358d836c3a", "score": "0.51325333", "text": "def image_from_array(ary):\n \n sz = ary.shape\n dims = len(sz)\n # create the vtk image data\n img = tvtk.ImageData()\n \n if dims == 2:\n # 1D array of pixels.\n img.whole_extent = (0, sz[0]-1, 0, 0, 0, 0)\n img.dimensions = sz[0], 1, 1 \n img.point_data.scalars = ary\n \n elif dims == 3:\n # 2D array of pixels.\n img.whole_extent = (0, sz[0]-1, 0, sz[1]-1, 0, 0)\n img.dimensions = sz[0], sz[1], 1\n \n # create a 2d view of the array\n ary_2d = ary[:] \n ary_2d.shape = sz[0]*sz[1],sz[2]\n img.point_data.scalars = ary_2d\n \n else:\n raise ValueError, \"ary must be 3 dimensional.\"\n \n return img", "title": "" }, { "docid": "99471075a23f9dff7baffadc4a3acf8e", "score": "0.51101404", "text": "def image(self):\n if self._image is not None:\n return self._image\n data = h5py.File(self._datafile, \"r\")\n image = data[\"image\"]\n if self.cache_data:\n self._image = image[()]\n return image", "title": "" }, { "docid": "bd5a989982504b53e71b5f5e2ce5d864", "score": "0.50960886", "text": "def vtk_itk_type_map():\n global _vtk_to_itk_types\n\n if _vtk_to_itk_types is None:\n import itk\n _vtk_to_itk_types = {}\n\n type_map = {\n 'vtkUnsignedCharArray': 'UC3',\n 'vtkCharArray': 'SC3',\n 'vtkUnsignedShortArray': 'US3',\n 'vtkShortArray': 'SS3',\n 'vtkUnsignedIntArray': 'UI3',\n 'vtkIntArray': 'SI3',\n 'vtkFloatArray': 'F3',\n 'vtkDoubleArray': 'D3'\n }\n\n for (vtk_type, image_type) in type_map.iteritems():\n try:\n _vtk_to_itk_types[vtk_type] = getattr(itk.Image, image_type)\n except AttributeError:\n pass\n\n return _vtk_to_itk_types", "title": "" }, { "docid": "a98146e147e99f2645be9efe322286b5", "score": "0.50927305", "text": "def get_image ( self, object, trait, row, column ):\r\n return self._result_for( 'get_image', object, trait, row, column )", "title": "" }, { "docid": "709dfc3275ea092860bb02e948dd7439", "score": "0.5039214", "text": "def cast(obj: 'itkLightObject') -> \"itkBinaryReconstructionByErosionImageFilterIUC2 *\":\n return _itkBinaryReconstructionByErosionImageFilterPython.itkBinaryReconstructionByErosionImageFilterIUC2_cast(obj)", "title": "" }, { "docid": "e042f4bbe5bcdaef7618c17c1286035d", "score": "0.503895", "text": "def to_weight_image(self, data):\n if data is None:\n data = Image2D1(x_size=self.shape[2],\n y_size=self.shape[1],\n z_size=self.shape[0],\n blanking_value=self.weight.blanking_value,\n dtype=self.weight_dtype)\n elif isinstance(data, np.ndarray):\n data = Image2D1(data=data,\n blanking_value=self.weight.blanking_value,\n 
dtype=self.weight_dtype)\n return data", "title": "" }, { "docid": "a8e65cb1f679bd0084dd969852dc8398", "score": "0.50367737", "text": "def get_image(my_image):\n\n if isinstance(my_image,np.ndarray):\n return my_image\n else:\n return cv.imread(my_image)", "title": "" }, { "docid": "e091ec4bf558d89c5651831958df304b", "score": "0.4992968", "text": "def image(self):\n return Image.open(self.im_path)", "title": "" }, { "docid": "98d0cc70f8aa9b441dd33971bf145794", "score": "0.4991818", "text": "def GetMarkerImage(self) -> \"itkImageUC2 *\":\n return _itkBinaryReconstructionByErosionImageFilterPython.itkBinaryReconstructionByErosionImageFilterIUC2_GetMarkerImage(self)", "title": "" }, { "docid": "8376bba6deb01abbe068a5c5a2e7ba99", "score": "0.49888748", "text": "def GetMarkerImage(self) -> \"itkImageUS2 *\":\n return _itkBinaryReconstructionByErosionImageFilterPython.itkBinaryReconstructionByErosionImageFilterIUS2_GetMarkerImage(self)", "title": "" }, { "docid": "562855a386bb4670946b30a22fda9773", "score": "0.49861875", "text": "def imagehandler(data, imagespec):\n checkmember(imagespec, list(imagespecs.keys()), \"unknown image specification\")\n atype, etype, mode = imagespecs[imagespec.lower()]\n with six.BytesIO(data) as stream:\n img = PIL.Image.open(stream)\n img.load()\n img = img.convert(mode.upper())\n if atype == \"pil\":\n return img\n elif atype == \"numpy\":\n result = np.asarray(img)\n checkmember(result.dtype, [np.uint8])\n if etype == \"uint8\":\n return result\n else:\n return result.astype(\"f\") / 255.0\n elif atype == \"torch\":\n import torch\n\n result = np.asarray(img)\n checkmember(result.dtype, [np.uint8])\n if etype == \"uint8\":\n result = np.array(result.transpose(2, 0, 1))\n return torch.tensor(result)\n else:\n result = np.array(result.transpose(2, 0, 1))\n return torch.tensor(result) / 255.0\n return None", "title": "" }, { "docid": "8324370799978cb054f4b7fb8cc4737e", "score": "0.49813467", "text": "def get_image_info_from_image(img_itk, info=None):\n parsing_tags = DEFAULT_DICOM_TAG.copy()\n if info is not None:\n parsing_tags.update(info)\n info_dict = {tag: None for tag in parsing_tags}\n assert isinstance(img_itk, sitk.Image), \"only supports itk image as input\"\n for tag, meta_key in parsing_tags.items():\n try:\n info_dict[tag] = img_itk.GetMetaData(meta_key).strip(\" \\n\")\n except Exception:\n info_dict[tag] = None\n return info_dict", "title": "" }, { "docid": "312a98ad221c7ccb4ad43118c0ce9f66", "score": "0.49802405", "text": "def get_image_data(self, data: np.ndarray) -> Dict[str, Any]:\n if data.shape[-self.dim :] != self.shape:\n raise ValueError(\n f\"Shape {data.shape} of the data array is not compatible with grid \"\n f\"shape {self.shape}\"\n )\n\n if self.dim == 2:\n image_data = data\n elif self.dim == 3:\n image_data = data[:, :, self.shape[-1] // 2]\n else:\n raise NotImplementedError(\n \"Creating images is only implemented for 2d and 3d grids\"\n )\n\n extent: List[float] = []\n for c in self.axes_bounds[:2]:\n extent.extend(c)\n\n return {\n \"data\": image_data,\n \"x\": self.axes_coords[0],\n \"y\": self.axes_coords[1],\n \"extent\": extent,\n \"label_x\": self.axes[0],\n \"label_y\": self.axes[1],\n }", "title": "" }, { "docid": "7750ee6cfe408b80e145febf5b103b21", "score": "0.49789563", "text": "def _image_from_numpy(self, img, im_size):\n # reconstruct the image by assigning the quantized pixels to the labels\n image_reconstructed = (img * 255).astype(np.uint8)\n image_reconstructed = image_reconstructed.reshape(im_size)\n\n return 
Image.fromarray(image_reconstructed)", "title": "" }, { "docid": "e82ae97e6b3ea3391e46d28d80bc64c9", "score": "0.49785423", "text": "def get_image(self, fileid):\n image = Image.open(fileid)\n return np.array(image)", "title": "" }, { "docid": "7f669dcf8dd1cc794fac2a9f82190029", "score": "0.4976028", "text": "def cast(obj: 'itkLightObject') -> \"itkWatershedImageFilterIUC2 *\":\n return _itkWatershedImageFilterPython.itkWatershedImageFilterIUC2_cast(obj)", "title": "" }, { "docid": "18a5a1f3ce9ff1a3e47d4dfa4ffad848", "score": "0.4958249", "text": "def get_image ( self, object ):\r\n return self.image", "title": "" }, { "docid": "b16a10b4ffdf57dc07e9168b61c9f737", "score": "0.4954975", "text": "def imageData(self):\n\t\t#return self.myImageLayer.data\n\t\treturn self.filtered", "title": "" }, { "docid": "6d362c5e44258fb6f21f67d93f506907", "score": "0.4946943", "text": "def get_image(self, key=\"floats\", to_chw=True):\n tensor_rdd = callZooFunc(self.bigdl_type, \"distributedImageSetToImageTensorRdd\",\n self.value, key, to_chw)\n return tensor_rdd.map(lambda tensor: tensor.to_ndarray())", "title": "" }, { "docid": "244a942083c6efb1180f3a3b3de72885", "score": "0.49421573", "text": "def cast(obj: 'itkLightObject') -> \"itkRelabelComponentImageFilterIUC2IUC2 *\":\n return _itkRelabelComponentImageFilterPython.itkRelabelComponentImageFilterIUC2IUC2_cast(obj)", "title": "" }, { "docid": "191ef62b47b8d670137099f1e3c1128f", "score": "0.49310863", "text": "def read_image(image_file_path, image_data=None):\n\tif image_data:\n\t\timg = Image.open(io.BytesIO(image_data))\n\telse:\n\t\timg = Image.open(image_file_path)\n\t\n\treturn img", "title": "" }, { "docid": "055477e4896b3611edd72ac110db464c", "score": "0.49305508", "text": "def _get_image(self):\n raw_image = self._env.render()\n if self._pil_image and raw_image.flags['C_CONTIGUOUS']:\n # Reuse the existing image. The input image should be C-contiguous. In\n # some environments, e.g. 
Proco, this may not be the case.\n self._pil_image.frombytes(raw_image)\n else:\n self._pil_image = PIL.Image.fromarray(raw_image)\n return raw_image, _encode_image(\n self._pil_image, format='JPEG', quality=self._quality)", "title": "" }, { "docid": "7fc0779eb68c3f9c52a2f60925aab842", "score": "0.4913484", "text": "def get_image(self, **kwargs):\n return self._img", "title": "" }, { "docid": "35d226e2abb9badbc06c591c3a0a431a", "score": "0.48913258", "text": "def read_image(image_data):\n image = Image.open(io.BytesIO(image_data))\n\n try:\n image = ImageOps.exif_transpose(image)\n except TypeError:\n # capture and ignore this bug:\n # https://github.com/python-pillow/Pillow/issues/3973\n pass\n\n return image", "title": "" }, { "docid": "37aebbdba69f24016fc78d9f84958c4f", "score": "0.48871672", "text": "def as_pil_image(self):\n return PIL.Image.fromarray(self)", "title": "" }, { "docid": "0dd381f2ed86da88a970c9228aba206a", "score": "0.48816454", "text": "def get_image(self, image_id):\n raise NotImplementedError", "title": "" }, { "docid": "62a7333e307f69fd22ec65168c1b9748", "score": "0.488031", "text": "def GetMarkerImage(self) -> \"itkImageUS3 *\":\n return _itkBinaryReconstructionByErosionImageFilterPython.itkBinaryReconstructionByErosionImageFilterIUS3_GetMarkerImage(self)", "title": "" }, { "docid": "c5610f41da296de28f4998431fe8f68d", "score": "0.48695844", "text": "def get_image(img): # NOQA E501\n assert(type(img) in [np.ndarray, str])\n if type(img) is np.ndarray:\n image_org = img.copy()\n image_org = image_org.astype(np.uint8)\n elif type(img) is str:\n print(\"Processing file: \", img)\n image_org = cv2.imread(img)\n return image_org", "title": "" }, { "docid": "2e7a733bd6c539c1e4c6c17b914fa46b", "score": "0.48671815", "text": "def get_image_for_id(viva_path, dataset, image_id):\n fname = os.path.join(viva_path, '{}/pos/{}.png'.format(dataset, image_id))\n\n with open(fname, 'rb') as in_file:\n data = in_file.read()\n # Use of encoding based on: https://github.com/h5py/h5py/issues/745\n return np.fromstring(data, dtype='uint8')", "title": "" }, { "docid": "475a50a5e860304fb36ef3287a368909", "score": "0.4865261", "text": "def decode_image(self, image_data):\n image = self._session.run(self._decode,\n feed_dict={self._decode_data: image_data})\n if len(image.shape) != 3 or image.shape[2] not in (1, 3):\n raise ValueError('The image channels not supported.')\n\n return image", "title": "" }, { "docid": "d7bf2706d2ec0fc1b6d779ef7eca8697", "score": "0.48598766", "text": "def image(obj):\n return match(obj, types.image)", "title": "" }, { "docid": "bc0db4939bf82a506d8a3d7c73812884", "score": "0.48595163", "text": "def from_image(klass, img, data=None, coordmap=None, metadata=None):\n if data is None:\n data = img._data\n if coordmap is None:\n coordmap = copy(img.coordmap)\n if metadata is None:\n metadata = copy(img.metadata)\n return klass(data, coordmap, metadata)", "title": "" }, { "docid": "9d29ccdbb77ddb6748c02875a267bc8e", "score": "0.4853076", "text": "def take_image(self):\n image = Image.open(self.image)\n return image", "title": "" }, { "docid": "70608a566b5129c437867dc961999d4f", "score": "0.48442367", "text": "def cast(obj: 'itkLightObject') -> \"itkHoughTransform2DCirclesImageFilterUCULF *\":\n return _itkHoughTransform2DCirclesImageFilterPython.itkHoughTransform2DCirclesImageFilterUCULF_cast(obj)", "title": "" }, { "docid": "5b897d105ec05e9edd97fd68a5fbcfbf", "score": "0.48359346", "text": "def cast(obj: 'itkLightObject') -> \"itkRelabelComponentImageFilterIUC3IUC3 
*\":\n return _itkRelabelComponentImageFilterPython.itkRelabelComponentImageFilterIUC3IUC3_cast(obj)", "title": "" }, { "docid": "25bc1645f7c886c03a0333de95c56106", "score": "0.48351336", "text": "def image_as_array_or_pil(self) -> Union[np.ndarray, Image.Image]:\n return self.image", "title": "" }, { "docid": "0b224fb69ddef94facd12f5a9151a2af", "score": "0.48223898", "text": "def prepare_image(image_data):\n output_image = image_data\n if len(image_data.shape) > 2:\n output_image = cv2.cvtColor(image_data, cv2.COLOR_RGB2GRAY)\n if output_image.dtype != np.uint8:\n if 0.99 < np.max(output_image) <= 1.001:\n output_image = 255 * output_image\n output_image = np.asarray(output_image, dtype=np.uint8)\n return output_image", "title": "" }, { "docid": "adec0ed7304d035a1559b1ba85f80e65", "score": "0.4820401", "text": "def read_image():\n image_file = pyto.io.ImageIO()\n image_file.read(file=image_file_name)\n image = pyto.segmentation.Grey(data=image_file.data)\n return image", "title": "" }, { "docid": "6cf9eb0d16bd9e84a80749dfa0dc625b", "score": "0.48172036", "text": "def itkWatershedImageFilterIUC2_cast(obj: 'itkLightObject') -> \"itkWatershedImageFilterIUC2 *\":\n return _itkWatershedImageFilterPython.itkWatershedImageFilterIUC2_cast(obj)", "title": "" }, { "docid": "79576d3da8b55924c46143892a71c70f", "score": "0.48161685", "text": "def decode_image(self, image_data):\n image = self._session.run(self._decode,\n feed_dict={self._decode_data: image_data})\n if len(image.shape) != 3 or image.shape[2] not in (1, 3):\n raise ValueError('The image channels not supported.')\n\n return image", "title": "" }, { "docid": "47955fc1edc29551936993f72dc319cf", "score": "0.4815708", "text": "def GetMarkerImage(self) -> \"itkImageUC3 *\":\n return _itkBinaryReconstructionByErosionImageFilterPython.itkBinaryReconstructionByErosionImageFilterIUC3_GetMarkerImage(self)", "title": "" }, { "docid": "5188111b67333e5af384686a98efb19f", "score": "0.48080644", "text": "def __load_image(self, case):\n # load NIFTI image\n img_name = case + self.img_suffix\n img_path = os.path.join(self.data_path, img_name)\n image_nib = nib.load(img_path)\n\n return image_nib.get_fdata() # To numpy", "title": "" }, { "docid": "24b4963650b832d5f5f28deea938ac56", "score": "0.48065895", "text": "def cast(obj: 'itkLightObject') -> \"itkImagePCAShapeModelEstimatorIUC2IF2 *\":\n return _itkImagePCAShapeModelEstimatorPython.itkImagePCAShapeModelEstimatorIUC2IF2_cast(obj)", "title": "" }, { "docid": "3d9dffafcd5e0ea2e0744e6268828568", "score": "0.48037592", "text": "def write_image_as_vtk(image, name):\n print(\"... 
Saving Image to {:s}.vti\".format(name))\n a = numpy_support.numpy_to_vtk(sitk.GetArrayFromImage(image).ravel(),\n deep=True, array_type=vtk.VTK_FLOAT)\n vtk_img = vtk.vtkImageData()\n vtk_img.SetOrigin(image.GetOrigin())\n vtk_img.SetSpacing(image.GetSpacing())\n vtk_img.SetDimensions(image.GetSize())\n vtk_img.GetPointData().SetScalars(a)\n writer = vtk.vtkXMLImageDataWriter()\n writer.SetFileName(\"{:s}.vti\".format(name))\n writer.SetInputData(vtk_img)\n writer.Write()", "title": "" }, { "docid": "0ac868a265d6e687d5ef4875246f3173", "score": "0.4802272", "text": "def get_img(self):\n img = self.retrieveBuffer()\n img = img.getData()\n img = img.reshape(VID_DIM)\n return img", "title": "" }, { "docid": "79fcef29e3b08e90e56c9210212fe93b", "score": "0.47999004", "text": "def cast(obj: 'itkLightObject') -> \"itkBinaryReconstructionByErosionImageFilterIUS2 *\":\n return _itkBinaryReconstructionByErosionImageFilterPython.itkBinaryReconstructionByErosionImageFilterIUS2_cast(obj)", "title": "" }, { "docid": "c6e59bac6b55f0c62d722426022f34cf", "score": "0.479705", "text": "def vtkImageDataToPngString(data, sliceNumber = -1, startpos = None, endpos = None): \n\tif sliceNumber >= 0:\n\t\tdata = getSlice(data, sliceNumber, startpos, endpos)\n\t\t\n\tpngwriter = vtk.vtkPNGWriter()\n\tpngwriter.WriteToMemoryOn()\n\tpngwriter.SetInputConnection(data.GetProducerPort())\n\tpngwriter.Write()\n\tresult = pngwriter.GetResult()\n\tdata = \"\"\n\tfor i in range(result.GetNumberOfTuples()):\n\t\tdata += chr(result.GetValue(i))\n\treturn data", "title": "" }, { "docid": "c8604f57578844e7ff4a8b5eae93b9c8", "score": "0.47891173", "text": "def load_image(self, image_id):\n # Load image\n image = skimage.io.imread(self.image_info[image_id]['path'])\n # If grayscale. Convert to RGB for consistency.\n if image.ndim != 3:\n image = skimage.color.gray2rgb(image)\n return image", "title": "" }, { "docid": "bde7e7db8ac8cf76e4233b1367949215", "score": "0.47848296", "text": "def getImageData(self, imagetype):\n if not isinstance(imagetype, str):\n raise TypeError, \"Invalid image type: \" + str(imagetype)\n if imagetype == 'BMP':\n return self.__bmpdata\n elif imagetype == 'WMF':\n return self.__wmfdata\n else:\n raise ValueError, \"Unexpected image type: \" + imagetype", "title": "" }, { "docid": "ec3f774ceab7c1ced5eb7be2ca178e27", "score": "0.4783326", "text": "def cast(obj: 'itkLightObject') -> \"itkRelabelComponentImageFilterIUS2IUS2 *\":\n return _itkRelabelComponentImageFilterPython.itkRelabelComponentImageFilterIUS2IUS2_cast(obj)", "title": "" }, { "docid": "ea47a96be102d4cedbc98e3a3c4f9dfe", "score": "0.47814986", "text": "def get_pillow_img(imgbytes):\r\n return Image.open(BytesIO(imgbytes))", "title": "" }, { "docid": "654a3fa1c643406da722731f075284fc", "score": "0.47660065", "text": "def preprocess(self, sensor_data):\n image = self.bridge.imgmsg_to_cv2(sensor_data.images[0])\n return image", "title": "" }, { "docid": "58c375e359304aa30e90609652cadd50", "score": "0.47635078", "text": "def cast(obj: 'itkLightObject') -> \"itkBinaryReconstructionByErosionImageFilterIUC3 *\":\n return _itkBinaryReconstructionByErosionImageFilterPython.itkBinaryReconstructionByErosionImageFilterIUC3_cast(obj)", "title": "" }, { "docid": "aca89db01dd94b0ee79a72ab863ec51f", "score": "0.47558805", "text": "def GetInput(self, *args):\n return _ITKLabelMapBasePython.itkImageToImageFilterLM2IRGBUS2_GetInput(self, *args)", "title": "" }, { "docid": "626b3e66c58eaa6d4e691954f79614e5", "score": "0.47552317", "text": "def get_image(self):\n 
return self.img", "title": "" }, { "docid": "be578b1176d93437472535415e3dc58e", "score": "0.4747122", "text": "def get_image(self, key=\"floats\", to_chw=True):\n return self.image_set.get_image(key, to_chw)", "title": "" }, { "docid": "1326dc3a0b6ce5409d7d086385ba4241", "score": "0.47447252", "text": "def husk2imgDF(husk_obj):\n\n data = []\n for i in range(len(husk_obj.imgs)):\n data.append(husk_obj.imgs[i].flatten())\n \n return np.array(data)", "title": "" }, { "docid": "79ba7def0120daf0b8d0d3cc14aa70ac", "score": "0.47370225", "text": "def cast(obj: 'itkLightObject') -> \"itkHoughTransform2DCirclesImageFilterUSULF *\":\n return _itkHoughTransform2DCirclesImageFilterPython.itkHoughTransform2DCirclesImageFilterUSULF_cast(obj)", "title": "" }, { "docid": "00b25a1aaaec711d07441b4dd133263b", "score": "0.473427", "text": "def getImageObject(self, path):\n return self.b.ImageObject(path)", "title": "" }, { "docid": "2e448440c32c8839ac306ee3e936b483", "score": "0.47329572", "text": "def get_image(self):\n return pil2array(self.pil)", "title": "" }, { "docid": "eb55840b3c07e8cf29dbb7e3e3f440f0", "score": "0.47229186", "text": "def GetInput(self, *args):\n return _ITKLabelMapBasePython.itkImageToImageFilterIUC2LM2_GetInput(self, *args)", "title": "" }, { "docid": "1a2618f08354de955ffd3d242cfe3da3", "score": "0.4714866", "text": "def load_image(self, image_id):\r\n info = self.image_info[image_id]\r\n img_path = info['path']\r\n image = skimage.io.imread(img_path)\r\n return image", "title": "" }, { "docid": "66290afa8069fcdc2ae6344833a4f9e8", "score": "0.47145873", "text": "def get_image(data, n, w, h):\n start_idx = n * w * h\n end_idx = (n + 1) * w * h\n\n return data[start_idx:end_idx]", "title": "" }, { "docid": "61071d0755fd4765779a47e9001b9196", "score": "0.47097254", "text": "def get_image(self, patient_id):\n try:\n dcm_data = pydicom.read_file(f\"{TRAIN_DIR}/{patient_id}.dcm\")\n img = dcm_data.pixel_array\n return img\n except:\n pass", "title": "" }, { "docid": "6ab2db1c72185b488fca1e97b7d949ac", "score": "0.46991593", "text": "def cast(obj: 'itkLightObject') -> \"itkVotingBinaryIterativeHoleFillingImageFilterIUC2 *\":\n return _itkVotingBinaryIterativeHoleFillingImageFilterPython.itkVotingBinaryIterativeHoleFillingImageFilterIUC2_cast(obj)", "title": "" }, { "docid": "de3616ba3a4660b7caa2fa38ef9ac84e", "score": "0.46915495", "text": "def cast(obj: 'itkLightObject') -> \"itkRelabelComponentImageFilterIULL2IUC2 *\":\n return _itkRelabelComponentImageFilterPython.itkRelabelComponentImageFilterIULL2IUC2_cast(obj)", "title": "" }, { "docid": "6bdee45c2f859cb93458f51d001427d6", "score": "0.46833837", "text": "def cast(obj: 'itkLightObject') -> \"itkRelabelComponentImageFilterIULL2IUS2 *\":\n return _itkRelabelComponentImageFilterPython.itkRelabelComponentImageFilterIULL2IUS2_cast(obj)", "title": "" }, { "docid": "1b3a1f506042e3d35c1521b523499640", "score": "0.46730322", "text": "def load_image(inpFilename):\n imageO = Image.open(inpFilename)\n imageO.load()\n data = np.asarray(imageO, dtype=\"int32\")\n return data", "title": "" }, { "docid": "75100ff4e5edbf24a52d7f69f5c64044", "score": "0.4669794", "text": "def as_pil_image(self):\n try:\n bio = BytesIO()\n self._extract_direct(stream=bio)\n bio.seek(0)\n return Image.open(bio)\n except NotExtractableError:\n pass\n\n im = self._extract_transcoded()\n if not im:\n raise UnsupportedImageTypeError(repr(self))\n\n return im", "title": "" }, { "docid": "e08b12a649158cbc308a69b20d40e5a9", "score": "0.46616626", "text": "def GetInput(self, 
*args):\n return _ITKLabelMapBasePython.itkImageToImageFilterLM2IUC2_GetInput(self, *args)", "title": "" }, { "docid": "dfb670b6d56adba16dbae44f03379528", "score": "0.46587318", "text": "def _data_to_img(data, shape):\n\n return np.reshape(np.sum(data, 0), shape, 'F')", "title": "" }, { "docid": "c4a7b10f58f2125ccfa1be04ab0e2219", "score": "0.46581674", "text": "def getData(resource):\n img = mpimg.imread(resource)\n\n return img", "title": "" }, { "docid": "241b7af7c404660891e2851aa89a9c6a", "score": "0.4653962", "text": "def cmyk_to_rgb(self, image_data: bytes) -> tf.Tensor:\n return self._sess.run(\n self._cmyk_to_rgb, feed_dict={self._cmyk_data: image_data}\n )", "title": "" }, { "docid": "671337884f35fcac05da4fe25cb353b8", "score": "0.46517116", "text": "def GetMaskImage(self) -> \"itkImageUC2 *\":\n return _itkBinaryReconstructionByErosionImageFilterPython.itkBinaryReconstructionByErosionImageFilterIUC2_GetMaskImage(self)", "title": "" }, { "docid": "d24f963e200ee06e063d7f124792d0c0", "score": "0.46431747", "text": "def datum(self) -> ImageDatum:\n pass", "title": "" }, { "docid": "478636a1be2c6bb278a51198d1a58ef2", "score": "0.46416092", "text": "def get_processed_data(img, img_size):\n img = img.resize((img_size, img_size), resample=Image.ANTIALIAS)\n img_data = np.asarray(img, dtype=np.uint8).reshape(img_size, img_size, 1)\n img_data = img_data / 255.\n return img_data", "title": "" }, { "docid": "9713e4bcbda3caabeea904c99a2dad2b", "score": "0.4641561", "text": "def cast(obj: 'itkLightObject') -> \"itkRelabelComponentImageFilterIUS3IUS3 *\":\n return _itkRelabelComponentImageFilterPython.itkRelabelComponentImageFilterIUS3IUS3_cast(obj)", "title": "" }, { "docid": "849ec4089c66eb53a0cbde2bf10b7cb0", "score": "0.46392527", "text": "def image_obj(self) -> DicomImage:\n response_pix_dicom_image = self.osirix_service.DCMPixDicomImage(self.osirixrpc_uid)\n return DicomImage(response_pix_dicom_image, self.osirix_service)", "title": "" }, { "docid": "f43452970d7359195eb538aab3a2ec14", "score": "0.46373433", "text": "def cast(obj: 'itkLightObject') -> \"itkFFTShiftImageFilterIUC2IUC2 *\":\n return _itkFFTShiftImageFilterPython.itkFFTShiftImageFilterIUC2IUC2_cast(obj)", "title": "" }, { "docid": "bdd642ec0d9dc765e085d3924d517c20", "score": "0.46333078", "text": "def imageInput(request, fieldId, error=None, fieldData=None):\n # Recover the requested die image and its corresponding die\n dieField = get_object_or_404(TypedDie, id=fieldId)\n di = dieField.dieImage\n d = di.die\n\n # Populate the form with the raw data from the previous submit if there was an error\n if error and fieldData:\n form = MonkeyTyperForm(instance=dieField, initial={'typedField': fieldData})\n else:\n form = MonkeyTyperForm(instance=dieField)\n\n # Display the input page\n context = {\n 'die': d,\n 'dieImage': di,\n 'typedDie': dieField,\n 'form' : form,\n 'error' : error,\n }\n return render(request, 'typer/imageInput.html', context)", "title": "" }, { "docid": "a2c69e860870501c435f7a8eedca9080", "score": "0.46274912", "text": "def GetMaskImage(self) -> \"itkImageUS2 *\":\n return _itkBinaryReconstructionByErosionImageFilterPython.itkBinaryReconstructionByErosionImageFilterIUS2_GetMaskImage(self)", "title": "" }, { "docid": "fd117d48f76141aa816f7d3a39e3e892", "score": "0.4622808", "text": "def getInfoImage(infotag):\n raise NotImplementedError", "title": "" }, { "docid": "da7dce7fb8801a4a5a4bcf1cdf9646c1", "score": "0.46226832", "text": "def image(self):\n return self._image", "title": "" }, { "docid": 
"da7dce7fb8801a4a5a4bcf1cdf9646c1", "score": "0.46226832", "text": "def image(self):\n return self._image", "title": "" }, { "docid": "34edcb7e2d48e6d3feb86363ba1f94cd", "score": "0.46221504", "text": "def get_image(image):\n img_open = gdal.Open(image)\n img_array = img_open.GetRasterBand(1).ReadAsArray()\n return img_array", "title": "" }, { "docid": "d8f8224a5483fd586f1696843c515a62", "score": "0.46171257", "text": "def to_image(self):\n return self.GetInput()", "title": "" }, { "docid": "29d65446bebd7762384103ef596db8a2", "score": "0.46103534", "text": "def cast(obj: 'itkLightObject') -> \"itkSliceBySliceImageFilterIUC3IUC3 *\":\n return _itkSliceBySliceImageFilterPython.itkSliceBySliceImageFilterIUC3IUC3_cast(obj)", "title": "" }, { "docid": "3ca682808e727ace3d5be257fbb50ff9", "score": "0.46076113", "text": "def cast(obj: 'itkLightObject') -> \"itkRelabelComponentImageFilterIULL3IUC3 *\":\n return _itkRelabelComponentImageFilterPython.itkRelabelComponentImageFilterIULL3IUC3_cast(obj)", "title": "" }, { "docid": "6df080d007f3b3ed26f07df8cb4efc9e", "score": "0.46070728", "text": "def GetInput(self, *args):\n return _ITKLabelMapBasePython.itkImageToImageFilterIUL2LM2_GetInput(self, *args)", "title": "" } ]
7253bba8b309753fbdfab01a69fb5fa9
This function loads data into staging_tables
[ { "docid": "b32976dea2fe82570a2bba3336c153bb", "score": "0.7094328", "text": "def load_staging_tables(cur, conn):\n for query in copy_table_queries:\n try:\n # Execute query and commit the results\n cur.execute(query)\n conn.commit()\n except Exception as ex:\n print(f\"Exception {str(ex)} occured while loading staging table {query}\")", "title": "" } ]
[ { "docid": "e238c7a932e2d6af5f2a180e2566e45b", "score": "0.77354974", "text": "def load_data_into_table(client, staging_table):\n\n dir_name = 'ad_data'\n for filename in os.listdir(dir_name):\n rows = []\n logging.info(f'Reading file: {filename}')\n with jsonlines.open(f'{dir_name}/{filename}') as reader:\n for obj in reader:\n rows.append(obj)\n\n # There's may be a json at the end with no rows so don't try upload\n # that as it'll cause an error\n if rows:\n logging.info(f'Uploading file: {filename}')\n errors = client.insert_rows(staging_table, rows)\n\n # Ignore errors. There enough data being inserted for them to not be\n # significant.\n # if errors:\n # raise Exception(\n # f'Errors found when loading data into staging table: {errors}'\n # )", "title": "" }, { "docid": "3590ab8e7770719f1c99c1ba25e272ea", "score": "0.7571734", "text": "def load_staging_tables(cur, conn):\n print(\"Loading staging tables...\")\n for query in copy_table_queries:\n cur.execute(query)\n conn.commit()", "title": "" }, { "docid": "392848daabe41ca23ac6e60c9e09dcab", "score": "0.751043", "text": "def load_staging_tables(cur, conn):\n \n #Listing table names and initiate index\n staging_tables = ['staging_events', 'staging_songs']\n index = 0\n \n #Iterate through queries to load staging tables\n for query in copy_table_queries: \n print('Copying staging table {}'.format(staging_tables[index]))\n index += 1\n cur.execute(query)\n conn.commit()", "title": "" }, { "docid": "f37ce6bb1652c8877aafe580b815ac7a", "score": "0.734206", "text": "def load_staging_tables(cur, conn):\n cnt = 0\n for query in copy_table_queries:\n print(\"Copying data into {}...\".format(copy_table_order[cnt]))\n cur.execute(query)\n conn.commit()\n cnt = cnt + 1\n print(\"Loading Completed!\")", "title": "" }, { "docid": "727207e646d7f602bb4942115a4a2d14", "score": "0.7307328", "text": "def create_and_load_staging_table(**kwargs):\n\n client = bigquery.Client()\n\n dataset = client.dataset(kwargs['params']['dataset_name'])\n staging_table = create_staging_table(\n client,\n dataset,\n kwargs['params']['staging_table_name'],\n )\n load_data_into_table(client, staging_table)", "title": "" }, { "docid": "f0c2b6ee84d5865a313eacd38ada2689", "score": "0.7301916", "text": "def load_staging_tables(cur, conn): \n for query in copy_table_queries:\n cur.execute(query)\n conn.commit()", "title": "" }, { "docid": "cf87639424267de38ca492c69e69a1f1", "score": "0.7221655", "text": "def load_staging_tables(cur, conn):\n for query in copy_table_queries:\n cur.execute(query)\n conn.commit()", "title": "" }, { "docid": "cf87639424267de38ca492c69e69a1f1", "score": "0.7221655", "text": "def load_staging_tables(cur, conn):\n for query in copy_table_queries:\n cur.execute(query)\n conn.commit()", "title": "" }, { "docid": "cf87639424267de38ca492c69e69a1f1", "score": "0.7221655", "text": "def load_staging_tables(cur, conn):\n for query in copy_table_queries:\n cur.execute(query)\n conn.commit()", "title": "" }, { "docid": "cf87639424267de38ca492c69e69a1f1", "score": "0.7221655", "text": "def load_staging_tables(cur, conn):\n for query in copy_table_queries:\n cur.execute(query)\n conn.commit()", "title": "" }, { "docid": "cf87639424267de38ca492c69e69a1f1", "score": "0.7221655", "text": "def load_staging_tables(cur, conn):\n for query in copy_table_queries:\n cur.execute(query)\n conn.commit()", "title": "" }, { "docid": "cf87639424267de38ca492c69e69a1f1", "score": "0.7221655", "text": "def load_staging_tables(cur, conn):\n for query in copy_table_queries:\n 
cur.execute(query)\n conn.commit()", "title": "" }, { "docid": "09d96632eedca446a453378245ea03e5", "score": "0.71831083", "text": "def load_staging_tables(cur, conn):\n\n for query in copy_table_queries:\n cur.execute(query)\n conn.commit()", "title": "" }, { "docid": "5034d356171fae951c78886fa767971d", "score": "0.7148097", "text": "def load_staging_tables(cur, conn):\n ITEM = 0\n for query in copy_table_queries:\n print(\"Copying data into {}...\".format(copy_table_order[ITEM]))\n cur.execute(query)\n conn.commit()\n ITEM = ITEM + 1\n print(f\"Done loading item nr. {ITEM}\")", "title": "" }, { "docid": "14ee48b41cc04afecdfbd1065bac82fb", "score": "0.6989783", "text": "def load_staging_tables(cur, conn):\n LOGGER.info(\"Running staging queries\")\n for query in copy_table_queries:\n LOGGER.info(\"Running query: %s\" %query)\n cur.execute(query)\n conn.commit()", "title": "" }, { "docid": "098061a8be0b3238cd6e3b507f20b0d0", "score": "0.69746774", "text": "def load_staging_tables(cur, conn, query_params):\n for query, params in list(zip(copy_table_queries, query_params)):\n cur.execute(query.format(*params))\n conn.commit()", "title": "" }, { "docid": "198a22c097abbc0c6bebae85d46c11b8", "score": "0.6833821", "text": "def load_staging_tables(cur, conn):\n for query in copy_table_queries:\n table_name = re.findall(r\"COPY\\ (.+?)\\ from\", query)\n try:\n cur.execute(query)\n conn.commit()\n print(\"'{}' COPY Successful...!!!\".format(table_name[0]))\n except psycopg2.Error as e:\n print(\"Error----->\", e)", "title": "" }, { "docid": "965f2f85fb046f0c4822548b87a81f1d", "score": "0.67303824", "text": "def load_staging_tables(cur, conn):\n for query in copy_table_queries:\n try:\n cur.execute(query)\n conn.commit()\n except Exception as exc:\n print('Unexpected error running copy query: {} {}'.format(query, exc))\n cur.execute('rollback')", "title": "" }, { "docid": "e8e0d1b4d80c2b0a5380241e313fd864", "score": "0.6639422", "text": "def read_data():\n # get the database conenctino refrence\n engine = get_db_connection()\n\n lookup_tbl_df = pd.read_csv(lookup_file_name)\n trip_data_df = pd.read_csv(data_file_name)\n trip_data_df = trip_data_df.head(100000)\n\n # store the data to staging tables\n lookup_tbl_df.to_sql('stg_lookup_data', engine, ifexist=\"replace\")\n\n # store the data to staging tables\n trip_data_df.to_sql('stg_rtip_data', engine, ifexist=\"replace\")", "title": "" }, { "docid": "2ef9e82757f5af05424791f33e859ab4", "score": "0.6628652", "text": "def load_staging_tables(cur, conn, bucket_config):\n for idx, query in enumerate(copy_table_queries):\n print(f'Executing copy query: {query.format(*bucket_config[idx])}')\n try:\n cur.execute(query.format(*bucket_config[idx]))\n conn.commit()\n except Exception as err:\n print_copy_diagnostics(cur, conn, err)\n raise\n print('Completed insert query.')", "title": "" }, { "docid": "45ce159fa957d62b7440f645824aa598", "score": "0.65068203", "text": "def load_data_to_glue_database(self):\n for table_name, data_paths in self.meta_and_files.items():\n out_path = os.path.join(\n self.database_path,\n table_name,\n f\"release={self.release}\",\n f\"{table_name}_{self.release}.parquet.snappy\",\n )\n read_csv_write_to_parquet(\n data_paths[\"data_path\"], out_path, data_paths[\"meta_path\"]\n )", "title": "" }, { "docid": "18f01ab68795729375dada890953f525", "score": "0.6370444", "text": "def get_ads_and_upload_to_bq_staging(**kwargs):\n get_and_save_ad_data()\n create_and_load_staging_table(**kwargs)", "title": "" }, { "docid": 
"38854df3878cce210446dc235361992c", "score": "0.6325147", "text": "def loadData(data, tablename, **kwargs):\n conn = psycopg2.connect(user='postgres', password='MIDSw210SDOH', host='35.199.151.123',port='5432')\n cur = conn.cursor()\n\n if tablename == 'SDOH_Model_User_Tweet_History':\n insert_query = 'INSERT INTO SDOH_Model_User_Tweet_History (handle, tweet_text, tweet_id, tweet_datetime) VALUES %s'\n rows = []\n pkeys = []\n cur.execute('SELECT tweet_id FROM %s' % tablename)\n current_pkeys = cur.fetchall()\n current_pkeys_list = [k[0] for k in current_pkeys]\n print len(data)\n for recent_tweet in data:\n if (str(recent_tweet['id']) in current_pkeys_list) or (str(recent_tweet['id']) in pkeys):\n pass\n else:\n handle = recent_tweet['user']['screen_name']\n tweet_text = recent_tweet['text']\n tweet_id = recent_tweet['id']\n tweet_timestamp = recent_tweet['created_at']\n rows.append((handle, tweet_text, tweet_id, tweet_timestamp))\n pkeys.append(tweet_id)\n try:\n print '%s rows to insert...chunking into 100s...' % len(rows)\n i = 0\n while i < len(rows):\n if i + 10 > len(rows):\n psycopg2.extras.execute_values(cur, insert_query, rows[i:len(rows)])\n conn.commit()\n print 'inserted row %s to %s.' % (i, len(rows))\n print [r[2] for r in rows[i:len(rows)]]\n else:\n psycopg2.extras.execute_values(cur, insert_query, rows[i:i+10])\n conn.commit()\n print 'inserted row %s to %s.' % (i, i+9)\n print [r[2] for r in rows[i:i+10]]\n i += 10\n cur.close()\n print 'data loaded to %s.' % tablename\n except Exception as e:\n print e\n elif tablename == 'SDOH_Model_User':\n insert_query = 'INSERT INTO SDOH_Model_User (handle, marker_tweet, marker_tweet_id, marker_tweet_datetime, search_phrase, sdoh_model, label) VALUES %s'\n rows = []\n pkeys = []\n cur.execute('SELECT marker_tweet_id FROM %s;' % tablename)\n current_pkeys = cur.fetchall()\n current_pkeys_list = [k[0] for k in current_pkeys]\n for user_info in data:\n if (str(user_info['marker_tweet_id']) in current_pkeys_list) or (user_info['marker_tweet_id'] in pkeys):\n pass\n\n else:\n handle = user_info['username']\n marker_tweet = user_info['marker_tweet']\n marker_tweet_id = user_info['marker_tweet_id']\n if user_info['marker_tweet_date']:\n marker_tweet_datetime = datetime.datetime.strptime(user_info['marker_tweet_date'], \"%H:%M %p - %d %b %Y\")\n else:\n marker_tweet_datetime = None\n label = user_info['label']\n search_phrase = user_info['key_phrase']\n sdoh_model = kwargs['sdoh']\n rows.append((handle, marker_tweet, marker_tweet_id, marker_tweet_datetime, search_phrase, sdoh_model, label))\n pkeys.append(marker_tweet_id)\n try:\n print '%s rows to insert...chunking into 100s...' % len(rows)\n i = 0\n while i < len(rows):\n if i + 100 > len(rows):\n psycopg2.extras.execute_values(cur, insert_query, rows[i:len(rows)])\n conn.commit()\n print 'inserted row %s to %s.' % (i, len(rows))\n else:\n psycopg2.extras.execute_values(cur, insert_query, rows[i:i+100])\n conn.commit()\n print 'inserted row %s to %s.' % (i, i+99)\n i += 100\n cur.close()\n print 'data loaded to %s.' 
% tablename\n except Exception as e:\n print e\n elif tablename == 'SDOH_Model_User_Profile_Detail':\n insert_query = 'INSERT INTO SDOH_Model_User_Profile_Detail (handle, latitude, longitude, gender, follower_count, favorites_count, friends_count, bot_likelihood) VALUES %s'\n rows = []\n pkeys = []\n cur.execute('SELECT handle FROM %s' % tablename)\n current_pkeys = cur.fetchall()\n current_pkeys_list = [k[0] for k in current_pkeys]\n for twitter_profile in data:\n if twitter_profile['screen_name'] in current_pkeys_list or twitter_profile['screen_name'] in pkeys:\n pass\n else:\n handle = twitter_profile['screen_name']\n latitude = twitter_profile['latitude']\n longitude = twitter_profile['longitude']\n gender = twitter_profile['gender']\n follower_count = twitter_profile['followers_count']\n favorites_count = twitter_profile['favourites_count']\n friends_count = twitter_profile['friends_count']\n bot_likelihood = twitter_profile['bot_likelihood']\n rows.append((handle, latitude, longitude, gender, follower_count, favorites_count, friends_count, bot_likelihood))\n pkeys.append(handle)\n try:\n print '%s rows to insert...chunking into 100s...' % len(rows)\n i = 0\n while i < len(rows):\n if i + 100 > len(rows):\n psycopg2.extras.execute_values(cur, insert_query, rows[i:len(rows)])\n conn.commit()\n print 'inserted row %s to %s.' % (i, len(rows))\n else:\n psycopg2.extras.execute_values(cur, insert_query, rows[i:i+100])\n conn.commit()\n print 'inserted row %s to %s.' % (i, i+99)\n i += 100\n cur.close()\n print 'data loaded to %s.' % tablename\n except Exception as e:\n print e\n elif tablename == 'Disease_Subject_User':\n insert_query = 'INSERT INTO Disease_Subject_User (handle, marker_tweet, marker_tweet_id, search_phrase, disease_population, marker_tweet_datetime) VALUES %s'\n rows = []\n pkeys = []\n cur.execute('SELECT marker_tweet_id FROM %s' % tablename)\n current_pkeys = cur.fetchall()\n current_pkeys_list = [k[0] for k in current_pkeys]\n for user_info in data:\n if (str(user_info['marker_tweet_id']) in current_pkeys_list) or (user_info['marker_tweet_id'] in pkeys):\n pass\n else:\n handle = user_info['username']\n marker_tweet = user_info['marker_tweet']\n marker_tweet_id = user_info['marker_tweet_id']\n search_phrase = user_info['key_phrase']\n disease_population = kwargs['disease_population']\n if user_info['marker_tweet_date']:\n marker_tweet_datetime = datetime.datetime.strptime(user_info['marker_tweet_date'], \"%H:%M %p - %d %b %Y\")\n else:\n marker_tweet_datetime = None\n rows.append((handle, marker_tweet, marker_tweet_id, search_phrase, disease_population, marker_tweet_datetime))\n pkeys.append(marker_tweet_id)\n try:\n print '%s rows to insert...chunking into 100s...' % len(rows)\n i = 0\n while i < len(rows):\n if i + 100 > len(rows):\n psycopg2.extras.execute_values(cur, insert_query, rows[i:len(rows)])\n conn.commit()\n print 'inserted row %s to %s.' % (i, len(rows))\n else:\n psycopg2.extras.execute_values(cur, insert_query, rows[i:i+100])\n conn.commit()\n print 'inserted row %s to %s.' % (i, i+99)\n i += 100\n cur.close()\n print 'data loaded to %s.' 
% tablename\n except Exception as e:\n print e\n elif tablename == 'Disease_Subject_User_Profile_Detail':\n insert_query = 'INSERT INTO Disease_Subject_User_Profile_Detail (handle, latitude, longitude, gender, follower_count, favorites_count, friends_count, bot_likelihood) VALUES %s'\n rows = []\n pkeys = []\n cur.execute('SELECT handle FROM %s' % tablename)\n current_pkeys = cur.fetchall()\n current_pkeys_list = [k[0] for k in current_pkeys]\n for twitter_profile in data:\n if twitter_profile['screen_name'] in current_pkeys_list or twitter_profile['screen_name'] in pkeys:\n pass\n else:\n handle = twitter_profile['screen_name']\n latitude = twitter_profile['latitude']\n longitude = twitter_profile['longitude']\n gender = twitter_profile['gender']\n follower_count = twitter_profile['followers_count']\n favorites_count = twitter_profile['favourites_count']\n friends_count = twitter_profile['friends_count']\n bot_likelihood = twitter_profile['bot_likelihood']\n rows.append((handle, latitude, longitude, gender, follower_count, favorites_count, friends_count, bot_likelihood))\n pkeys.append(handle)\n try:\n print '%s rows to insert...chunking into 100s...' % len(rows)\n i = 0\n while i < len(rows):\n if i + 100 > len(rows):\n psycopg2.extras.execute_values(cur, insert_query, rows[i:len(rows)])\n conn.commit()\n print 'inserted row %s to %s.' % (i, len(rows))\n else:\n psycopg2.extras.execute_values(cur, insert_query, rows[i:i+100])\n conn.commit()\n print 'inserted row %s to %s.' % (i, i+99)\n i += 100\n cur.close()\n print 'data loaded to %s.' % tablename\n except Exception as e:\n print e\n elif tablename == 'Disease_Subject_User_Tweet_History':\n insert_query = 'INSERT INTO Disease_Subject_User_Tweet_History (handle, tweet_text, tweet_id, tweet_datetime) VALUES %s'\n rows = []\n pkeys = []\n cur.execute('SELECT tweet_id FROM %s' % tablename)\n current_pkeys = cur.fetchall()\n current_pkeys_list = [k[0] for k in current_pkeys]\n for recent_tweet in data:\n if (str(recent_tweet['id']) in current_pkeys_list) or (str(recent_tweet['id']) in pkeys):\n pass\n else:\n handle = recent_tweet['user']['screen_name']\n tweet_text = recent_tweet['text']\n tweet_id = recent_tweet['id']\n tweet_timestamp = recent_tweet['created_at']\n rows.append((handle, tweet_text, tweet_id, tweet_timestamp))\n pkeys.append(tweet_id)\n try:\n print '%s rows to insert...chunking into 100s...' % len(rows)\n i = 0\n while i < len(rows):\n if i + 100 > len(rows):\n psycopg2.extras.execute_values(cur, insert_query, rows[i:len(rows)])\n conn.commit()\n print 'inserted row %s to %s.' % (i, len(rows))\n else:\n psycopg2.extras.execute_values(cur, insert_query, rows[i:i+100])\n conn.commit()\n print 'inserted row %s to %s.' % (i, i+99)\n i += 100\n cur.close()\n print 'data loaded to %s.' 
% tablename\n except Exception as e:\n print e", "title": "" }, { "docid": "4e82a0e3d31501098655df331260fb3b", "score": "0.6206383", "text": "def LoadDataset(self, source_bucket, tables, dataset=None):\n raise NotImplementedError", "title": "" }, { "docid": "e54f680fdec82f292aaaf0c474ccd376", "score": "0.61846745", "text": "def create_full_load_yml(schemaInfo,create_schema):\n\n make_tab_dir()\n yml_file = get_yml_file()\n dsn = conf[\"dsn\"]\n\n etl_db = EtlStatusDb(conf['etl_status_dsn'])\n\n final_tab_loc = final_tab_external_loc()\n logging.info(\"Full Table being loaded from %s\"%final_tab_loc)\n\n pattern=\"part*\" if final_tab_loc.endswith(\"_sqoop\") else \"*\"\n\n task_hash = {}\n task_hash[\"start_task\"] = { \"class\" : \"NopTask\" }\n\n tab = target_table_name()\n\n teradata_dsn = conf.get('teradata_dsn', None)\n teradata_final_schema = conf.get('teradata_final_schema', None)\n\n targets = get_targets()\n current_task = \"start_task\"\n\n for (target_name, target_dsn, target_schema, table_prefix,target_staging_schema,target_view_schema) in targets:\n #final table name\n td_table_name = get_string_shortener().shorten(table_prefix + tab)\n #staging table name\n full_stg_table_name = \"stg_\"+td_table_name\n stg_table_name = full_stg_table_name[0:30]\n stg_schema = target_staging_schema\n view_schema = target_view_schema\n\n new_task = \"check_if_stg_table_exist\" + target_name\n task_hash[new_task] = check_if_table_exist(current_task, schemaInfo, target_dsn, stg_schema, stg_table_name,'true')\n current_task = new_task\n\n new_task = \"check_column_count_in_stg_table\" + target_name\n task_hash[new_task] = check_column_count(current_task, schemaInfo, target_dsn, stg_schema, stg_table_name,'true')\n current_task = new_task\n\n new_task = \"create_stg_teradata_table\" + target_name\n task_hash[new_task] = create_teradata_table(current_task, schemaInfo, target_dsn, stg_schema, stg_table_name)\n current_task = new_task\n\n new_task = \"delete_stg_teradata_tab_\" + target_name\n task_hash[new_task] = delete_teradata_table(current_task, schemaInfo, target_dsn, stg_schema, stg_table_name)\n current_task = new_task\n\n new_task = \"load_teradata_stg_tab_\" + target_name\n task_hash[new_task] = loadFromHDFS_task(current_task, schemaInfo, final_tab_loc, target_dsn, stg_schema, stg_table_name, target_name, pattern)\n current_task = new_task\n\n new_task = \"check_column_count_in_final_table\" + target_name\n task_hash[new_task] = check_column_count(current_task, schemaInfo, target_dsn, target_schema, td_table_name,create_schema)\n current_task = new_task\n\n new_task = \"check_if_td_table_exist\" + target_name\n task_hash[new_task] = check_if_table_exist(current_task, schemaInfo, target_dsn, target_schema, td_table_name,create_schema)\n current_task = new_task\n\n new_task = \"create_teradata_tab_\" + target_name\n task_hash[new_task] = create_teradata_table(current_task, schemaInfo, target_dsn, target_schema, td_table_name,view_schema)\n current_task = new_task\n\n new_task = \"load_final_teradata_table_\" + target_name\n task_hash[new_task] = load_final_teradata_table(current_task, schemaInfo, target_dsn, target_schema, td_table_name,stg_schema,stg_table_name)\n current_task = new_task\n\n task_hash[\"end_task\"] = { \"class\" : \"NopTask\" ,\n \"dependencies\": [ current_task ] }\n\n task_hash['settings'] = { \"parallelism\" : conf['dop'], \"wf_cleanup\" : 0 }\n\n logging.info(\"yml_file: %s\" % yml_file)\n with open(yml_file, \"w\") as fp:\n yml = yaml_util.dump(task_hash )\n print 
>>fp, yml", "title": "" }, { "docid": "7b774fad1ad860181f8229728225f0c6", "score": "0.611004", "text": "def _data_load(self):\n if self.bulk_load:\n self._bulkinsertdep()\n else:\n self._insertdep()", "title": "" }, { "docid": "d6d50a9d0aec40a56501e3f7b4ee9171", "score": "0.5982655", "text": "def load_data_to_bq(self):\n # for newest data\n bigquery_client = bigquery.Client(CONFIG['project'])\n # print(bigquery_client)\n destination_dataset_ref = bigquery_client.dataset(CONFIG['dataset'])\n destination_table_ref = destination_dataset_ref.table(self.table_destination + '$' + self.date_nodash)\n job_config = bigquery.LoadJobConfig()\n job_config.create_disposition = bigquery.CreateDisposition.CREATE_IF_NEEDED\n job_config.write_disposition = bigquery.WriteDisposition.WRITE_TRUNCATE\n job_config.source_format = bigquery.SourceFormat.NEWLINE_DELIMITED_JSON\n #using partition by ingestion Time\n job_config.time_partitioning = bigquery.TimePartitioning(type_=bigquery.TimePartitioningType.DAY)\n \n with open(self.path, 'rb') as f:\n job = bigquery_client.load_table_from_file(f, destination_table_ref, job_config=job_config)\n job.result()\n print('----->>>> '+self.path+' has success to load!')\n os.remove(self.path)", "title": "" }, { "docid": "cc8bec7fa5b2a72aa44d44df981bb178", "score": "0.59596705", "text": "def loadTables(self):\n pass", "title": "" }, { "docid": "2e1fecf8c43d6cae0e792ccf26f2679b", "score": "0.5920159", "text": "def main():\n config = configparser.ConfigParser()\n config.read('dwh.cfg')\n\n conn = psycopg2.connect(\"host={} dbname={} user={} password={} port={}\".format(*config['CREDENTIALS'].values()))\n cur = conn.cursor()\n \n start = timer()\n load_staging_tables(cur, conn)\n end = timer()\n \n print(\"Loading the staging tables took {} seconds to run\".format((end-start)))\n\n insert_tables(cur, conn)\n print(\"ETL complete\")\n\n conn.close()", "title": "" }, { "docid": "00a0fd906cdae52fa26fafacf6e7862b", "score": "0.5888173", "text": "def ingest(dbname, filename, dt, lt, cname, stname):\n db = dbio.connect(dbname)\n cur = db.cursor()\n schemaname, tablename = stname.split(\".\")\n cur.execute(\n \"select * from information_schema.tables where table_schema='{0}' and table_name='{1}'\".format(schemaname, tablename))\n if not bool(cur.rowcount):\n cur.execute(\"create table {0}.{1} (rid serial not null primary key, fdate date, tercile text, leadtime int, rast raster)\".format(\n schemaname, tablename))\n db.commit()\n cur.execute(\"select * from {0} where fdate='{1}' and tercile = '{2}' and leadtime = {3}\".format(stname, dt.strftime(\"%Y-%m-%d\"), cname, lt))\n if bool(cur.rowcount):\n cur.execute(\"delete from {0} where fdate='{1}' and tercile = '{2}' and leadtime = {3}\".format(stname, dt.strftime(\"%Y-%m-%d\"), cname, lt))\n db.commit()\n dbio.ingest(dbname, filename, dt, stname, False, False)\n sql = \"update {0} set tercile = '{1}' where tercile is null\".format(\n stname, cname)\n cur.execute(sql)\n sql = \"update {0} set leadtime = '{1}' where leadtime is null\".format(\n stname, lt)\n cur.execute(sql)\n db.commit()\n cur.close()", "title": "" }, { "docid": "9fc92dcce864682373194c512ff49a10", "score": "0.58740985", "text": "def load_data(self):\n\n for k in self.table_names.copy():\n try:\n self.tables[k] = pd.read_csv(\n os.path.join(self.save_dir, self.prfx + f\"{k}.csv\")\n )\n except FileNotFoundError:\n self.table_names.remove(k)", "title": "" }, { "docid": "480018e8218012f9ab89cd3c1a5bd594", "score": "0.58454496", "text": "def readData(self):\n\tfrom tools 
import mytables\n\tself.data = mytables.load(self.inputFile)", "title": "" }, { "docid": "866b7b03c2575cf0b15bf5ead2487df4", "score": "0.57435966", "text": "def load_data(input_tuple: Tuple) -> None:\n db_conn_string, table_class, records = input_tuple\n engine = create_engine(db_conn_string)\n Session = sessionmaker(bind=engine)\n sess = Session()\n\n sess.bulk_insert_mappings(table_class, records)\n count = sess.query(table_class).count()\n\n sess.commit()\n sess.close()", "title": "" }, { "docid": "5efe77e7d2109de2685592fac4b414d7", "score": "0.5691603", "text": "def load_tables(table_points, table_regions, table_annotations):\n points = pd.read_csv(table_points, header=0).astype(np.int)\n regions = pd.read_csv(table_regions, header=0)\n annotations = pd.read_csv(table_annotations, header=0)\n return points, regions, annotations", "title": "" }, { "docid": "f3114efa1cf864ae4463834410a9c3f7", "score": "0.56613195", "text": "def load_data() -> List[Tuple]:\n return db_query(f\"SELECT * FROM {table_name};\")", "title": "" }, { "docid": "8418aa461650ab527e160f07e6abc846", "score": "0.5659005", "text": "def load_data(data):", "title": "" }, { "docid": "46cb926c15cc69aac48b0081beba46ea", "score": "0.56585556", "text": "def import_table_data(table_importer, datasource, output_sytype,\n parameters, manage_input):\n dspath = datasource.decode_path()\n importer = table_importer(dspath, parameters)\n if importer.is_table():\n # This is a special case where the Table file should be\n # copied into the platform.\n in_datafile = table.File(filename=dspath, mode='r')\n output_sytype.source(in_datafile)\n manage_input(dspath, in_datafile)\n else:\n importer.import_data(output_sytype, parameters)\n if not output_sytype.get_name():\n output_sytype.set_name(os.path.splitext(\n os.path.basename(dspath))[0])", "title": "" }, { "docid": "8d6fa12ada927c165a702a4820735cf9", "score": "0.56340736", "text": "def pump_table(self, bucket, source_schema, target_schema, source_creds,\n target_creds, source_table, target_table, delimiter=None):\n\n if delimiter is None:\n delimiter = '|'\n\n self.unload_to_s3(bucket, source_schema, source_table, source_creds, delimiter)\n self.copy_from_s3(bucket, target_schema, target_table, target_creds, delimiter)", "title": "" }, { "docid": "dc4132dceba1f4313f49918a240a8f32", "score": "0.56073904", "text": "def execute(self, context):\n credentials = AwsHook(self.aws_credentials_id).get_credentials()\n self.log.info(\"Creating Redshift Connection\")\n redshift = PostgresHook(postgres_conn_id=self.redshift_conn_id)\n self.log.info(\"Redshift Connection Established\")\n\n if self.create_table_query:\n self.log.info(\n f\"Creating Target Table If Not Exists: {self.target_table}\"\n )\n redshift.run(self.create_table_query)\n\n if self.truncate:\n self.log.warning(\"Deleting Data From Table: {self.target_table}\")\n redshift.run(f\"DELETE FROM {self.target_table}\")\n\n # Backfill a specific date\n self.execution_date = context.get('execution_date')\n if self.use_partitioned_data == 'true' and self.execution_date:\n data_s3_path = \"{BASE_PATH}\".format(\n BASE_PATH=self.s3_path,\n YEAR=self.execution_date.strftime(\"%Y\"),\n MONTH=self.execution_date.strftime(\"%m\"),\n DAY=self.execution_date.strftime(\"%d\")\n )\n else:\n self.log.info(f\"Execution date: {self.execution_date}\")\n data_s3_path = self.s3_path\n\n self.log.info(f\"Loading Data From: {data_s3_path}\")\n\n if self.data_type == 'json':\n if not self.json_path:\n s3_json_path = \"auto\"\n else:\n s3_json_path = 
self.json_path\n self.log.info(f\"Using the JSON Path Parser: {s3_json_path}\")\n\n staging_sql = StageToRedshiftOperator.copy_sql.format(\n TABLE=self.target_table,\n S3_URI=data_s3_path,\n ACCESS_ID=credentials.access_key,\n ACCESS_KEY=credentials.secret_key,\n JSON_PARSER_PATH=s3_json_path\n )\n elif self.data_type == 'parquet':\n staging_sql = StageToRedshiftOperator.copy_parquet.format(\n TABLE=self.target_table,\n S3_URI=data_s3_path,\n IAM_ROLE=self.iam_role\n )\n elif self.data_type == 'csv':\n staging_sql = StageToRedshiftOperator.copy_csv.format(\n TABLE=self.target_table,\n S3_URI=data_s3_path,\n ACCESS_ID=credentials.access_key,\n ACCESS_KEY=credentials.secret_key\n )\n\n self.log.info(\"Initiating Data Loading\")\n redshift.run(staging_sql)\n self.log.info(\"Redshift COPY Finished\")", "title": "" }, { "docid": "c6cd984b3c329464f1739c0e87924fcd", "score": "0.559714", "text": "def import_growapp_data(dt_from=None, dt_to=None):\n success = True\n # always query the whole crop type table - it will be small\n logging.info(\"Querying Growapp crop table\")\n croptype_df = get_croptype_data()\n success &= write_new_data(croptype_df, CropTypeClass)\n if success:\n logging.info(\"Successfully wrote to CropType table\")\n else:\n logging.info(\"Problem writing to CropType table\")\n logging.info(\"Querying Growapp batch table\")\n batch_df = get_batch_data(dt_from, dt_to)\n success &= write_new_data(batch_df, BatchClass)\n if success:\n logging.info(\"Successfully wrote to Batch table\")\n else:\n logging.info(\"Problem writing to Batch table\")\n logging.info(\"Querying Growapp batch event table\")\n batchevent_df = get_batchevent_data(dt_from, dt_to)\n success &= write_new_data(batchevent_df, BatchEventClass)\n if success:\n logging.info(\"Successfully wrote to BatchEvent table\")\n else:\n logging.info(\"Problem writing to BatchEvent table\")\n logging.info(\"Querying Growapp batch table to get harvest data\")\n harvest_df = get_harvest_data(dt_from, dt_to)\n success &= write_new_data(harvest_df, HarvestClass)\n if success:\n logging.info(\"Successfully wrote to Harvest table\")\n else:\n logging.info(\"Problem writing to Harvest table\")\n return success", "title": "" }, { "docid": "7094b0e242dd0684174dc5a6891614c2", "score": "0.55948293", "text": "def load_books_data():\n final_df = combine_books_data()\n\n # Validate results\n if check_if_valid_data(final_df):\n print(\"Data valid, proceed to Load stage\")\n\n # Set S3 parameters and then load into bucket \n bucket = \"nyt-api-bucket\"\n folder = \"uploads\"\n file_name = \"NYT_bestseller_data.csv\"\n key = f\"{folder}/{file_name}\"\n\n try:\n # Convert df to csv, upload to S3 in airflow\n final_df.to_csv(file_name, index=False, encoding=\"utf-8\")\n hook = airflow.hooks.S3_hook.S3Hook('my_S3_conn')\n hook.load_file(bucket_name=bucket, filename=file_name, key=key, replace=True)\n print(\"Df exported successfully\")\n except Exception as e:\n print(f\"{e} \\nData not exported, please check errors\")", "title": "" }, { "docid": "0d5242b91bc48d576ce21b4a3fa90aeb", "score": "0.55828696", "text": "def map_row_chunk(ids, file_pk, source_type, prog_key, increment, **kwargs):\n import_file = ImportFile.objects.get(pk=file_pk)\n save_type = PORTFOLIO_BS\n if source_type == ASSESSED_RAW:\n save_type = ASSESSED_BS\n\n org = Organization.objects.get(pk=import_file.import_record.super_organization.pk)\n\n # get all the table_mappings that exist for the organization\n table_mappings = ColumnMapping.get_column_mappings_by_table_name(org)\n\n # 
Remove any of the mappings that are not in the current list of raw columns because this\n # can really mess up the mapping of delimited_fields.\n # Ideally the table_mapping method would be attached to the import_file_id, someday...\n list_of_raw_columns = import_file.first_row_columns\n if list_of_raw_columns:\n for table, mappings in table_mappings.items():\n for raw_column_name in mappings.keys():\n if raw_column_name not in list_of_raw_columns:\n del table_mappings[table][raw_column_name]\n\n # check that the dictionaries are not empty, if empty, then delete.\n for table in table_mappings.keys():\n if not table_mappings[table]:\n del table_mappings[table]\n\n # TODO: **START TOTAL TERRIBLE HACK**\n # For some reason the mappings that got created previously don't\n # always have the table class in them. To get this working for\n # the demo this is an infix place, but is absolutely terrible and\n # should be removed ASAP!!!!!\n # NL: 4/12/2017, this should no longer be a problem after the column cleanup, remove and test post 2.0.2.\n if 'PropertyState' not in table_mappings and 'TaxLotState' in table_mappings and '' in table_mappings:\n _log.error('this code should not be running here...')\n debug_inferred_prop_state_mapping = table_mappings['']\n table_mappings['PropertyState'] = debug_inferred_prop_state_mapping\n raise Exception(\"This code has been deprecated, but is being called. Need to review the column cleanup\")\n # TODO: *END TOTAL TERRIBLE HACK**\n\n map_cleaner = _build_cleaner_2(org)\n\n # *** BREAK OUT INTO SEPARATE METHOD ***\n # figure out which import field is defined as the unique field that may have a delimiter of\n # individual values (e.g. tax lot ids). The definition of the delimited field is currently\n # hard coded\n try:\n delimited_fields = {}\n if 'TaxLotState' in table_mappings.keys():\n tmp = table_mappings['TaxLotState'].keys()[table_mappings['TaxLotState'].values().index(\n ('TaxLotState', 'jurisdiction_tax_lot_id', 'Jurisdiction Tax Lot ID', False))]\n delimited_fields['jurisdiction_tax_lot_id'] = {\n 'from_field': tmp,\n 'to_table': 'TaxLotState',\n 'to_field_name': 'jurisdiction_tax_lot_id',\n }\n except ValueError:\n delimited_fields = {}\n # field does not exist in mapping list, so ignoring\n\n # _log.debug(\"my table mappings are {}\".format(table_mappings))\n # _log.debug(\"delimited_field that will be expanded and normalized: {}\".format(delimited_fields))\n\n # If a single file is being imported into both the tax lot and property table, then add\n # an extra custom mapping for the cross-related data. If the data are not being imported into\n # the property table then make sure to skip this so that superfluous property entries are\n # not created.\n if 'PropertyState' in table_mappings.keys():\n if delimited_fields and delimited_fields['jurisdiction_tax_lot_id']:\n table_mappings['PropertyState'][\n delimited_fields['jurisdiction_tax_lot_id']['from_field']] = (\n 'PropertyState', 'lot_number', 'Lot Number', False)\n # *** END BREAK OUT ***\n\n # yes, there are three cascading for loops here. sorry :(\n for table, mappings in table_mappings.items():\n if not table:\n continue\n\n # This may be historic, but we need to pull out the extra_data_fields here to pass into\n # mapper.map_row. apply_columns are extra_data columns (the raw column names)\n extra_data_fields = []\n for k, v in mappings.items():\n # the 3rd element is the is_extra_data flag. 
Need to convert this to a dict and not a tuple.\n if v[3]:\n extra_data_fields.append(k)\n # _log.debug(\"extra data fields: {}\".format(extra_data_fields))\n\n # All the data live in the PropertyState.extra_data field when the data are imported\n data = PropertyState.objects.filter(id__in=ids).only('extra_data').iterator()\n\n # Since we are importing CSV, then each extra_data field will have the same fields. So\n # save the map_model_obj outside of for loop to pass into the `save_column_names` methods\n map_model_obj = None\n\n # Loop over all the rows\n for original_row in data:\n\n # expand the row into multiple rows if needed with the delimited_field replaced with a\n # single value. This minimizes the need to rewrite the downstream code.\n expand_row = False\n for k, d in delimited_fields.items():\n if d['to_table'] == table:\n expand_row = True\n # _log.debug(\"Expand row is set to {}\".format(expand_row))\n\n delimited_field_list = []\n for _, v in delimited_fields.items():\n delimited_field_list.append(v['from_field'])\n\n # _log.debug(\"delimited_field_list is set to {}\".format(delimited_field_list))\n\n # The raw data upon import is in the extra_data column\n for row in expand_rows(original_row.extra_data, delimited_field_list, expand_row):\n map_model_obj = mapper.map_row(\n row,\n mappings,\n STR_TO_CLASS[table],\n extra_data_fields,\n cleaner=map_cleaner,\n **kwargs\n )\n\n # save cross related data, that is data that needs to go into the other model's\n # collection as well.\n\n # Assign some other arguments here\n map_model_obj.import_file = import_file\n map_model_obj.source_type = save_type\n map_model_obj.organization = import_file.import_record.super_organization\n if hasattr(map_model_obj, 'data_state'):\n map_model_obj.data_state = DATA_STATE_MAPPING\n if hasattr(map_model_obj, 'clean'):\n map_model_obj.clean()\n\n # There is a potential thread safe issue here:\n # This method is called in parallel on production systems, so we need to make\n # sure that the object hasn't already been created.\n # For example, in the test data the tax lot id is the same for many rows. 
Make sure\n # to only create/save the object if it hasn't been created before.\n if hash_state_object(map_model_obj, include_extra_data=False) == \\\n hash_state_object(STR_TO_CLASS[table](organization=map_model_obj.organization),\n include_extra_data=False):\n # Skip this object as it has no data...\n _log.warn(\"Skipping building during mapping\")\n continue\n\n try:\n # There was an error with a field being too long [> 255 chars].\n map_model_obj.save()\n\n # Create an audit log record for the new map_model_obj that was created.\n\n AuditLogClass = PropertyAuditLog if isinstance(\n map_model_obj, PropertyState) else TaxLotAuditLog\n AuditLogClass.objects.create(organization=org,\n state=map_model_obj,\n name='Import Creation',\n description='Creation from Import file.',\n import_filename=import_file,\n record_type=AUDIT_IMPORT)\n\n except ValidationError as e:\n # Could not save the record for some reason, raise an exception\n raise Exception(\n \"Unable to save row the model with row {}:{}\".format(type(e),\n e.message))\n\n # Make sure that we've saved all of the extra_data column names from the first item in list\n if map_model_obj:\n Column.save_column_names(map_model_obj)\n\n increment_cache(prog_key, increment)", "title": "" }, { "docid": "59d1946c34ac06d39e379130710b09ad", "score": "0.5553037", "text": "def main():\n config = configparser.ConfigParser()\n config.read(\"dwh.cfg\")\n\n conn = psycopg2.connect(\n \"host={} dbname={} user={} password={} port={}\".format(\n *config[\"CLUSTER\"].values()\n )\n )\n cur = conn.cursor()\n\n load_staging_tables(cur, conn)\n insert_tables(cur, conn)\n\n conn.close()\n print(\"*************** ETL process completed ***************\")", "title": "" }, { "docid": "fcb9210075fd90ae21b8cf503a8383f7", "score": "0.55354553", "text": "def main():\n config = configparser.ConfigParser()\n config.read('dwh.cfg')\n\n conn = psycopg2.connect(\"host={} dbname={} user={} password={} port={}\".format(config.get('CLUSTER','HOST'),\n config.get('DWH','DWH_DB'),\n config.get('DWH','DWH_DB_USER'),\n config.get('DWH','DWH_DB_PASSWORD'),\n config.get('DWH','DWH_PORT')))\n cur = conn.cursor()\n \n load_staging_tables(cur, conn)\n insert_tables(cur, conn)\n\n conn.close()", "title": "" }, { "docid": "91c5d63efb12cb22029cca05042808a8", "score": "0.5532705", "text": "def load_to_bigquery(table_id,review,sentiment):\n review= str(review)\n sentiment= int(sentiment)\n rows_to_insert=[\n {\"Review\": review, \"Sentiment\": sentiment},\n ]\n \n errors = client.insert_rows_json(table_id, rows_to_insert) # Make an API request.\n if errors == []:\n print(\"New rows have been added.\")\n else:\n print(\"Encountered errors while inserting rows: {}\".format(errors))", "title": "" }, { "docid": "b22ca3c0369249c6042a43a94f71ea96", "score": "0.55232894", "text": "def extract_raw(connection):\n try:\n data_dir_path = '../data/'\n data_dir_name = os.listdir(data_dir_path) # ['business',...]\n\n extract_dir_path = './sql/insert/raw/'\n extract_dir_files = os.listdir(extract_dir_path) # ['insert_checkin_raw.sql',...]\n\n for dir in data_dir_name:\n if dir in ['business']: continue # skip the business as we first load business into temp\n file_name = os.listdir(data_dir_path+dir)\n insert_file_name = [extract_file_path for extract_file_path in extract_dir_files if ''.join(extract_file_path.split('_')[1]) == dir]\n\n file_path = data_dir_path+dir+'/'+file_name[0]\n insert_path = extract_dir_path+insert_file_name[0]\n \n with open(file_path, encoding=\"utf8\") as file_data:\n batch = 
0\n record_list = []\n\n for line in file_data: \n record_list.append(json.loads(line))\n batch += 1 \n \n if batch == 20000: \n extract_many(connection, insert_path, record_list)\n batch = 0\n record_list = []\n continue\n \n # for remaining records\n if len(record_list) > 0: extract_many(connection, insert_path, record_list)\n \n except Exception as e:\n print(\"An error occurred: {}\".format(e))\n\n else:\n print(\"Successfully extracted tables.\")", "title": "" }, { "docid": "d04b841887417a7b99456a9cbe6553ab", "score": "0.550835", "text": "def insert_tables(cur, conn):\n print(\"Transforming staged log and song data into 5 tables...\")\n for query in insert_table_queries:\n cur.execute(query)\n conn.commit()", "title": "" }, { "docid": "2d543e8cf9dac025d6103f9c3247c8f7", "score": "0.5507929", "text": "def main():\n config = configparser.ConfigParser()\n config.read('dwh.cfg')\n\n conn = psycopg2.connect(\"host={} dbname={} user={} password={} port={}\".format(*config['CLUSTER'].values()))\n cur = conn.cursor()\n \n s3 = get_s3_client(config.get(\"AWS\", \"KEY\"), config.get(\"AWS\", \"SECRET\"))\n \n role_arn = config.get(\"IAM_ROLE\", \"ARN\")\n staging_query_params = [\n (config.get(\"S3\", \"LOG_DATA\"), role_arn, config.get(\"S3\", \"LOG_JSONPATH\")),\n (config.get(\"S3\", \"SONG_DATA\"), role_arn)\n ]\n print(\"--- Loading data into staging tables.\")\n load_staging_tables(cur, conn, staging_query_params)\n print(\"--- Inserting data into analysis tables.\")\n insert_tables(cur, conn)\n\n conn.close()", "title": "" }, { "docid": "384ba733de767b22c98e87c825638530", "score": "0.5506792", "text": "def _load_data(bq_client, table, filepath):\n job_config = bigquery.LoadJobConfig()\n job_config.source_format = bigquery.SourceFormat.CSV\n job_config.skip_leading_rows = 1\n job_config.write_disposition = bigquery.WriteDisposition.WRITE_TRUNCATE\n job_config.autodetect = True\n\n with open(filepath, \"rb\") as source_file:\n job = bq_client.load_table_from_file(source_file,\n table,\n job_config=job_config)\n job.result() # Waits for table load to complete.\n return job.state", "title": "" }, { "docid": "6831b6787adb0dd64abedf5ad36c18e9", "score": "0.55031896", "text": "def process_immigration_data(spark, immigration_input_data, mapping_dict, output_data):\n \n immigration_staging_table = spark.read.format('com.github.saurfang.sas.spark').load(immigration_input_data)\n\n get_datetime = udf(lambda x: datetime(1960, 1, 1) + timedelta(days=int(x)))\n immigration_staging_table = immigration_staging_table.withColumn('arrival_date', get_datetime(immigration_staging_table.arrdate))\n\n immigration_staging_table = immigration_staging_table.withColumn(\"city\", mapping_dict.getItem(col(\"i94port\")))\n\n immigration_dimension_table = immigration_staging_table.select(\n F.col('cicid').alias('immigration_number'), \n F.col('i94bir').alias('age'),\n F.col('i94visa').alias('visa_type'),\n F.col('biryear').alias('birth_year'),\n 'gender'\n ).dropDuplicates()\n \n quality_checks(immigration_dimension_table)\n immigration_staging_table.write.parquet(os.path.join(output_data, 'immigration_table.parquet'), mode='overwrite')\n\n print('Immigration Dimesion Table is completed')\n \n immigration_fact_table = immigration_staging_table.select(\n F.col('cicid').alias('immigration_number'),\n 'city',\n F.col('i94addr').alias('state_code'),\n F.col('i94port').alias('port'),\n F.col('i94mon').alias('month'),\n F.col('i94yr').alias('year')\n ).drop_duplicates()\n\n quality_checks(immigration_fact_table)\n 
immigration_fact_table.write.parquet(os.path.join(output_data, 'immigration_fact_table.parquet'), mode='overwrite')\n\n print('Immigration Fact Table is completed')", "title": "" }, { "docid": "c75e651959d0d17bcf8e59992fe921a3", "score": "0.54925984", "text": "def load(db):\n r = db.truncate_table('deployments')\n print \"Truncated deployments table\"\n\n # Read in deployment data\n file_mask = \"repos/asset-management/deployment/*.csv\"\n file_list = glob.glob(file_mask)\n\n for ifile in file_list:\n with open(ifile, 'rb') as csvfile:\n print \"Loading file: \" + ifile\n reader = csv.DictReader(csvfile)\n for row in reader: \n # Tweak row entries to match database \n row['deploy_cuid'] = row['CUID_Deploy']\n row['deployed_by'] = row['deployedBy']\n row['recover_cuid'] = row['CUID_Recover']\n row['recovered_by'] = row['recoveredBy']\n row['reference_designator'] = row['Reference Designator']\n row['deployment_number'] = row['deploymentNumber']\n row['version_number'] = row['versionNumber']\n row['start_date'] = row['startDateTime']\n row['stop_date'] = row['stopDateTime']\n row['mooring_uid'] = row['mooring.uid']\n row['node_uid'] = row['node.uid']\n row['sensor_uid'] = row['sensor.uid']\n row['latitude'] = row['lat']\n row['longitude'] = row['lon']\n\n row = remove_extraneous_columns(columns, row)\n \n # Save Instrument Deployment\n if len(row['reference_designator'])==27 or len(row['reference_designator'])==14 or len(row['reference_designator'])==8:\n if row.get('deploy_cuid','').startswith('#'):\n print \"Ignored Deployment: \" +row['reference_designator'] +' deployment #' +str(row['deployment_number'])\n else:\n # First, save parent deployment if new\n check_parent(db,row)\n # Then save the deployment\n save_deployment(db,row)\n else:\n print \"Invalid Reference Designator: \" +row['reference_designator'] +' deployment #' +str(row['deployment_number'])", "title": "" }, { "docid": "8aba4af5fe3336ee7b75947b39f5e7e3", "score": "0.54641074", "text": "def process_airport_data(spark, airport_input_data, mapping_dict, output_data):\n \n airport_staging_table = spark.read.csv(airport_input_data, header='true')\n \n get_lat = udf(lambda x: x.split(', ')[0])\n get_long = udf(lambda x: x.split(', ')[1])\n get_country_code = udf(lambda x: x.split('-')[0])\n get_state_code = udf(lambda x: x.split('-')[1])\n\n\n airport_staging_table = airport_staging_table.withColumn('latitude', get_lat(airport_staging_table.coordinates))\n airport_staging_table = airport_staging_table.withColumn('longitude', get_long(airport_staging_table.coordinates))\n airport_staging_table = airport_staging_table.withColumn('country_code', get_country_code(airport_staging_table.iso_region))\n airport_staging_table = airport_staging_table.withColumn('state_code', get_state_code(airport_staging_table.iso_region))\n\n\n airport_staging_table = airport_staging_table.withColumn(\"city\", mapping_dict.getItem(col(\"iata_code\")))\n \n airport_staging_table = airport_staging_table.select(F.col('ident').alias('airport_id'), \n 'name',\n 'city',\n 'country_code',\n 'state_code',\n 'iata_code',\n 'latitude',\n 'longitude').dropDuplicates()\n \n quality_checks(airport_staging_table)\n \n airport_staging_table.write.parquet(os.path.join(output_data, 'airport_code_table.parquet'), mode='overwrite')\n\n print('Airport Dimension Table is completed')", "title": "" }, { "docid": "67e4cd24784a06f47a5a3facfcec3e7d", "score": "0.5463381", "text": "def db_load_data(\n data: pd.DataFrame, table: str, conn: Connection, **kwargs: Any\n) -> None:\n 
try:\n data.to_sql(table, conn, **kwargs)\n except Exception as e: # pragma: no cover\n logging.error(\"Unable to load data into %s table\", table)\n logging.error(e)\n else:\n logging.info(f\"Successfully loaded {len(data)} records into {table} table\")", "title": "" }, { "docid": "17bec139e475a807f7f09abcc8b6a43a", "score": "0.54569244", "text": "def import_():\n\n raw_airplanes = hjson.load(sys.stdin)\n try:\n airplanes = [schemas.AirplaneCreate(**x) for x in raw_airplanes]\n except:\n console.print(\"Incorrect input.\")\n return\n\n for airplane in crud.airplanes(db):\n crud.destroy_airplane(db, airplane)\n for airplane in airplanes:\n _ = crud.build_airplane(db, airplane)\n console.print(\"Data is imported successfully.\")", "title": "" }, { "docid": "76ac0548f0a333a01d9b0074fc22e834", "score": "0.5451196", "text": "def load_tables(dataset, tablespecs):\n project, dataset_name = dataset.split(':')\n dataset = bigquery.Client(project).dataset(dataset_name)\n\n tables = {}\n for spec in tablespecs:\n name, days = spec.split(':')\n table = dataset.table(name)\n try:\n table.reload()\n except google.cloud.exceptions.NotFound: # pylint: disable=no-member\n table.schema = load_schema(bigquery.schema.SchemaField)\n table.create()\n tables[name] = (table, make_json.get_table(float(days)))\n return tables", "title": "" }, { "docid": "f0a6ac60479363ed38e8415d67f64704", "score": "0.54456496", "text": "def update(self):\n existing_table = self.metadata.point_table\n with self.staging_table as s_table:\n staging = s_table.table\n delete_absent_hashes(staging.name, existing_table.name)\n with Update(staging, self.dataset, existing_table) as new_records:\n new_records.insert()\n update_meta(self.metadata, existing_table)", "title": "" }, { "docid": "59ca0cfa1fa21dd827d76fe6ce12e9e3", "score": "0.54359984", "text": "def import_Husstable(rgi_table, rgi_regionsO1, filepath, filedict, drop_col_names,\r\n indexname=input.indexname):\r\n ds = pd.read_csv(filepath + filedict[rgi_regionsO1[0]])\r\n # Select glaciers based on 01Index value from main_glac_rgi table\r\n # as long as Huss tables have all rows associated with rgi attribute table, then this shortcut works and saves time\r\n glac_table = ds.iloc[rgi_table['O1Index'].values]\r\n# glac_table = pd.DataFrame()\r\n# if input.rgi_regionsO2 == 'all' and input.rgi_glac_number == 'all':\r\n# glac_table = ds \r\n# elif input.rgi_regionsO2 != 'all' and input.rgi_glac_number == 'all':\r\n# glac_table = ds.iloc[rgi_table['O1Index'].values]\r\n# elif input.rgi_regionsO2 == 'all' and input.rgi_glac_number != 'all':\r\n# for glacier in range(len(rgi_table)):\r\n# if glac_table.empty:\r\n# glac_table = ds.loc[rgi_table.loc[glacier,'O1Index']]\r\n# else:\r\n# glac_table = pd.concat([glac_table, ds.loc[rgi_table.loc[glacier,'O1Index']]], axis=1)\r\n# glac_table = glac_table.transpose()\r\n # must make copy; otherwise, drop will cause SettingWithCopyWarning\r\n glac_table_copy = glac_table.copy()\r\n # Clean up table and re-index\r\n # Reset index to be GlacNo\r\n glac_table_copy.reset_index(drop=True, inplace=True)\r\n glac_table_copy.index.name = indexname\r\n # Drop columns that are not elevation bins\r\n glac_table_copy.drop(drop_col_names, axis=1, inplace=True)\r\n # Change NAN from -99 to 0\r\n glac_table_copy[glac_table_copy==-99] = 0.\r\n # Shift Huss bins by 20 m since the elevation bins appear to be 20 m higher than they should be\r\n if input.option_shift_elevbins_20m == 1:\r\n colnames = glac_table_copy.columns.tolist()[:-2]\r\n glac_table_copy = 
glac_table_copy.iloc[:,2:]\r\n glac_table_copy.columns = colnames\r\n return glac_table_copy", "title": "" }, { "docid": "b2064a717d184d58b7e50bff04ac89b7", "score": "0.5424411", "text": "def main():\n \n # Read redshit & aws credential and cluster details\n config = configparser.ConfigParser()\n config.read('dwh.cfg')\n \n # Connect to redshift cluster based on the credentials & details provided in dwh.cfg file\n conn = psycopg2.connect(\"host={} dbname={} user={} password={} port={}\".format(*config['CLUSTER'].values()))\n cur = conn.cursor()\n \n # Loads the staging_table first and the the regular tables\n load_staging_tables(cur, conn)\n insert_tables(cur, conn)\n \n # Closes the connection\n conn.close()", "title": "" }, { "docid": "66a10259b95f1dec256d0cf5a26841b9", "score": "0.5415385", "text": "def create_staging_table(client, dataset_ref, table_name):\n\n schema = [\n bigquery.SchemaField(\"id\", \"STRING\", mode=\"REQUIRED\"),\n bigquery.SchemaField(\"ad_creation_time\", \"TIMESTAMP\", mode=\"NULLABLE\"),\n bigquery.SchemaField(\"ad_delivery_start_time\",\n \"TIMESTAMP\",\n mode=\"NULLABLE\"),\n bigquery.SchemaField(\"ad_delivery_stop_time\",\n \"TIMESTAMP\",\n mode=\"NULLABLE\"),\n bigquery.SchemaField(\"page_id\", \"STRING\", mode=\"NULLABLE\"),\n bigquery.SchemaField(\n \"spend\",\n \"RECORD\",\n mode=\"NULLABLE\",\n fields=[\n bigquery.SchemaField(\"upper_bound\", \"INTEGER\", mode=\"NULLABLE\"),\n bigquery.SchemaField(\"lower_bound\", \"INTEGER\", mode=\"NULLABLE\"),\n ],\n ),\n bigquery.SchemaField(\"currency\", \"STRING\", mode=\"NULLABLE\"),\n bigquery.SchemaField(\n \"impressions\",\n \"RECORD\",\n mode=\"NULLABLE\",\n fields=[\n bigquery.SchemaField(\"upper_bound\", \"INTEGER\", mode=\"NULLABLE\"),\n bigquery.SchemaField(\"lower_bound\", \"INTEGER\", mode=\"NULLABLE\"),\n ],\n ),\n bigquery.SchemaField(\n \"region_distribution\",\n \"RECORD\",\n mode=\"REPEATED\",\n fields=[\n bigquery.SchemaField(\"region\", \"STRING\", mode=\"NULLABLE\"),\n bigquery.SchemaField(\"percentage\", \"FLOAT\", mode=\"NULLABLE\"),\n ],\n ),\n bigquery.SchemaField(\"ad_creative_link_caption\",\n \"STRING\",\n mode=\"NULLABLE\"),\n bigquery.SchemaField(\n \"demographic_distribution\",\n \"RECORD\",\n mode=\"REPEATED\",\n fields=[\n bigquery.SchemaField(\"gender\", \"STRING\", mode=\"NULLABLE\"),\n bigquery.SchemaField(\"age\", \"STRING\", mode=\"NULLABLE\"),\n bigquery.SchemaField(\"percentage\", \"FLOAT\", mode=\"NULLABLE\"),\n ],\n ),\n bigquery.SchemaField(\"funding_entity\", \"STRING\", mode=\"NULLABLE\"),\n bigquery.SchemaField(\"ad_creative_link_title\", \"STRING\", mode=\"NULLABLE\"),\n bigquery.SchemaField(\"ad_creative_body\", \"STRING\", mode=\"NULLABLE\"),\n bigquery.SchemaField(\"ad_creative_link_description\",\n \"STRING\",\n mode=\"NULLABLE\"),\n ]\n\n table_ref = dataset_ref.table(table_name)\n table = bigquery.Table(table_ref, schema=schema)\n table = client.create_table(table)\n logging.info(f\"Created table {table.full_table_id}\")\n\n return table", "title": "" }, { "docid": "ffd6872ed51e0c47feefb6a7329dda30", "score": "0.54020584", "text": "def handle_import_data(self, data):\n for entry in data:\n entry['value'] = json_tricks.loads(entry['value'])\n _completed_num = 0\n for trial_info in data:\n logger.info(\"Importing data, current processing progress %s / %s\", _completed_num, len(data))\n _completed_num += 1\n assert \"parameter\" in trial_info\n _params = trial_info[\"parameter\"]\n assert \"value\" in trial_info\n _value = trial_info['value']\n if not _value:\n 
logger.info(\"Useless trial data, value is %s, skip this trial data.\", _value)\n continue\n _value = extract_scalar_reward(_value)\n budget_exist_flag = False\n barely_params = dict()\n for keys in _params:\n if keys == _KEY:\n _budget = _params[keys]\n budget_exist_flag = True\n else:\n barely_params[keys] = _params[keys]\n if not budget_exist_flag:\n _budget = self.max_budget\n logger.info(\"Set \\\"TRIAL_BUDGET\\\" value to %s (max budget)\", self.max_budget)\n if self.optimize_mode is OptimizeMode.Maximize:\n reward = -_value\n else:\n reward = _value\n self.cg.new_result(loss=reward, budget=_budget, parameters=barely_params, update_model=True)\n logger.info(\"Successfully import tuning data to BOHB advisor.\")", "title": "" }, { "docid": "a252f8325e418376e42650d974d1269a", "score": "0.53980917", "text": "def get_staging_data(self, staging_name, \n connector_name='protheus_carol', merge_records=True, columns=None, callback=None, max_workers=30):\n\n # number of workers to download in parallel\n max_workers=max_workers\n\n # if you want to download a few columns, [\"COLUMNS\", \"TO\", \"FETCH\"]\n col=columns\n\n # maximum records to fetch. P.S.: only works if `max_workers=None`\n max_hits=None \n\n # if metadata should be returned (mdmId, mdmLastUpdated, etc)\n return_metadata = True\n\n # if records with duplicated ids should be consolidated by pyCarol\n merge_records = merge_records\n\n #connector + staging table\n connector_name=connector_name\n staging = staging_name\n\n # file_pattern = '2021-02'\n file_pattern = None\n\n df = self.carol.staging.fetch_parquet(\n staging_name=staging, \n connector_name=connector_name, \n max_workers=max_workers, \n columns=col, \n merge_records=merge_records, \n return_metadata=return_metadata, \n max_hits=max_hits,\n callback=callback, file_pattern=file_pattern)\n\n return df", "title": "" }, { "docid": "755876613b86acbc82919498db017461", "score": "0.53846556", "text": "def main():\n config = configparser.ConfigParser()\n config.read_file(open('dwh.cfg'))\n\n conn = psycopg2.connect(\"host={} dbname={} user={} password={} port={}\".format(*config['CLUSTER'].values()))\n cur = conn.cursor()\n\n load_staging_tables(cur, conn)\n insert_tables(cur, conn)\n\n conn.close()", "title": "" }, { "docid": "3896b979d5b9ecd941cb19e30363ebbb", "score": "0.53841585", "text": "def create_sales_data_postgres_pipeline():\n # load and process the postgres table information\n table_desc = transform_table_desc_df(\n load_csv() # TODO should supply dtypes\n )\n # generate column string for creation and insert queries, for the sales_data and tracking_data tables\n create_data_columns, insert_data_columns = generate_table_fields_str(table_desc)\n create_tracking_columns, insert_tracking_columns = generate_tracking_table_fields_str()\n\n # create sales_data and tracking_data tables\n create_tables(create_data_columns, create_tracking_columns)", "title": "" }, { "docid": "3407f078aea8c856ac8731aa8d01fde1", "score": "0.53789693", "text": "def _init_table(self):\n # Take most columns straight from the source.\n original_cols = [_copy_col(c) for c in self.staging.columns\n if c.name != 'hash']\n # Take care that the hash column is designated the primary key.\n original_cols.append(Column('hash', String(32), primary_key=True))\n\n # We also expect geometry and date columns to be created.\n derived_cols = [\n Column('point_date', TIMESTAMP, nullable=True, index=True),\n Column('geom', Geometry('POINT', srid=4326),\n nullable=True, index=True)]\n new_table = 
Table(self.dataset.name, MetaData(),\n *(original_cols + derived_cols))\n\n try:\n new_table.create(engine)\n # Trigger is broken\n #self._add_trigger()\n except:\n new_table.drop(bind=engine, checkfirst=True)\n raise\n else:\n return new_table", "title": "" }, { "docid": "73bd4d2b99d5dcf287b4ce73e963c4cc", "score": "0.53778934", "text": "def move_data_from_disk_to_buff(self, task_details):\n func_success = False\n import_file_name = task_details['import_file_name']\n import_operation = task_details['import_operation']\n db_cols_count = task_details['db_count']\n table_name = \"buffer_\" + import_operation\n\n file_content = self.read_file_from_path(import_file_name)\n\n insert_qry = \"INSERT INTO %s \" + str(tuple(['col' + str(x) for x in range(1, db_cols_count + 1)])).replace(\"'\", \"\")\n insert_qry += \" values \"\n insert_qry = insert_qry % (table_name,)\n\n values = []\n\n for row in file_content:\n insert_qry += \"(\" + (\"%s,\" * (int(db_cols_count))).rstrip(\",\") + \"), \"\n values.extend(row[:db_cols_count])\n _logger.info(row[:db_cols_count])\n _logger.info(tuple(values))\n insert_qry = insert_qry.rstrip(\", \")\n\n with api.Environment.manage():\n env = api.Environment(self.pool.cursor(), SUPERUSER_ID, self.env.context)\n cur = env.cr\n try:\n cur.execute(\"BEGIN\")\n cur.execute(\"LOCK TABLE \" + table_name + \" IN EXCLUSIVE MODE NOWAIT \")\n cur.execute(insert_qry, tuple(values))\n cur.commit()\n qry = \"SELECT count(*) FROM \" + table_name\n cur.execute(qry)\n func_success = True\n\n except Exception as e:\n func_success = False\n if not cur.closed:\n cur.rollback()\n finally:\n if not cur.closed:\n cur.close()\n return func_success", "title": "" }, { "docid": "490bf8ec51ba3a81df333eb286c12ace", "score": "0.53766954", "text": "def prep_data_load(self):\n self._handle.seek(0)\n if self.has_header_row():\n self._get_headers_from_handle(self._handle)", "title": "" }, { "docid": "d9be1e4e44d90d8dd2a2fc42bd8854a8", "score": "0.5364896", "text": "def load_shp_to_db(self, src_dir, dst_table):\n # Must be INSIDE the same directory as the .shp file in order to pull other files\n # Go into each directory and get the shapefile\n os.chdir(src_dir)\n logger = logging.getLogger('root')\n for source, dirs, files in os.walk(src_dir):\n for file in files:\n if file[-3:] == 'shp':\n shapefile = file\n command = (\n f\"shp2pgsql -I -s 4326 -d {shapefile} {dst_table}\"\n f\" | psql -q -U {self._credentials['user']} -d {self._credentials['dbname']}\"\n )\n logger.info(f\"Uploading {shapefile}\")\n\n self._run_subprocess(command)", "title": "" }, { "docid": "d745c41c29b7d2dd22ef9d79bc97b79a", "score": "0.534407", "text": "def main():\n \n #Read Config file\n config = configparser.ConfigParser()\n config.read('dwh.cfg')\n\n #Connect to database\n conn = psycopg2.connect(\"host={} dbname={} user={} password={} port={}\".format(\\\n config.get('CLUSTER','HOST'),\\\n config.get('CLUSTER','DB_NAME'),\\\n config.get('CLUSTER','DB_USER'),\\\n config.get('CLUSTER','DB_PASSWORD'),\\\n config.get('CLUSTER','DB_PORT')))\n cur = conn.cursor()\n \n #Load staging tables\n print('Loading staging tables')\n load_staging_tables(cur, conn)\n \n #Insert columns to Redshift tables\n print('Inserting staging tables')\n insert_tables(cur, conn)\n\n #Close the connection\n conn.close()", "title": "" }, { "docid": "e02febb929dc0fabb00451a474997f00", "score": "0.5330399", "text": "def load(self, data):\r\n table_name = type(data).__name__.lower()\r\n document = dict(data._asdict())\r\n document_updatable_part = {field: 
document[field]\r\n for field in UPDATABLE_FIELDS}\r\n document_remain_part = {field: document[field]\r\n for field in REQUIRED_FIELDS\r\n if field not in UPDATABLE_FIELDS +\r\n (ID_FIELD,) +\r\n (TABLE_RESOLVER_FIELD,)}\r\n self.db[table_name].update_one(\r\n {\"_id\": document.get(ID_FIELD)},\r\n {\"$set\": document_updatable_part,\r\n \"$setOnInsert\": document_remain_part},\r\n upsert=True\r\n )", "title": "" }, { "docid": "e8f125790f1858b557e2cf6deb3dd373", "score": "0.5321891", "text": "def _get_data_for_splitmerge_schema():\n\n early_track_table = pandas.DataFrame.from_dict({\n tracking_utils.FULL_ID_COLUMN: ['A', 'B']\n })\n\n nested_array = early_track_table[[\n tracking_utils.FULL_ID_COLUMN, tracking_utils.FULL_ID_COLUMN\n ]].values.tolist()\n\n early_track_table = early_track_table.assign(**{\n tracking_utils.TRACK_X_COORDS_COLUMN: nested_array,\n tracking_utils.TRACK_Y_COORDS_COLUMN: nested_array,\n TRACK_COLOUR_COLUMN: nested_array\n })\n\n early_track_table[tracking_utils.TRACK_X_COORDS_COLUMN].values[0] = (\n numpy.array([5, 12.5, 20])\n )\n early_track_table[tracking_utils.TRACK_X_COORDS_COLUMN].values[1] = (\n numpy.array([5, 12.5, 20])\n )\n\n early_track_table[tracking_utils.TRACK_Y_COORDS_COLUMN].values[0] = (\n numpy.full(3, 5.)\n )\n early_track_table[tracking_utils.TRACK_Y_COORDS_COLUMN].values[1] = (\n numpy.full(3, 20.)\n )\n\n early_track_table[TRACK_COLOUR_COLUMN].values[0] = FIRST_TRACK_COLOUR\n early_track_table[TRACK_COLOUR_COLUMN].values[1] = SECOND_TRACK_COLOUR\n\n late_storm_object_table = pandas.DataFrame.from_dict({\n tracking_utils.CENTROID_X_COLUMN: numpy.full(2, 25.),\n tracking_utils.CENTROID_Y_COLUMN: numpy.array([12.5, 27.5]),\n tracking_utils.FULL_ID_COLUMN: ['C', 'D']\n })\n\n return early_track_table, late_storm_object_table", "title": "" }, { "docid": "16531d81be357b9ab66aa430e9b03b96", "score": "0.5317634", "text": "def load_and_prepare_data():\n data_training = pickle.load( open(DATASET_TRAINING, \"rb\" ) )\n data_training = prepare_input(data_training)\n data_test= pickle.load( open(DATASET_TEST, \"rb\" ) )\n data_test= prepare_input(data_test)\n data_dev= pickle.load( open(DATASET_DEV, \"rb\" ) )\n data_dev= prepare_input(data_dev)\n return data_training,data_dev,data_test", "title": "" }, { "docid": "12735cbaab0cdb22da5389784fc1be97", "score": "0.5304481", "text": "def load_tables(self, dir: str) -> None:\n try:\n self.lib = pd.read_csv(os.path.join(dir, 'LIB.csv')).set_index(self.OHCO[0])\n self.doc = pd.read_csv(os.path.join(dir, 'DOC.csv')).set_index(self.OHCO[:3])\n self.token = pd.read_csv(os.path.join(dir, 'TOKEN.csv'), dtype={'term_str':'str'}).set_index(self.OHCO)\n self.vocab = pd.read_csv(os.path.join(dir, 'VOCAB.csv'), dtype={'term_str':'str'}).set_index('term_str')\n except FileNotFoundError:\n print(\"Missing one or more tables.\")", "title": "" }, { "docid": "e4ab873581922936aa11167fc0a05e9e", "score": "0.5303107", "text": "def import_dataset(apps, schema_editor):\n\n # parameters that you might want to change:\n CSV_LOCATION = \"../metadata.csv\"\n DB_FIELDS = ['user_id', 'sample_id', 'eye', 'lens_type', 'nir_illumination', 'lens_brand', 'is_regular']\n CSV_FIELDS = ['user_id', 'sample_id', 'eye', 'live_fake', 'NIR_illumination', 'lens_brand', 'regular_irregular_lens_type']\n CSV_DB_MAP = { # maps old csv values to new database ones\n 'live_fake': {\n 'live': 'L',\n 'fake': 'F',\n 'clear': 'C',\n },\n 'eye': {\n 'left': 'L',\n 'right': 'R',\n },\n 'NIR_illumination': {\n 'cross': 'C',\n 'direct': 'D',\n },\n 
'regular_irregular_lens_type': {\n 'regular': True,\n 'irregular': False,\n 'none': None,\n 'clear': None,\n }\n }\n\n # load other variables\n Image = apps.get_model(DJANGO_APP_NAME, \"Image\")\n DATASET_ROOT = settings.DATASET_ROOT\n\n # get list of images and metadata\n dataset_images = _list_images(ds_path=DATASET_ROOT)\n df = _load_csv(csv_path=os.path.join(DATASET_ROOT, CSV_LOCATION))\n\n # for each image file, extract metadata and create a database entry\n print(\"\\n\\tAdding database entries for {} images found...\".format(len(dataset_images)))\n counters = {\n '404': 0,\n 'new': 0,\n 'dup': 0,\n }\n for img_ds_path in dataset_images:\n\n img_id = _checksum(img_ds_path)\n extension = img_ds_path.split('/')[-1].split('.')\n extension = extension[-1] if len(extension) > 1 else \"\"\n img_path = (img_ds_path.split(DATASET_ROOT)[1]).strip(os.path.sep)\n\n # skip image if already registered\n if Image.objects.filter(img_id=img_id).exists():\n print(\"Skipping duplicated element:\\n\\t{}\\tSHA256: {}\".format(\n img_path, img_id))\n counters['dup'] += 1\n continue\n\n # extract file name from image path\n filename = os.path.basename(img_path).split('.')[0]\n csv_entry = None\n try:\n csv_entry = df.loc[filename]\n if csv_entry.shape[0] > len(CSV_FIELDS):\n # metadata might have duplicated filenames, if so, skip them\n print(\"Discarding {} entries: filename must be unique!\".format(csv_entry.shape[0]))\n continue\n except:\n counters['404'] += 1\n print(\"Skipping CSV filename not found: {}\".format(filename))\n continue\n\n # extract values from csv entry\n image_fields = _extract_csv_to_db(\n csv_entry=csv_entry,\n csv_fields=CSV_FIELDS,\n db_fields=DB_FIELDS,\n csv_db_map=CSV_DB_MAP,\n )\n\n # create database object\n img = Image(\n img_id = img_id,\n extension = extension,\n img_path = img_path,\n **image_fields,\n )\n\n # execute database transaction\n try:\n with transaction.atomic():\n img.save()\n counters['new'] += 1\n except IntegrityError as err:\n print(\" >> Failed to load entry from CSV into DB:\")\n print(image_fields)\n raise err\n\n print(\"\\n >> Duplicated entries: {}\".format(counters['dup']))\n print(\" >> Entries not found: {}\".format(counters['404']))\n print(\" >> Entries added: {}\".format(counters['new']))", "title": "" }, { "docid": "d68d1d2148d731ac0ea977a6e5bca9d7", "score": "0.53004885", "text": "def load(self, webhdfs_root, hdfs_path, vschema,\n dtable, rtable, mode=\"direct\"):\n\n if mode == \"direct\":\n _filter = \"FILTER GZIP()\"\n elif mode == \"decompress\":\n _filter = \"\"\n else:\n logkv(logger, {\"msg\": \"Invalid load mode supplied to Vertica COPY command\",\n \"mode\": mode}, \"error\")\n raise VerticaManagerException()\n\n # Discard the leading \"/\" in HDFS path. 
We're going to be pre-pending it with\n # webhdfs base URL\n if hdfs_path.startswith(\"/\"):\n hdfs_path = hdfs_path[1:]\n\n webhdfs_url = os.path.join(webhdfs_root, hdfs_path)\n\n copy_cmd = '''COPY %s.%s\n SOURCE Hdfs(url=\\'%s\\', username=\\'%s\\', low_speed_limit=1048576)\n %s\n DELIMITER E\\'\\\\001\\'\n REJECTMAX 0\n REJECTED DATA AS TABLE %s.%s\n DIRECT\n COMMIT\n ''' % (vschema,\n dtable,\n webhdfs_url,\n self.connection_info[\"vertica_user\"],\n _filter, vschema, rtable)\n\n try:\n vresult = self.execute(stmt=copy_cmd)\n rows_loaded = VerticaManager.getrows(vresult.output)\n\n logkv(logger, {\"msg\": \"Loaded data in HDFS path to Vertica table\",\n \"hdfs_path\": hdfs_path, \"vschema\": vschema,\n \"dtable\": dtable, \"rows_loaded\": rows_loaded}, \"info\")\n return rows_loaded\n except VerticaManagerException as ex:\n logkv(logger, {\"msg\": \"Load to Vertica via WebHdfs failed\"},\n \"error\", ex)\n raise", "title": "" }, { "docid": "c5466871cad80c2ba5b265f48b531fcb", "score": "0.52966243", "text": "def load_data(con):\n cursor = con.cursor()\n try:\n cursor.execute(\"\"\"\n INSERT INTO Tagit.Resource (uri, title) \n VALUES ('https://docs.intersystems.com/iris20194/csp/docbook/Doc.View.cls?KEY=RSQL_droptable', 'DROP TABLE')\n \"\"\")\n cursor.execute(\"\"\"\n INSERT INTO Tagit.Resource (uri, title) \n VALUES ('https://docs.intersystems.com/iris20194/csp/docbook/Doc.View.cls?KEY=AFL_globals', 'First Look: Globals')\n \"\"\")\n cursor.execute(\"\"\"\n INSERT INTO Tagit.Tag (name) \n VALUES ('sql')\n \"\"\")\n cursor.execute(\"\"\"\n INSERT INTO Tagit.Tag (name) \n VALUES ('intersystemsiris')\n \"\"\")\n cursor.execute(\"\"\"\n INSERT INTO Tagit.Tag (name) \n VALUES ('table')\n \"\"\")\n cursor.execute(\"\"\"\n INSERT INTO Tagit.Tag (name) \n VALUES ('globals')\n \"\"\")\n cursor.execute(\"\"\"\n INSERT INTO Tagit.Tag (name) \n VALUES ('objectscript')\n \"\"\")\n con.commit()\n except Exception as e:\n print(\"Error loading data: \" + str(e))\n return e\n\n print(\"Data loaded!\")\n return True", "title": "" }, { "docid": "72d0753a84df78f3da98bc10153bd198", "score": "0.5296327", "text": "def load_data(self, sheet_names):\n\n for sheet_name, sheet_rows in self.work_sheet.items():\n for row in sheet_rows[row_start_struct[sheet_name]:]:\n if sheet_name == sheet_names[0]:\n obj_cat = sq.ObjectCategory()\n obj_cat.ObjectCategoryName = row[0].value\n obj_cat.CategoryDefinition = row[1].value\n self.push_data(obj_cat)\n\n elif sheet_name == sheet_names[1]:\n attrib_cat = sq.AttributeTypes()\n attrib_cat.Term = row[0].value\n attrib_cat.Definition = row[1].value\n self.push_data(attrib_cat)\n\n elif sheet_name == sheet_names[2]:\n data_struct = sq.Datasets()\n data_struct.DatasetName = row[0].value\n data_struct.DatasetAcronym = row[1].value\n data_struct.SourceID = self.__session.query(sq.Sources).filter(\n sq.Sources.SourceName == row[2].value\n ).first().SourceID\n self.push_data(data_struct)\n\n elif sheet_name == sheet_names[3]:\n if all('' == cell.value for cell in row):\n break\n obj_type = sq.ObjectTypes()\n obj_type.ObjectType = row[0].value\n obj_type.ObjectCode = row[1].value\n obj_type.ObjectTopology = row[2].value\n\n if row[3].value:\n obj_type.DatasetID = self.__session.query(sq.Datasets).filter(\n sq.Datasets.DatasetAcronym == row[3].value\n ).first().DatasetID\n\n if row[4].value:\n obj_type.ObjectTypeCVID = self.__session.query(sq.ObjectTypesCV).filter(\n sq.ObjectTypesCV.ObjectTypeCV == row[4].value\n ).first().ObjectTypeCVID\n\n obj_type.MapColor = row[5].value\n 
obj_type.MapSymbol = row[6].value\n\n if row[7].value:\n obj_type.ObjectCategoryID = self.__session.query(sq.ObjectCategory).filter(\n sq.ObjectCategory.ObjectCategoryName == row[7].value\n ).first().ObjectCategoryID\n\n obj_type.Description = row[8].value\n self.push_data(obj_type)\n\n elif sheet_name == sheet_names[4]:\n attrib = sq.Attributes()\n attrib.AttributeName = row[0].value\n attrib.ObjectTypeID = self.__session.query(sq.ObjectTypes).filter(\n sq.ObjectTypes.ObjectCode == row[1].value\n ).first().ObjectTypeID\n attrib.AttributeCode = row[2].value\n attrib.UnitCVID = self.__session.query(sq.Units).filter(\n sq.Units.UnitNameCV == row[3].value\n ).first().UnitCVID\n attrib.AttributeTypeCVID = self.__session.query(sq.AttributeTypes).filter(\n sq.AttributeTypes.AttributeTypeCV == row[4].value\n ).first().AttributeTypeCVID\n attrib.AttributeNameCVID = self.__session.query(sq.AttributesCV).filter(\n sq.AttributesCV.AttributeNameCV == row[5].value\n ).first().AttributeCVID\n\n attrib.ModelInputOrOutput = row[7].value\n attrib.AttributeDescription = row[8].value\n self.push_data(attrib)\n self.close_db()", "title": "" }, { "docid": "561e5813939a5ec4dfb245451466d860", "score": "0.52925307", "text": "def load_data_table(data_url):\n \n data = open(data_url, \"r\").read()\n data_lines = data.split('\\n')\n print \"Loaded\", len(data_lines), \"data points\"\n data_tokens = [line.split(',') for line in data_lines]\n return [[tokens[0], float(tokens[1]), float(tokens[2]), int(tokens[3]), float(tokens[4])] \n for tokens in data_tokens]", "title": "" }, { "docid": "eee97a46116926072fd911402fa79c79", "score": "0.5292218", "text": "def ingest_sailthru_data_to_redshift(dt=\"2016-04-01\"):\n logging.info(dt)\n\n export = util.load_file_from_s3(\n path=\"export/sailthru_data-exporter_samples.zip\", bucket=BUCKET\n )\n\n files = util.unzip_files(export)\n\n logging.info(\"Files found: \" + \", \".join(list(files.keys())))\n\n for filename, contents in files.items():\n prefix = filename.split(\".\")[0]\n if prefix == \"blast\":\n process_sailthru_blast_data(filename, contents, dt)\n elif prefix == \"message_blast\":\n process_sailthru_message_blast_data(filename, contents, dt)\n elif prefix == \"message_transactional\":\n process_sailthru_message_transactional_data(filename, contents, dt)\n elif prefix == \"profile\":\n process_sailthru_profile_data(filename, contents, dt)\n else:\n # Warn about mac zip artifacts and unexpected files\n logging.warning(\"Unknown file: \" + filename)\n\n # # Treat this entire process as a single transaction and commit on success.\n # RS.commit()", "title": "" }, { "docid": "1472358fc5e48c6520c34bff04286361", "score": "0.5288588", "text": "def load_pg_data(ds, **kwargs):\n\n # Fetch the data from the rest API for the previous day.\n DOWNTOWN_DASH_ID = \"LADOTDT\"\n token = get_bearer_token()\n yesterday = str(pandas.to_datetime(ds) - timedelta(1))\n logging.info(f\"Fetching DASH data for {yesterday}\")\n r = requests.get(\n f\"https://track-api.syncromatics.com/1/{DOWNTOWN_DASH_ID}\"\n f\"/exports/stop_times.json?start={yesterday}&end={yesterday}\",\n headers={\"Authorization\": f\"Bearer {token}\"},\n )\n time_cols = [\"arrive\", \"depart\", \"scheduled_arrive\", \"scheduled_depart\"]\n df = pandas.read_json(\n r.content,\n convert_dates=time_cols,\n dtype={\n \"run_name\": str,\n \"vehicle_name\": str,\n \"arrive_variance\": float,\n \"depart_variance\": float,\n },\n )\n # The trips may be zero due to holidays or missing data.\n if len(df) == 0:\n logging.info(\"No 
trips found -- is this a holiday?\")\n return\n\n # Drop unnecesary driver info.\n df = df.drop(columns=[\"driver_first_name\", \"driver_last_name\"])\n\n # Drop null trip ids and make sure they are integers.\n df = df.dropna(subset=[\"trip_id\"])\n df.trip_id = df.trip_id.astype(\"int64\")\n\n # Set the timezone to local time with TZ info\n for col in time_cols:\n df[col] = df[col].dt.tz_localize(\"UTC\").dt.tz_convert(LOCAL_TIMEZONE)\n\n check_columns(dash_trips, df)\n\n # Upload the final dataframe to Postgres. Since pandas timestamps conform to the\n # datetime interface, psycopg can correctly handle the timestamps upon insert.\n logging.info(\"Uploading to PG\")\n engine = PostgresHook.get_hook(POSTGRES_ID).get_sqlalchemy_engine()\n insert = sqlalchemy.dialects.postgresql.insert(dash_trips).on_conflict_do_nothing()\n conn = engine.connect()\n conn.execute(insert, *df.to_dict(orient=\"record\"))", "title": "" }, { "docid": "3928b04dc2ccd79523086cc07765acdd", "score": "0.526913", "text": "def dbloader(csv_data, session, asean, saarc):\n\n # traverse through the list and add rows to table\n for line in csv_data:\n g = \"None\"\n if line[0] in asean:\n g = \"asean\"\n elif line[0] in saarc:\n g = \"saarc\"\n rows = Population(\n country=line[0],\n code=int(line[1]),\n year=int(line[2]),\n population=float(line[3]),\n group=g\n )\n session.add(rows)\n\n # commit the added rows to the database\n session.commit()", "title": "" }, { "docid": "3e9ce79f59e37fdaf2013c25b409e91c", "score": "0.5250011", "text": "def __enter__(self):\n\n # create n_table with point_date, geom, and id columns\n s = self.staging\n e = self.existing\n d = self.dataset\n\n derived_dates = func.cast(s.c[d.date], TIMESTAMP).label('point_date')\n derived_geoms = self._geom_col()\n cols_to_insert = [s.c['hash'], derived_dates, derived_geoms]\n\n # Select the hash and the columns we're deriving from the staging table.\n sel = select(cols_to_insert)\n # And limit our results to records\n # whose hashes aren't already present in the existing table.\n sel = sel.select_from(s.outerjoin(e, s.c['hash'] == e.c['hash'])).\\\n where(e.c['hash'] == None)\n\n # Drop the table first out of healthy paranoia\n self._drop()\n try:\n self.table.create(bind=engine)\n except Exception as e:\n raise PlenarioETLError(repr(e) +\n '\\nCould not create table n_' + d.name)\n\n ins = self.table.insert().from_select(cols_to_insert, sel)\n # Populate it with records from our select statement.\n try:\n engine.execute(ins)\n except Exception as e:\n raise PlenarioETLError(repr(e) + '\\n' + str(sel))\n else:\n # Would be nice to check if we have new records or not right here.\n return self", "title": "" }, { "docid": "d5021224e08c8bcf528ce3f9b51147c1", "score": "0.52477896", "text": "def load(df, db_file_path, table_name):\n # Save the clean dataset into an sqlite database\n engine = create_engine(\"sqlite:///\" + db_file_path)\n df.to_sql(table_name, engine, index=False, if_exists=\"replace\")", "title": "" }, { "docid": "80591ca3f0023f94bf1bb3f82f7ffebb", "score": "0.5244358", "text": "def transform(self, input, hive_instance, hive_metadata, hive_field_metadata, view_dependency):\n all_data = []\n with open(input) as input_file:\n for line in input_file:\n all_data.append(json.loads(line))\n\n dataset_idx = -1\n\n instance_file_writer = FileWriter(hive_instance)\n schema_file_writer = FileWriter(hive_metadata)\n field_file_writer = FileWriter(hive_field_metadata)\n dependency_file_writer = FileWriter(view_dependency)\n\n depends_sql = \"\"\"\n SELECT 
d.NAME DB_NAME, case when t.TBL_NAME regexp '_[0-9]+_[0-9]+_[0-9]+$'\n then concat(substring(t.TBL_NAME, 1, length(t.TBL_NAME) - length(substring_index(t.TBL_NAME, '_', -3)) - 1),'_{version}')\n else t.TBL_NAME\n end dataset_name,\n concat('/', d.NAME, '/', t.TBL_NAME) object_name,\n case when (d.NAME like '%\\_mp' or d.NAME like '%\\_mp\\_versioned') and d.NAME not like 'dalitest%' and t.TBL_TYPE = 'VIRTUAL_VIEW'\n then 'dalids'\n else 'hive'\n end object_type,\n case when (d.NAME like '%\\_mp' or d.NAME like '%\\_mp\\_versioned') and d.NAME not like 'dalitest%' and t.TBL_TYPE = 'VIRTUAL_VIEW'\n then 'View'\n else\n case when LOCATE('view', LOWER(t.TBL_TYPE)) > 0 then 'View'\n when LOCATE('index', LOWER(t.TBL_TYPE)) > 0 then 'Index'\n else 'Table'\n end\n end object_sub_type,\n case when (d.NAME like '%\\_mp' or d.NAME like '%\\_mp\\_versioned') and t.TBL_TYPE = 'VIRTUAL_VIEW'\n then 'dalids'\n else 'hive'\n end prefix\n FROM TBLS t JOIN DBS d on t.DB_ID = d.DB_ID\n WHERE d.NAME = '{db_name}' and t.TBL_NAME = '{table_name}'\n \"\"\"\n\n # one db info : 'type', 'database', 'tables'\n # one table info : required : 'name' , 'type', 'serializationFormat' ,'createTime', 'DB_ID', 'TBL_ID', 'SD_ID'\n # optional : 'schemaLiteral', 'schemaUrl', 'fieldDelimiter', 'fieldList'\n for one_db_info in all_data:\n i = 0\n for table in one_db_info['tables']:\n i += 1\n schema_json = {}\n prop_json = {} # set the prop json\n\n for prop_name in TableInfo.optional_prop:\n if prop_name in table and table[prop_name] is not None:\n prop_json[prop_name] = table[prop_name]\n\n view_expanded_text = ''\n\n if TableInfo.view_expended_text in prop_json:\n view_expanded_text = prop_json[TableInfo.view_expended_text]\n text = prop_json[TableInfo.view_expended_text].replace('`', '')\t# this will be fixed after switching to Hive AST\n array = []\n try:\n array = HiveViewDependency.getViewDependency(text)\n except:\n self.logger.error(\"HiveViewDependency.getViewDependency(%s) failed!\" % (table['name']))\n\n l = []\n for a in array:\n l.append(a)\n names = str(a).split('.')\n if names and len(names) >= 2:\n db_name = names[0].lower()\n table_name = names[1].lower()\n if db_name and table_name:\n self.curs.execute(depends_sql.format(db_name=db_name, table_name=table_name, version='{version}'))\n rows = self.curs.fetchall()\n self.conn_hms.commit()\n if rows and len(rows) > 0:\n for row_index, row_value in enumerate(rows):\n dependent_record = HiveDependencyInstanceRecord(\n one_db_info['type'],\n table['type'],\n \"/%s/%s\" % (one_db_info['database'], table['name']),\n 'dalids:///' + one_db_info['database'] + '/' + table['dataset_name']\n if one_db_info['type'].lower() == 'dalids'\n else 'hive:///' + one_db_info['database'] + '/' + table['dataset_name'],\n 'depends on',\n 'Y',\n row_value[3],\n row_value[4],\n row_value[2],\n row_value[5] + ':///' + row_value[0] + '/' + row_value[1], '')\n dependency_file_writer.append(dependent_record)\n prop_json['view_depends_on'] = l\n dependency_file_writer.flush()\n\n # process either schema\n flds = {}\n field_detail_list = []\n\n if TableInfo.schema_literal in table and \\\n table[TableInfo.schema_literal] is not None and \\\n table[TableInfo.schema_literal].startswith('{'):\n sort_id = 0\n urn = \"hive:///%s/%s\" % (one_db_info['database'], table['dataset_name'])\n self.logger.info(\"Getting schema literal for: %s\" % (urn))\n try:\n schema_data = json.loads(table[TableInfo.schema_literal])\n schema_json = schema_data\n acp = AvroColumnParser(schema_data, urn = urn)\n result = 
acp.get_column_list_result()\n field_detail_list += result\n except ValueError:\n self.logger.error(\"Schema Literal JSON error for table: \" + str(table))\n\n elif TableInfo.field_list in table:\n # Convert to avro\n uri = \"hive:///%s/%s\" % (one_db_info['database'], table['dataset_name'])\n if one_db_info['type'].lower() == 'dalids':\n uri = \"dalids:///%s/%s\" % (one_db_info['database'], table['dataset_name'])\n else:\n uri = \"hive:///%s/%s\" % (one_db_info['database'], table['dataset_name'])\n self.logger.info(\"Getting column definition for: %s\" % (uri))\n try:\n hcp = HiveColumnParser(table, urn = uri)\n schema_json = {'fields' : hcp.column_type_dict['fields'], 'type' : 'record', 'name' : table['name'], 'uri' : uri}\n field_detail_list += hcp.column_type_list\n except:\n self.logger.error(\"HiveColumnParser(%s) failed!\" % (uri))\n schema_json = {'fields' : {}, 'type' : 'record', 'name' : table['name'], 'uri' : uri}\n\n if one_db_info['type'].lower() == 'dalids':\n dataset_urn = \"dalids:///%s/%s\" % (one_db_info['database'], table['dataset_name'])\n else:\n dataset_urn = \"hive:///%s/%s\" % (one_db_info['database'], table['dataset_name'])\n\n dataset_instance_record = DatasetInstanceRecord('dalids:///' + one_db_info['database'] + '/' + table['name']\n if one_db_info['type'].lower() == 'dalids'\n else 'hive:///' + one_db_info['database'] + '/' + table['name'],\n 'grid',\n '',\n '',\n '*',\n True,\n table['native_name'],\n table['logical_name'],\n table['version'],\n table['create_time'],\n json.dumps(schema_json),\n json.dumps(view_expanded_text),\n dataset_urn)\n instance_file_writer.append(dataset_instance_record)\n\n if dataset_urn not in self.dataset_dict:\n dataset_scehma_record = DatasetSchemaRecord(table['dataset_name'], json.dumps(schema_json),\n json.dumps(prop_json),\n json.dumps(flds), dataset_urn, 'Hive', one_db_info['type'],\n table['type'], '',\n table.get(TableInfo.create_time),\n (int(table.get(TableInfo.source_modified_time,\"0\"))))\n schema_file_writer.append(dataset_scehma_record)\n\n dataset_idx += 1\n self.dataset_dict[dataset_urn] = dataset_idx\n\n for fields in field_detail_list:\n field_record = DatasetFieldRecord(fields)\n field_file_writer.append(field_record)\n\n instance_file_writer.flush()\n schema_file_writer.flush()\n field_file_writer.flush()\n self.logger.info(\"%20s contains %6d tables\" % (one_db_info['database'], i))\n\n instance_file_writer.close()\n schema_file_writer.close()\n field_file_writer.close()\n dependency_file_writer.close()", "title": "" }, { "docid": "8f519dafb186d831f75e3619b31c2f24", "score": "0.5244037", "text": "def load_etl_ld_cntl(conn,job_run_id,job_nm,tb_nm,comment,ld_status,ins_row_cnt,start_dtm,end_dtm):\r\n\tprint(\"loading processed records into Aurora DB\")\r\n\ttry:\r\n\t\t# print the connection string we will use to connect --> REMOVE BEFORE DEV\r\n\t\tprint(\"conn.status\", conn.status)\r\n\t\tprint(\"conn.server_version\", conn.server_version)\r\n\t\t# conn.cursor will return a cursor object, you can use this cursor to perform queries\r\n\t\tcursor = conn.cursor()\r\n\t\t# insert records\r\n\t\tsql_text =\"INSERT INTO etl.ld_cntrl_tb(job_run_id,job_nm, tb_nm, start_dtm, end_dtm, ins_row_cnt, comment,ld_status) VALUES %s ;\"\r\n\t\t#Generate list\r\n\t\tdata = (job_run_id,job_nm,tb_nm ,start_dtm,end_dtm,ins_row_cnt,comment,ld_status)\r\n\t\tquery = cursor.mogrify(sql_text, [data]) #save the query\r\n\t\tprint(cursor.mogrify(sql_text, [data])) #prints the query as a check\r\n\t\tcursor.execute(query)\t#execute the 
query\r\n\t\tconn.commit() #confirms changes made to an existing table\r\n\t\tcursor.close() #close db connection\r\n\texcept Exception as e:\r\n\t\tprint(\"load_etl_ld_cntl: {}\".format(e))\r\n\t\traise e", "title": "" }, { "docid": "03b80d4e83e52a9120a6ba312f7b2fa9", "score": "0.52364516", "text": "def save_file_to_table(filename, table, dataset, project, file_format=bigquery.SourceFormat.CSV, max_bad_records=0,\n replace=True, partition=None):\n client = bigquery.Client(project=project)\n\n dataset_ref = client.dataset(dataset)\n table_ref = dataset_ref.table(table)\n job_config = bigquery.LoadJobConfig()\n job_config.max_bad_records = max_bad_records\n job_config.source_format = file_format\n exists_ok = PBQ._writing_disposition(job_config, replace)\n\n if file_format == bigquery.SourceFormat.CSV:\n job_config.skip_leading_rows = 1\n job_config.autodetect = True\n PBQ._create_table(client, exists_ok, partition, replace, table_ref)\n\n if not partition:\n with open(filename, \"rb\") as source_file:\n job = client.load_table_from_file(source_file, table_ref, job_config=job_config)\n\n job.result() # Waits for table load to complete.\n print(\"Loaded {} rows into {}:{}.\".format(job.output_rows, dataset, table))\n else:\n print('fallback loading by CMD command due to missing api feature for partition')\n table = '{0}${1}'.format(table, partition)\n cmd = \"bq load\"\n if replace:\n cmd = \"{} --replace\".format(cmd)\n cmd = \"{cmd} --source_format={file_format} '{project}:{dataset}.{tbl_name}' {filename}\". \\\n format(cmd=cmd, tbl_name=table, filename=filename, project=project, dataset=dataset,\n file_format=file_format)\n os.system(cmd)", "title": "" }, { "docid": "65a4b710098c5c3b763c6905814f884b", "score": "0.5231847", "text": "def import_data(users, agencies, filename):\n if users:\n Users.populate(csv_name=filename)\n elif agencies:\n Agencies.populate(csv_name=filename)", "title": "" }, { "docid": "3a6f01e4db582be1cfb29f20690f7ab2", "score": "0.52291244", "text": "def load_table_seasons(self, df, operation, dictConnParams):\n import psycopg2\n from psycopg2 import Error\n\n try:\n connection = psycopg2.connect(user=dictConnParams['user'],\n password=dictConnParams['password'],\n host=dictConnParams['host'],\n port=dictConnParams['port'],\n database=dictConnParams['database'])\n\n cursor = connection.cursor()\n\n if operation == 'create':\n try:\n create_table_query = '''\n CREATE TABLE \"Tenis\".\"Seasons\"\n (\n \"season_id\" varchar(50) NOT NULL,\n \"season_name\" varchar(50) NOT NULL,\n \"start_date\" date NULL,\n \"end_date\" date NULL,\n \"year\" int NULL,\n \"tournament_id\" varchar(50) NOT NULL,\n \"category_pk\" varchar(50) NOT NULL,\n \"prize_money_amt\" int NULL,\n \"prize_currency\" varchar(50) NULL,\n \"surface\" varchar(50) NULL,\n \"complex\" varchar(50) NULL,\n \"q_competitors\" int NULL,\n \"q_qualified_competitors\" int NULL,\n \"q_scheduled_matches\" int NULL,\n CONSTRAINT \"FK_128\" FOREIGN KEY ( \"tournament_id\" ) REFERENCES \"Tenis\".\"Tournaments\" ( \"tournament_id\" )\n );\n \n CREATE UNIQUE INDEX \"PK_Seasons\" ON \"Tenis\".\"Seasons\"\n (\n \"season_id\"\n );\n \n CREATE INDEX \"fkIdx_128\" ON \"Tenis\".\"Seasons\"\n (\n \"tournament_id\"\n );\n '''\n\n cursor.execute(create_table_query)\n connection.commit()\n logging.info(\"Table Seasons creada satisfactoriamente en PostgreSQL - Tenis\")\n\n except (Exception, psycopg2.DatabaseError) as error:\n logging.error(\"Error al crear en PostgreSQL la tabla Seasons: {}\".format(error))\n raise Exception(\"Error 
al crear en PostgreSQL la tabla Seasons: {}\".format(error))\n\n elif operation == 'insert':\n try:\n cursor.execute('''SELECT * FROM \"Tenis\".\"Seasons\"''')\n all = cursor.fetchall()\n\n ids_excluir = []\n for id in all:\n # Eliminamos los registros de torneos del 2020 para actualizarlos\n if id[4] == 2020:\n cursor.execute('''DELETE FROM \"Tenis\".\"Seasons\" WHERE season_id = %s''', (id[0],))\n connection.commit()\n # Añadimos los ids de partidos existentes para excluirlos del Load\n ids_excluir.append(id[0])\n\n df = df[~df['season_id'].isin(ids_excluir)].reset_index(drop=True).copy()\n\n postgres_insert_query = \"\"\" INSERT INTO \"Tenis\".\"Seasons\" \n (season_id, season_name, start_date, end_date, year, tournament_id, category_pk,\n prize_money_amt, prize_currency, surface, complex, q_competitors, \n q_qualified_competitors, q_scheduled_matches) \n VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s) \"\"\"\n\n if len(df) > 0:\n count = 0\n pct_logueado = 0\n for registro in range(len(df)):\n # Logueamos el avance de la carga\n if (np.round(registro/len(df), 3) in [0.10, 0.20, 0.30, 0.40, 0.50, 0.60, 0.70, 0.80, 0.90, 1.00]) & (np.round(registro/len(df) * 100, 0) > pct_logueado):\n logging.info('Avance carga: {} / {} ({}%)'.format(registro, len(df),np.round(registro / len(df) * 100,0)))\n pct_logueado = np.round(registro / len(df) * 100, 0)\n\n # Generamos el registro a insertar\n record_to_insert = (df[df.index == registro]['season_id'].values[0],\n df[df.index == registro]['season_name'].values[0],\n df[df.index == registro]['start_date'].values[0],\n df[df.index == registro]['end_date'].values[0],\n df[df.index == registro]['year'].values[0],\n df[df.index == registro]['tournament_id'].values[0],\n df[df.index == registro]['category_pk'].values[0],\n df[df.index == registro]['prize_money_amt'].values[0],\n df[df.index == registro]['prize_currency'].values[0],\n df[df.index == registro]['surface'].values[0],\n df[df.index == registro]['complex'].values[0],\n df[df.index == registro]['q_competitors'].values[0],\n df[df.index == registro]['q_qualified_competitors'].values[0],\n df[df.index == registro]['q_scheduled_matches'].values[0]\n )\n\n cursor.execute(postgres_insert_query, record_to_insert)\n connection.commit()\n count += 1\n logging.info('Registros insertados satisfactoriamente en Table Seasons: {}'.format(count))\n else:\n logging.info('No hay registros nuevos para agregar')\n\n except (Exception, psycopg2.Error) as error:\n if (connection):\n logging.error(\"Fallo al insertar registro en la Tabla Seasons: {}\".format(error))\n raise Exception(\"Fallo al insertar registro en la Tabla Seasons: {}\".format(error))\n\n elif operation == 'update':\n print('update')\n\n except:\n logging.error('Falló la conexión con PostgreSQL')\n raise Exception('Falló la conexión con PostgreSQL')\n\n finally:\n # closing database connection.\n if (connection):\n cursor.close()\n connection.close()\n print(\"PostgreSQL connection is closed\")", "title": "" }, { "docid": "fcf27483de35984904fa3dcff421b9ad", "score": "0.52288675", "text": "def _check_and_load_tables(tables_, var_name):\n tables = []\n for table_idx, table in enumerate(tables_):\n table = stringify_path(table)\n if isinstance(table, str):\n loaded = _read_events_table(table)\n tables.append(loaded)\n elif isinstance(table, pd.DataFrame):\n tables.append(table)\n elif isinstance(table, np.ndarray):\n pass\n else:\n raise TypeError(\n \"%s can only be a pandas DataFrames or a\"\n \"string. 
A %s was provided at idx %d\"\n % (var_name, type(table), table_idx)\n )\n return tables", "title": "" }, { "docid": "586a3b4323c7d31a2d90023811f64058", "score": "0.52253515", "text": "def prepare_airport_data():\n\n airport_schema = StructType([\n StructField('ident', StringType()),\n StructField('type', StringType()),\n StructField('name', StringType()),\n StructField('elevation_ft', IntegerType()),\n StructField('continent', StringType()),\n StructField('iso_country', StringType()),\n StructField('iso_region', StringType()),\n StructField('municipality', StringType()),\n StructField('iata_code', StringType()),\n StructField('coordinates', StringType())\n ])\n \n airport_data = AIRPORT_KEY\n df_airport = spark.read.format('csv').options(header=True, delimiter=\",\").schema(airport_schema).load(airport_data)\n \n df_airport_uni = df_airport.dropDuplicates([\"ident\"])\n df_airport_new_columns = df_airport_uni \\\n .withColumn('latitude', split(df_airport_uni['coordinates'], ',').getItem(0)) \\\n .withColumn('longitude', split(df_airport_uni['coordinates'], ',').getItem(1)) \\\n .withColumn('country', lit('United States'))\n df_dropped = df_airport_new_columns.drop('coordinates')\n \n file_name = airport_data.split(\"/\")[-1].split('.')[0]\n df_dropped.write.mode('overwrite').parquet(f\"{PATH_OUT}/dim_{file_name}.parquet\")", "title": "" }, { "docid": "8d0f71fb17fb55cd60c0f8015d393d31", "score": "0.52246755", "text": "def loader(request):\n # Configure TableLoader to use directory containing sample shapefiles.\n root_path = os.path.dirname(__file__)\n data_path = os.path.join(root_path, '../../test_data')\n loader = TableLoader(directory=data_path)\n\n # Recreate PostgreSQL sample schema.\n with loader.database.cursor() as cur:\n cur.execute(\"\"\"\n CREATE EXTENSION IF NOT EXISTS postgis;\n DROP SCHEMA IF EXISTS sample CASCADE;\n CREATE SCHEMA sample;\n \"\"\")\n loader.database.refresh()\n\n # Load all shapefiles in test data directory.\n for filename in os.listdir(data_path):\n file_root, file_ext = os.path.splitext(filename)\n if file_ext.lower() == '.shp':\n shp_path = os.path.join(data_path, filename)\n table_name = 'sample.' 
+ file_root\n loader.load_shp(shp_path, table_name)\n\n # Reproject all non-conforming SRIDs into project SRID.\n conform_srids(loader.srid, schema=loader.tables.sample)\n\n # Tear down sample schema when done.\n def teardown():\n with loader.database.cursor() as cur:\n cur.execute(\"DROP SCHEMA sample CASCADE;\")\n request.addfinalizer(teardown)\n\n return loader", "title": "" }, { "docid": "40be516baae8a2f3532825776721eb8b", "score": "0.52234316", "text": "def load_data(df):\n engine = create_engine('sqlite:///'+database_filename)\n with engine.connect() as con:\n con.execute('drop table if exists disaster')\n df.to_sql('disaster', engine, index=False)\n return", "title": "" }, { "docid": "a6eb1edea9f7b855eb1600cc9823e57c", "score": "0.5221471", "text": "def load(self, data: List[Dict]) -> None:\n if not data:\n return\n\n logging.debug(f\"Loading data to SQLite for {self.table.name}\")\n if self.table.primary_key:\n # We have to use SQLite's Upsert but the default SQLite for python\n # does not yet support the \"ON CONFLICT\" clause for upserting\n # So, we'll follow the slow but stable approach of inserting each\n # row and updating on conflict.\n with self.engine.connect() as connection:\n for row in data:\n try:\n connection.execute(self.table.insert(), row)\n except exc.IntegrityError:\n statement = self.table.update() # .where()\n for primary_key in self.table.primary_key:\n statement = statement.where(\n primary_key == row[primary_key.name]\n )\n\n connection.execute(statement, row)\n else:\n # Just Insert (append) as no conflicts can arise\n with self.engine.connect() as connection:\n connection.execute(self.table.insert(), data)", "title": "" }, { "docid": "78b93a15eba65c1b216668517855014e", "score": "0.52199495", "text": "def get_table(self, table):\n if table not in ct.TABLES:\n raise AttributeError\n\n db_path = os.path.join(self.db_dir, \"%s.db\" % table)\n engine = create_engine(r\"sqlite:///%s\" % db_path)\n\n if os.path.exists(db_path):\n cmd = raw_input(\"Data Base already exists!, still generate data base? (Y/N)\")\n if cmd == \"Y\":\n pass\n else:\n return\n\n for year in self.years:\n for season in range(4, 5):\n print(\"%sget data in %s S%s...\" % (ct.NEW_LINE_CHAR, year, season))\n try:\n table_data = getattr(ts, \"get_%s_data\" % table)(year, season)\n except IOError:\n print(\"No data in %s S%s\" % (year, season))\n continue\n table_data['year'] = year\n table_data['season'] = season\n\n # data in \"debtpaying\" table contain \"--\" characters.\n # Numeric columns in DataFrame couldn't compare with a string.\n table_data[table_data[table_data.columns[table_data.dtypes == \"object\"]] == \"--\"] = np.nan\n\n if table == \"report\":\n # Since distributions in report table contain Chinese characters and Nan.\n # Pandas DataFrame use \"unicode\" type when Chinese characters appear,\n # use \"float\" type when there are no Chinese characters. While sqlite3 save\n # all the distributions as float, this may raise error when loading data\n # from sqlite3 database. 
So force all distribution data to \"unicode\" here.\n table_data.distrib = table_data.distrib.astype(\"unicode\")\n # print table_data[table_data.code==\"000651\"]\n # tushare has a bug: get duplicate rows of None for some\n # stock, like 601229.\n table_data.drop_duplicates(inplace=True) # subset=[\"code\", \"year\", \"season\"], inplace=True)\n # print table_data[table_data.code==\"000651\"]\n table_data.to_sql(table, engine, if_exists='append')", "title": "" }, { "docid": "0e0b6acff1a1a28a23dc17445005d2a2", "score": "0.52157176", "text": "def bq_load_partition(self, release: StreamRelease, **kwargs):\n if release.first_release:\n # The main table is the same as the partition, so no need to upload the partition as well. Especially\n # because a first release can be relatively big in size.\n raise AirflowSkipException(\"Skipped, because first release\")\n\n for transform_path in release.transform_files:\n if self.batch_load:\n transform_blob = batch_blob_name(release.transform_folder)\n main_table_id, partition_table_id = (self.dag_id, f\"{self.dag_id}_partitions\")\n else:\n transform_blob = blob_name(transform_path)\n main_table_id, partition_table_id = table_ids_from_path(transform_path)\n table_description = self.table_descriptions.get(main_table_id, \"\")\n bq_load_ingestion_partition(\n self.schema_folder,\n release.end_date,\n transform_blob,\n self.dataset_id,\n main_table_id,\n partition_table_id,\n self.source_format,\n self.schema_prefix,\n self.schema_version,\n self.dataset_description,\n table_description=table_description,\n **self.load_bigquery_table_kwargs,\n )\n if self.batch_load:\n return", "title": "" }, { "docid": "e61145afb572c8f0e5574ff9303f67d2", "score": "0.51974016", "text": "def import_data():\n driver_filename = './data/drivers.json'\n shipment_filename = './data/shipments.json'\n # Import drivers\n data = ''\n with open(driver_filename) as _f:\n data = _f.read()\n if data:\n json_data = json_loads(data)\n for driver_id, loc_data in json_data.items():\n coordinates = loc_data['coordinates']\n lat = coordinates['latitude']\n lon = coordinates['longitude']\n #Create and save the Driver object\n kwargs = {\n 'driverId': driver_id,\n 'lat': lat,\n 'lon': lon,\n 'point': Point(lon, lat)\n }\n Driver(**kwargs).save()\n print('saved driver {0}, {1} {2}'.format(driver_id, lat, lon))\n\n # Import shipments\n data = ''\n with open(shipment_filename) as _f:\n data = _f.read()\n if data:\n json_data = json_loads(data)\n for shipment_id, ship_data in json_data.items():\n coordinates = ship_data['coordinates']\n lat = coordinates['latitude']\n lon = coordinates['longitude']\n #Create and save Shipment object\n kwargs = {\n 'shipmentId': shipment_id,\n 'lat': lat,\n 'lon': lon,\n 'point': Point(lon, lat)\n }\n Shipment(**kwargs).save()\n print(\"saved shipment {0} {1} {2}\".format(shipment_id, lat, lon))", "title": "" }, { "docid": "610af6854dbf539150986ee8bceb8f6f", "score": "0.5194224", "text": "def loadData(self):\n\n # Connect to the database\n try:\n if self.connector == 'mysql':\n self._conn = MySQLdb.connect(\n self.server, self.user, self.password, self.db_name)\n self._c = self._conn.cursor()\n elif self.connector == 'sqlalchemy':\n engine_name = ('mysql://' + self.user + ':' + self.password +\n '@' + self.server + '/' + self.db_name)\n print('---- Creating engine {}'.format(engine_name))\n engine = create_engine(engine_name)\n self._conn = engine.connect()\n else:\n # sqlite3\n # sqlite file will be in the root of the project, we read the\n # name from the config 
file and establish the connection\n db_path = os.path.join(self.directory,\n self.dataset_fname + '.db')\n print(\"---- Connecting to {}\".format(db_path))\n self._conn = sqlite3.connect(db_path)\n self._c = self._conn.cursor()\n self.dbON = True\n except:\n print(\"---- Error connecting to the database\")\n\n try:\n # #####################\n # Create missing tables\n\n # Create all tables that do not exist in the database yet.\n tablenames = self._getTableNames()\n alltables = [self.preds_tablename, self.label_values_tablename,\n self.label_info_tablename, self.history_tablename]\n missing_tables = [t for t in alltables if t not in tablenames]\n\n self._createDBtable(missing_tables)\n\n # ################\n # Load predictions\n sqlQuery = 'SELECT * FROM ' + self.preds_tablename\n df_preds = pd.read_sql(\n sqlQuery, con=self._conn, index_col=self.ref_name)\n\n # ###########\n # Load labels\n\n # Load label metadata\n sqlQuery = 'SELECT * FROM ' + self.label_info_tablename\n df_labelinfo = pd.read_sql(\n sqlQuery, con=self._conn, index_col=self.ref_name)\n # Rename column 'datestr' to 'date':\n df_labelinfo.rename(columns={'datestr': 'date'}, inplace=True)\n # Convert column names into tuples\n df_labelinfo.columns = (\n [('info', c) for c in df_labelinfo.columns])\n\n # Load label values\n sqlQuery = 'SELECT * FROM ' + self.label_values_tablename\n df_labelvalues = pd.read_sql(\n sqlQuery, con=self._conn, index_col=self.ref_name)\n # Convert column names into tuples\n df_labelvalues.columns = (\n [('label', c) for c in df_labelvalues.columns])\n\n # Joing label metadata and label values into a single dataframe\n # df_labels = pd.concat([df_labelinfo, df_labelvalues])\n df_labels = df_labelinfo.join(df_labelvalues)\n\n # Convert tuple column names to multi-index\n df_labels.columns = pd.MultiIndex.from_tuples(\n df_labels.columns)\n\n # ##################\n # Load label history\n sqlQuery = 'SELECT * FROM ' + self.history_tablename\n # Read dataframe. 
Note that I do not take any reference columns\n labelhistory = pd.read_sql(sqlQuery, con=self._conn)\n # Rename columns datestr to date\n # (this is required because 'date' is a reserved word in sql)\n labelhistory.rename(\n columns={'datestr': 'date', self.ref_name: 'wid'},\n inplace=True)\n\n except Exception as E:\n print('Exception {}'.format(str(E)))\n\n return df_labels, df_preds, labelhistory", "title": "" }, { "docid": "9ef83b77b1bfd482f24887c56864a93e", "score": "0.5186416", "text": "def load_sample_data_into_datasource(schema_only_db):\n logging.getLogger().debug(\"Loading Sample data for tests\")\n\n _pk = schema_only_db.store_taskmanager(\n \"taskmanager1\",\n \"11111111-1111-1111-1111-111111111111\",\n datetime.datetime(2016, 3, 14),\n ) # _pk=1 probably\n header = Header(_pk)\n metadata = Metadata(_pk)\n schema_only_db.insert(_pk, 1, \"my_test_key\", b\"my_test_value\", header, metadata)\n schema_only_db.insert(_pk, 1, \"a_test_key\", b\"a_test_value\", header, metadata)\n\n _pk = schema_only_db.store_taskmanager(\"taskmanager2\", \"22222222-2222-2222-2222-222222222222\") # _pk=2 probably\n header = Header(_pk)\n metadata = Metadata(_pk)\n schema_only_db.insert(_pk, 2, \"other_test_key\", b\"other_test_value\", header, metadata)\n\n # return the connection now that it isn't just the schema\n return schema_only_db", "title": "" }, { "docid": "8912f4ecf0823e2953eb8ebf6d1c45a2", "score": "0.5183488", "text": "def import_in_synthese(import_obj):\n try:\n\n logger.info(\"Importing data in gn_synthese.synthese table\")\n\n IMPORTS_SCHEMA_NAME = blueprint.config[\"IMPORTS_SCHEMA_NAME\"]\n MODULE_CODE = blueprint.config[\"MODULE_CODE\"]\n\n # get table name\n table_name = set_imports_table_name(get_table_name(import_obj.id_import))\n # set total user columns\n selected_cols = get_selected_columns(table_name, import_obj.id_field_mapping)\n added_cols = {\n \"the_geom_4326\": \"gn_the_geom_4326\",\n \"the_geom_local\": \"gn_the_geom_local\",\n \"the_geom_point\": \"gn_the_geom_point\",\n \"id_area_attachment\": \"id_area_attachment\",\n }\n #  add date_max if not provided\n if \"date_max\" not in selected_cols:\n added_cols[\"date_max\"] = \"date_max\"\n total_columns = set_total_columns(\n selected_cols, added_cols, import_obj.id_import, MODULE_CODE\n )\n\n # check if id_source already exists in synthese table\n is_id_source = check_id_source(import_obj.id_import)\n if is_id_source:\n raise GeonatureImportApiError(\n message=\"échec : déjà importé (vérification basée sur l'id_source)\",\n details=\"\",\n status_code=400,\n )\n logger.info(\"INSERT IN t_sources\")\n # insert into t_sources\n insert_into_t_sources(\n IMPORTS_SCHEMA_NAME, table_name, import_obj.id_import, total_columns\n )\n\n logger.info(\"#### Start insert in Synthese\")\n # insert into synthese\n load_data_to_synthese(\n IMPORTS_SCHEMA_NAME, table_name, total_columns, import_obj\n )\n\n logger.info(\"-> Data imported in gn_synthese.synthese table\")\n\n # UPDATE TIMPORTS\n\n logger.info(\"update t_imports on final step\")\n\n date_ext = get_date_ext(\n IMPORTS_SCHEMA_NAME,\n table_name,\n total_columns[\"date_min\"],\n total_columns[\"date_max\"],\n )\n import_obj.import_count = get_n_valid_rows(IMPORTS_SCHEMA_NAME, table_name)\n import_obj.taxa_count = get_n_taxa(\n IMPORTS_SCHEMA_NAME, table_name, total_columns[\"cd_nom\"]\n )\n import_obj.date_min = date_ext[\"date_min\"]\n import_obj.date_max = date_ext[\"date_max\"]\n import_obj.date_end_import = datetime.datetime.now()\n import_obj.is_finished = True\n 
import_obj.processing = False\n\n logger.info(\"-> t_imports updated on final step\")\n\n DB.session.commit()\n\n mappings = (\n DB.session.query(TMappings)\n .filter(\n TMappings.id_mapping.in_(\n [import_obj.id_content_mapping, import_obj.id_field_mapping]\n )\n )\n .all()\n )\n import_as_dict = import_obj.as_dict()\n import_as_dict[\"mappings\"] = [m.as_dict() for m in mappings]\n\n return import_as_dict\n\n except Exception as e:\n DB.session.rollback()\n logger.error(\"*** SERVER ERROR WHEN IMPORTING DATA IN GN_SYNTHESE.SYNTHESE\")\n logger.exception(e)\n raise GeonatureImportApiError(\n message=str(e), details=\"\",\n )\n finally:\n DB.session.close()", "title": "" }, { "docid": "363a9ddeaacc5f687e9e5dee59ea8663", "score": "0.51826304", "text": "def load_data(first_year=2004):\n delete_tables()\n create_tables()\n\n loader = models.LobbyLoader(int(first_year))\n loader.run()", "title": "" }, { "docid": "5ef827997a2bc2855b39ddf7c88358fe", "score": "0.518077", "text": "def process_temperature_data(spark, temperature_input_data, output_data):\n \n temperature_staging_table = spark.read.csv(temperature_input_data, header='true')\n \n temperature_staging_table = temperature_staging_table.dropna(subset=['AverageTemperature'])\n temperature_staging_table = temperature_staging_table.filter((col(\"dt\") >= \"1990-01-01\") & (col(\"Country\") == 'United States'))\n\n temperature_staging_table = temperature_staging_table.select(F.year('dt').alias('year'),\n F.month('dt').alias('month'),\n F.col('AverageTemperature').alias('average_temperature'), \n F.col('City').alias('city'), \n F.col('Country').alias('country'), \n F.col('Latitude').alias('latitude'), \n F.col('Longitude').alias('longitude')).dropDuplicates() \\\n .groupby(['city', 'country', 'latitude', 'longitude', 'month']).agg(F.mean('average_temperature'))\n\n temperature_staging_table = temperature_staging_table.withColumnRenamed(\"avg(average_temperature)\", \"average_temperature\")\n\n temperature_staging_table = temperature_staging_table.withColumn('temperature_id', F.monotonically_increasing_id())\n \n quality_checks(temperature_staging_table)\n temperature_staging_table.write.parquet(os.path.join(output_data, 'temperature_table.parquet'), mode='overwrite', partitionBy=['country', 'city'])\n\n print('Temperature Dimension Table is completed')", "title": "" } ]
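Several of the data-loading passages above repeat one pattern: stage a DataFrame, then write it to a SQL table with replace or append semantics. A minimal runnable distillation of that pattern follows, using an in-memory SQLite engine; the table and column names are invented for illustration and are not taken from any passage.

import pandas as pd
from sqlalchemy import create_engine

def load_frame(df, table, engine, mode="replace"):
    # Write `df` to `table`; `mode` is passed straight through to `if_exists`,
    # so "replace" drops and recreates the table while "append" adds rows.
    df.to_sql(table, engine, if_exists=mode, index=False)

# Usage against a throwaway in-memory database:
engine = create_engine("sqlite:///:memory:")
frame = pd.DataFrame({"city": ["Austin", "Boston"], "temp": [30.1, 12.4]})
load_frame(frame, "temperature", engine)
print(pd.read_sql("SELECT * FROM temperature", engine))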
1fc1d869ae81ca45e2e28f8d4f509f65
Return an iterator to generate the elements of the group.
[ { "docid": "7ab081a8bb44eb0d34c426013988cce0", "score": "0.0", "text": "def generate(self, method=\"coset\", af=False):\n if method == \"coset\":\n return self.generate_schreier_sims(af)\n elif method == \"dimino\":\n return self.generate_dimino(af)\n else:\n raise ValueError('there is not this method')", "title": "" } ]
[ { "docid": "02a1f366aad699ebd518e22e4bcd0b92", "score": "0.76486987", "text": "def __iter__(self):\n return self.groupList.__iter__()", "title": "" }, { "docid": "ce6ed2a8992327e0d2ab1bf6140a6a03", "score": "0.70087254", "text": "def __iter__(self):\n \n return itertools.product(*[range(x[0]) for x in self.frame_groups])", "title": "" }, { "docid": "03bf9d058b4120cf8b5fdafea90781e8", "score": "0.6715033", "text": "def iter_grouped(self) -> Iterable[\"ActiveAlarm\"]:\n for a in ActiveAlarm.objects.filter(groups__in=[self.reference]):\n yield a", "title": "" }, { "docid": "7afe80934af7f8bf43ac1aefb6053949", "score": "0.67019165", "text": "def __grouper(self) -> Generator:\n args = [iter(self.frames.to_numpy())] * self.frames_per_chunk\n return itertools.zip_longest(*args, fillvalue=np.nan)", "title": "" }, { "docid": "539b9a092efc4c09bc59aee71501053f", "score": "0.66423535", "text": "def __iter__(self):\n for element in self._treeset:\n yield element", "title": "" }, { "docid": "539b9a092efc4c09bc59aee71501053f", "score": "0.66423535", "text": "def __iter__(self):\n for element in self._treeset:\n yield element", "title": "" }, { "docid": "3f6409d47ec7d3cd8c1c799bf31c0906", "score": "0.66315323", "text": "def group_iterator(symmetric_only=False):\n for row, iso in zip(names, isomorphic):\n if symmetric_only:\n if iso is None:\n continue\n yield load_group(row[0])", "title": "" }, { "docid": "4413609775279b122473d629bc623877", "score": "0.65806264", "text": "def iter_groups(self) -> Iterable[\"ActiveAlarm\"]:\n for a in ActiveAlarm.objects.filter(reference__in=self.groups):\n yield a", "title": "" }, { "docid": "459353e90401c3a9b6a14b8801737ac2", "score": "0.6554567", "text": "def __iter__(self):\n for iset in self._imagesets:\n yield iset", "title": "" }, { "docid": "8bcb92ce20b95b1d7ad4b1898a843ae9", "score": "0.6523349", "text": "def __iter__ (self):\n cursor = self.first()\n while cursor is not None:\n yield cursor.element()\n cursor = self.after(cursor)", "title": "" }, { "docid": "976cb7031df8ddc6a79196a068c76a2e", "score": "0.6484406", "text": "def __iter__(self):\n for node in self.slices[0]:\n yield node", "title": "" }, { "docid": "fa74fefc3e53bb8aa61bcc9784fde55f", "score": "0.64786446", "text": "def iter_grouped(self) -> Iterable[\"ArchivedAlarm\"]:\n if not self.groups:\n return\n for a in ArchivedAlarm.objects.filter(groups__in=self.groups):\n yield a", "title": "" }, { "docid": "213c104eac168178b3c76b7439d5e790", "score": "0.645429", "text": "def __iter__(self):\n p = self.first()\n while p is not None:\n yield p.element()\n p = self.after(p)", "title": "" }, { "docid": "ff18350d91bc3e36aea7d6083a76f97f", "score": "0.6425498", "text": "def groups():", "title": "" }, { "docid": "b08c2c0b57b0416ee25390587bf54a16", "score": "0.64058894", "text": "def __iter__(self):\n for idx in range(3):\n yield self[idx]", "title": "" }, { "docid": "3c716101729b190dc7864d8129efc79c", "score": "0.6395879", "text": "def __iter__(self):\n for span in chain.from_iterable(map(iter, self.children)):\n yield span\n yield self", "title": "" }, { "docid": "6ba30e3a03707e367a0a71e5632e052e", "score": "0.637505", "text": "def __iter__():", "title": "" }, { "docid": "6ba30e3a03707e367a0a71e5632e052e", "score": "0.637505", "text": "def __iter__():", "title": "" }, { "docid": "6ba30e3a03707e367a0a71e5632e052e", "score": "0.637505", "text": "def __iter__():", "title": "" }, { "docid": "6ba30e3a03707e367a0a71e5632e052e", "score": "0.637505", "text": "def __iter__():", "title": "" }, { "docid": 
"6ba30e3a03707e367a0a71e5632e052e", "score": "0.637505", "text": "def __iter__():", "title": "" }, { "docid": "e4071f94a6ee5a0aa52d6c0ec02b62f7", "score": "0.6331622", "text": "def __iter__(self):\n for x in self.shape:\n yield x", "title": "" }, { "docid": "3052399dc35990959f4d4ae029d05cfb", "score": "0.6323877", "text": "def __iter__(self):\n for thing in self._container:\n yield thing", "title": "" }, { "docid": "87b5cc42dabf4f66bf8612bc7d74e193", "score": "0.6321858", "text": "def itergroups(self, group_size, p=0.0):\n counts, ranks, labels = self.getGrouped(group_size)\n for i in range(len(counts)):\n if ranks is None:\n rank_data = None\n else:\n rank_data = numpy.array(ranks[i])\n \n if labels is None:\n label_data = None\n else:\n label_data = numpy.array(labels[i])\n \n count_data = numpy.array(counts[i])\n\n # two-sided per-line Chebyshev filtering of whole gene counts\n if p > 0.0:\n k = chebyshev_two_tailed(p)\n max_ = count_data.max(axis=1)\n mean_ = count_data.mean()\n stdev_ = count_data.std(ddof=1)\n max_ -= mean_\n max_ /= stdev_\n indices = max_ < k\n count_data = count_data[indices]\n\n yield count_data, rank_data, label_data", "title": "" }, { "docid": "357c86b59ffedf32e65ec8540a3f1e3b", "score": "0.62996864", "text": "def __iter__(self):\n return self.entries()", "title": "" }, { "docid": "a8ae17168252566502b26e3f2d51c196", "score": "0.62935936", "text": "def __iter__(self):\n for o in self._iter:\n yield o", "title": "" }, { "docid": "0b2e8ffe5531826de89364136876d6c3", "score": "0.62790763", "text": "def groupcomplete(self, it, is_complete):\n g = []\n #\n for e in it:\n n = g + [e]\n if is_complete(n):\n yield n\n g = []\n else:\n g = n\n #\n # yield the final incomplete group\n if g:\n yield g", "title": "" }, { "docid": "d47d51decd66d7abe62447233cc97ddc", "score": "0.6272096", "text": "def __iter__(self):\n for it in self.data:\n yield it", "title": "" }, { "docid": "6b846ef15aab9ba1bb60b0d7ee0b4d33", "score": "0.62582", "text": "def __iter__(self):\r\n for match in self.matches:\r\n yield match", "title": "" }, { "docid": "c66b10df1ec554f7857dfa3a15332cf5", "score": "0.6249203", "text": "def iter(self):\n return []", "title": "" }, { "docid": "55435ec9f351d984c07862f78d96a7d8", "score": "0.6247007", "text": "def __iter__(self):\n for p in self.points():\n yield p", "title": "" }, { "docid": "0a8199ed629ade3c60a4afa53f71d786", "score": "0.6245429", "text": "def __iter__(self):\n return(self(x, can_coords = False) for x in xmrange(self.elementary_divisors()) )", "title": "" }, { "docid": "6f92e1a6268a94ac8af2ef2f1b9ab915", "score": "0.6236876", "text": "def groupwise(n,iterable):\r\n grupo = tee(iterable,n)\r\n for i,e in enumerate(grupo):\r\n consume(e,i)\r\n return zip( *grupo )", "title": "" }, { "docid": "8292c24b08ae133d0f4adb2d91f054d3", "score": "0.6213941", "text": "def __iter__(self):\n \n orders = [x.order() for x in self.gens()]\n res = [sum(self.gens()[i]*x[i] for i in range(len(self.gens()))) for x in xmrange(orders)]\n return (x for x in uniq(res))", "title": "" }, { "docid": "c801d46f7859c5bd806f1c576f787479", "score": "0.6189109", "text": "def __iter__(self):\n cur = self.first\n while cur is not None:\n cur2 = cur.next\n yield cur.value[1]\n cur = cur2\n raise StopIteration", "title": "" }, { "docid": "411e9aa34eb5b6164374a75346cd20a5", "score": "0.61855197", "text": "def __iter__(self):\n for i in range(self.getNumSequences()):\n yield self.getSequenceIterator(i)", "title": "" }, { "docid": "123d7734c103277f78e1492883523393", "score": "0.6173249", 
"text": "def items(self):\n for elem in self._pairs:\n yield elem", "title": "" }, { "docid": "4fd9e12a92ea96e9d24e7867630a7a66", "score": "0.6171555", "text": "def __iter__(self):\n for xCoord in range(self.nXPix):\n for yCoord in range(self.nYPix):\n pixelLabel = self.beamImage[xCoord][yCoord]\n pixelData = self.file.get_node('/' + pixelLabel)\n yield pixelData", "title": "" }, { "docid": "f2abf39959be92e7d9a7f20063f3bf3e", "score": "0.61659205", "text": "def __iter__(self):\n yield from (nd.val for nd in self.nodes())", "title": "" }, { "docid": "495b8bca9708be77cbb02d3f666995d7", "score": "0.6164896", "text": "def __iter__(self):\n for item in self._items:\n yield item", "title": "" }, { "docid": "02aa3f4de1a9ae0cd0bd2671b4baf722", "score": "0.61470205", "text": "def __iter__( self ):\n return self", "title": "" }, { "docid": "c99b7e93cba542fc8d215a595056efef", "score": "0.61342496", "text": "def __iter__(self):\n if self.value != None:\n yield((self.type(), self.value))\n\n for i in self.children:\n\n yield from ((i+key, val) for key, val in self.children[i])", "title": "" }, { "docid": "897dc736e1f10fc7d92e69a8896b8812", "score": "0.6128896", "text": "def __iter__(self):\n for item in (self[i] for i in range(len(self))):\n yield item", "title": "" }, { "docid": "897dc736e1f10fc7d92e69a8896b8812", "score": "0.6128896", "text": "def __iter__(self):\n for item in (self[i] for i in range(len(self))):\n yield item", "title": "" }, { "docid": "3f4ba7b6cf269ac0521cd881f6a9d8c3", "score": "0.6124573", "text": "def __iter__(self):\n yield from self._ordered", "title": "" }, { "docid": "42ab08040898ce3b127113f7b4a45920", "score": "0.61237127", "text": "def __iter__(self):\n\t\tcursor = 0\n\t\twhile cursor < len(self):\n\t\t\tyield self._items[cursor]", "title": "" }, { "docid": "b1ed09276e1e977b90b7ef6d028dfafd", "score": "0.6122613", "text": "def __iter__(self) -> Iterable:\n return iter(self.builder)", "title": "" }, { "docid": "26dccbd02248f853cb233aa8146111eb", "score": "0.6121947", "text": "def _iterate(self):\n return iter(())", "title": "" }, { "docid": "20b97fc4aec429181c21f01e94399999", "score": "0.612022", "text": "def __iter__(self):\r\n\t\treturn self.iter()", "title": "" }, { "docid": "790f1246f11602f93da07df8af9c5bcc", "score": "0.6097875", "text": "def __iter__(self):\n for r in self.results:\n yield r", "title": "" }, { "docid": "ae2163e8012bdaea2d7928688ee56d65", "score": "0.60922146", "text": "def __iter__(self):\n for item in self.__data:\n yield item", "title": "" }, { "docid": "a65a61bb9899a50be12bfbe568980260", "score": "0.60831773", "text": "def __iter__(self) -> Iterator:\n\n yield from self.astuple()", "title": "" }, { "docid": "a65a61bb9899a50be12bfbe568980260", "score": "0.60831773", "text": "def __iter__(self) -> Iterator:\n\n yield from self.astuple()", "title": "" }, { "docid": "a65a61bb9899a50be12bfbe568980260", "score": "0.60831773", "text": "def __iter__(self) -> Iterator:\n\n yield from self.astuple()", "title": "" }, { "docid": "a0a73ec76e2b9db729eb8b78bef014a3", "score": "0.6072035", "text": "def __iter__(self):\n return iter([self.zaehler, self.nenner])", "title": "" }, { "docid": "09fae83597e9b8af90f7a6c681a98446", "score": "0.60692173", "text": "def __iter__(self):\n for idx in np.where(self._mask)[0]:\n yield self._data[idx]", "title": "" }, { "docid": "cee9d3aa7688a4ba1f1d151a54d83c70", "score": "0.6068228", "text": "def __iter__(\n self,\n ) -> Iterator[Item]:\n for _, items in self.blocks:\n yield from items", "title": "" }, { "docid": 
"f16d62ffa4570d34b843f3ba7a62ee78", "score": "0.60631907", "text": "def __iter__(self):\r\n \r\n #first element in the list\r\n cursor = self.first()\r\n \r\n #iterate the list until trailer or header sentinel is reached\r\n while cursor is not None:\r\n \r\n #freeze cursor at current position\r\n yield cursor.element()\r\n \r\n #move cursor to the next node\r\n cursor = self.after(cursor)", "title": "" }, { "docid": "1050d7f47baa9000cd43cb6ddb068804", "score": "0.6062899", "text": "def group_consequent(iterator, key=None):\n if key is None:\n key = lambda e: e\n\n prev_key = None\n first_run = True\n current_group = []\n for row in iterator:\n current_key = key(row)\n if not first_run and current_key != prev_key:\n yield current_group\n current_group = []\n\n current_group.append(row)\n first_run = False\n prev_key = current_key\n\n if current_group:\n yield current_group", "title": "" }, { "docid": "35ebb3006d275a4a42816b46edf35baf", "score": "0.6051579", "text": "def __iter__(self) -> Iterator[np.ndarray]:\n for i in range(self.len()):\n yield self[i]", "title": "" }, { "docid": "89ecfbf7ae09c088917d2405d07031b2", "score": "0.603609", "text": "def __iter__(self):\n for i in range(len(self)):\n yield self.keys[i]", "title": "" }, { "docid": "ce56b783bac1fbcaee3bf19ad3a8a3b5", "score": "0.60351205", "text": "def __iter__(self):\n return self", "title": "" }, { "docid": "ce56b783bac1fbcaee3bf19ad3a8a3b5", "score": "0.60351205", "text": "def __iter__(self):\n return self", "title": "" }, { "docid": "4186e6900d5e2e7a9c7f2226beebfb87", "score": "0.60340625", "text": "def __iter__(self):\n return self._inner_list.__iter__()", "title": "" }, { "docid": "2e5a029730d23a34f1c3da61967b2ec6", "score": "0.6033499", "text": "def __iter__(self):\n\t\treturn self.set.__iter__()", "title": "" }, { "docid": "cb844e98f9f08374f84423c42c64340f", "score": "0.60302395", "text": "def generate_groups(big_list: List[str]) -> List[str]:\n group = []\n indices = []\n copy_big_list = big_list[:]\n for i,line in enumerate(copy_big_list):\n indices.append(i)\n if line != '\\n':\n group.append(line)\n elif line == '\\n':\n print(f\"group is {group}\")\n yield group\n print(f\"indices are: {indices}\")\n del big_list[min(indices):max(indices)-1]\n indices = [max(indices)]\n group = []\n yield(group)", "title": "" }, { "docid": "7ed5b077e3f10a5dddd8176883e56873", "score": "0.6025625", "text": "def __iter__(self):\n yield self.match\n raise StopIteration", "title": "" }, { "docid": "3941a390ba3d5f50f97452f04c8f937b", "score": "0.60217744", "text": "def group_iter(iterator, n=2):\n accumulator = []\n for item in iterator:\n accumulator.append(item)\n if len(accumulator) == n:\n yield accumulator\n accumulator = []\n\n # Yield what's left\n if len(accumulator) != 0:\n yield accumulator", "title": "" }, { "docid": "0707f58ce0d4cb98271e1bc97c0e9c3f", "score": "0.60207987", "text": "def __iter__(self):\n\n for tokens in self._iter_from_edges(self.start):\n yield tokens", "title": "" }, { "docid": "117938d743aaed528100ca0e9780575a", "score": "0.60197115", "text": "def __iter__(self):\n return iter(self._iter)", "title": "" }, { "docid": "329c2f36360926599aac55d1893e348a", "score": "0.6018578", "text": "def grouper(n, iterable):\n i = iter(iterable)\n piece = list(islice(i, n))\n while piece:\n this_start_time = datetime.now()\n yield np.array(piece)\n piece = list(islice(i, n))\n #print \"grouper timing: {}\\n\".format(str(datetime.now() - this_start_time))", "title": "" }, { "docid": "a635c92abc5558a757bf280aa4fd571f", 
"score": "0.6018441", "text": "def grouper(iterable, size):\n\n source = iter(iterable)\n\n while True:\n group = islice(source, size)\n yield chain([next(group)], group)", "title": "" }, { "docid": "94d66ab723c69311b668a5de7e85e80a", "score": "0.60177493", "text": "def __iter__(self) -> Iterator[np.ndarray]:\n return iter(self.__elements)", "title": "" }, { "docid": "540279586f868bacf0861b7640a32dfb", "score": "0.6017348", "text": "def __iter__(self):\n\t\treturn self._grid.__iter__()", "title": "" }, { "docid": "8796a887ef5c8442b7ac0b47a14daa04", "score": "0.6015872", "text": "def get_blocks(self):\n for cr, value in self.grid.get_blocks():\n yield add_cr(self.cr, cr), value", "title": "" }, { "docid": "b199383f53ac3e511c477e8cb4d8c9d7", "score": "0.6014158", "text": "def __iter__(self):\n for stab in self.normal_form:\n yield stab", "title": "" }, { "docid": "8a55d4d2e9de271913d924b1fcf7ca79", "score": "0.60063046", "text": "def tagIterator(self):\n for tag in self.tags:\n yield [tag,self.tags[tag]]", "title": "" }, { "docid": "3406ad54c6da304ef5758c0243a0757e", "score": "0.6004267", "text": "def __iter__(self):\n for val in self.data.ravel():\n yield val", "title": "" }, { "docid": "1310a5b98ca6d244250940bf7efe39be", "score": "0.6003605", "text": "def __iter__(self):\n cursor = 0\n while cursor < len(self):\n yield self._items[cursor]\n cursor += 1", "title": "" }, { "docid": "57953ce196bca8142a1c8bf1e270d369", "score": "0.60015494", "text": "def __iter__(self):\n for n in self.nameList:\n yield (n,self.cuts[n])", "title": "" }, { "docid": "ea6d5a628af4b801bd14762348f682b7", "score": "0.5992612", "text": "def __iter__(self):\n\n return self", "title": "" }, { "docid": "04b65d1d35aeb0f1d7ee9a0cee17172b", "score": "0.59889287", "text": "def __iter__(self):\n for item in self._layout:\n yield wrap_layout(prepare_layout(item), self._behavior, allow_other=True)", "title": "" }, { "docid": "b2deb68466ae49f2a896a2f57ed5a187", "score": "0.5986812", "text": "def __iter__(self):\r\n for v in self.results.matches:\r\n yield v", "title": "" }, { "docid": "b9e6ca0f9b8d71f8082249038138cc33", "score": "0.5985573", "text": "def _items(self):\n for i in range(2 ** len(self.measurements)):\n branch = tuple(int(b) for b in np.binary_repr(i, width=len(self.measurements)))\n yield branch, self.processing_fn(*branch)", "title": "" }, { "docid": "acfdb1f6eb7f00244d01436961bfc523", "score": "0.5984099", "text": "def grouper(iterable):\n return izip(chain([0], iterable), iterable)", "title": "" }, { "docid": "563e00b862ecd948f87955fa3a13fa2c", "score": "0.5980808", "text": "def iterGroups(iterable, size):\n iterator = iter(iterable)\n while True:\n val = tuple(iterator.next() for i in xrange(size))\n if len(val) != size:\n break\n yield val", "title": "" }, { "docid": "16ba2adf9981ca232238cd0106a70b48", "score": "0.59802294", "text": "def __iter__(self):\n return self.create_tuple_iterator()", "title": "" }, { "docid": "92a378573aa7107a8ba89ece83e877db", "score": "0.597507", "text": "def __iter__(self):\n return iter(self.shapeList)", "title": "" }, { "docid": "3cadaa3f4919ef9b9c980c8b6cd55007", "score": "0.5975023", "text": "def __iter__(self):\n\n yield from self.children.values()", "title": "" }, { "docid": "a46b676fdb01ef7b4ff55e3a56dd11bb", "score": "0.5974077", "text": "def __iter__(self):\n yield self.match\n raise StopIteration", "title": "" }, { "docid": "a46b676fdb01ef7b4ff55e3a56dd11bb", "score": "0.5974077", "text": "def __iter__(self):\n yield self.match\n raise StopIteration", "title": "" }, { 
"docid": "a46b676fdb01ef7b4ff55e3a56dd11bb", "score": "0.5974077", "text": "def __iter__(self):\n yield self.match\n raise StopIteration", "title": "" }, { "docid": "a46b676fdb01ef7b4ff55e3a56dd11bb", "score": "0.5974077", "text": "def __iter__(self):\n yield self.match\n raise StopIteration", "title": "" }, { "docid": "a46b676fdb01ef7b4ff55e3a56dd11bb", "score": "0.5974077", "text": "def __iter__(self):\n yield self.match\n raise StopIteration", "title": "" }, { "docid": "a46b676fdb01ef7b4ff55e3a56dd11bb", "score": "0.5974077", "text": "def __iter__(self):\n yield self.match\n raise StopIteration", "title": "" }, { "docid": "a46b676fdb01ef7b4ff55e3a56dd11bb", "score": "0.5974077", "text": "def __iter__(self):\n yield self.match\n raise StopIteration", "title": "" }, { "docid": "a46b676fdb01ef7b4ff55e3a56dd11bb", "score": "0.5974077", "text": "def __iter__(self):\n yield self.match\n raise StopIteration", "title": "" }, { "docid": "a46b676fdb01ef7b4ff55e3a56dd11bb", "score": "0.5974077", "text": "def __iter__(self):\n yield self.match\n raise StopIteration", "title": "" }, { "docid": "a46b676fdb01ef7b4ff55e3a56dd11bb", "score": "0.5974077", "text": "def __iter__(self):\n yield self.match\n raise StopIteration", "title": "" }, { "docid": "a46b676fdb01ef7b4ff55e3a56dd11bb", "score": "0.5974077", "text": "def __iter__(self):\n yield self.match\n raise StopIteration", "title": "" }, { "docid": "c30d3a8860be9dd739cd03d824038594", "score": "0.5968542", "text": "def __iter__(self):\r\n yield self.match\r\n raise StopIteration", "title": "" } ]
b3fbf1972df5153ec19b17fc6ed496a2
Takes a dictionary of markov chains and returns random text based off an original text.
[ { "docid": "25ac592f4bcde749dbd32a31581f83f6", "score": "0.5955309", "text": "def make_text(chains):\n\n\n tupled_list = lyrics.items()\n print lyrics\n #get random dictionary key pair\n dict_length = len(tupled_list)\n print dict_length\n starter_keypair = random.randint(0,dict_length)\n starter_tuple = tupled_list[starter_keypair]\n #print starter_tuple\n i = 0\n starter_key1 = starter_tuple[0][0]\n #print starter_key1\n starter_key2 = starter_tuple[0][1]\n #print starter_key2\n\n new_lyric = [starter_key1, starter_key2]\n\n while i < 5: #number of words\n \n #key - tuple(0)\n #lyric_key = key[1]\n #key - tuple(1)\n\n #get random value\n \n num_value = len(lyrics[starter_key1, starter_key2]) - 1\n random_value = random.randint(0, num_value)\n lyric_value = lyrics[starter_key1, starter_key2][random_value]\n #find the next value based on lyric_key,lyric_value tuple\n #nextword = lyrics[(lyric_key,lyric_value)][1]\n #print starter_key1, starter_key2, lyric_value\n new_lyric.append(lyric_value)\n #new_starter = key[1], lyric_value\n i += 1\n starter_key1 = starter_key2\n starter_key2 = lyric_value\n new_lyric_txt = \" \".join(new_lyric)\n print new_lyric_txt", "title": "" } ]
[ { "docid": "53868debcc9e7ba350786bd26bd2e235", "score": "0.74235207", "text": "def make_text(chain_dict):\n \n #initialize empty list and make random choice from keys in chain_dict\n text_block = []\n start_phrase = random.choice(chain_dict.keys()) \n\n #assign variable key to start of random text and add to text_block list\n key = (start_phrase[0],start_phrase[1])\n text_block.extend([start_phrase[0],start_phrase[1]])\n\n #step through key value pairs to randomly pick next value \n while key in chain_dict:\n add_phrase = random.choice(chain_dict.get(key)) # chooses random value from key value list\n text_block.extend([add_phrase])\n key = (key[1],add_phrase) # makes a new tuple key from start phrase and add phrase\n \n return text_block", "title": "" }, { "docid": "eaed63006715aab1f2d888eb9c6125a2", "score": "0.7367819", "text": "def make_text(chains):\n\n # TODO: the last word in our string does not print i.e. \"sam I\" instead of \"sam i am\"\n # welp\n \n text = \"\"\n text_list = []\n rand_key = random.choice(chains.keys()) #generating a random key from dictionary \n while rand_key in chains: \n first, second = rand_key #unpack random key from above\n next_word = random.choice(chains[rand_key]) #binding next word to random word in list associated with random index\n rand_key = (second, next_word) #binding rand key to a tuple - second word from random index, next word from list\n text_list.append(next_word)\n\n text = ' '.join(text_list)\n print text", "title": "" }, { "docid": "9f74bce54a908fbb72b1c5abfcd898a2", "score": "0.7228948", "text": "def random_text(markov,n):\n prefix = random.choice(markov.items())[0]\n random_string = prefix # Initial value\n for i in range(n):\n try:\n suffix = random.choice(markov[prefix])\n random_string += \" \" + suffix\n prefix = shift(prefix, suffix)\n except KeyError:\n # New prefix generated by adding suffix doesn't exist as prefix in markov dict\n prefix = random.choice(markov.items())[0]\n return random_string", "title": "" }, { "docid": "8ef65beeedd9321b5fb94da426ac7a5b", "score": "0.70783037", "text": "def make_text(chains):\n # create an empty list that will house the end product\n # to start, get a random key from dictionary(chains), and add it to the empty list as the first two items in the list (first word in tuple as first item in list, second word in tuple as second item in list)\n # \n # if the key has multiple values, get a random item from the list of values\n # print as list\n # then take the last two items in the list as a key, then print the value\n return \"Here's some random text.\"", "title": "" }, { "docid": "0822b8451328f1af1ea4d0c880a4fbfc", "score": "0.7075093", "text": "def make_text(chains):\n \n #Start with a random bi-gram\n bi_gram = random.choice(chains.keys())\n \n #Start output_string with our first bi-gram\n output_string = starter_string + \" \".join(bi_gram)\n\n #Continue generating random text, concatonating to output_string, and creating new bi-grams as we go\n while True:\n if chains[bi_gram] != []:\n #if bi_gram in chains and chains[bi_gram] != []:\n new_word = random.choice(chains[bi_gram])\n output_string += \" \" + new_word\n bi_gram = (bi_gram[1], new_word)\n else:\n break\n\n return output_string", "title": "" }, { "docid": "01a0dac190ad34cf4922409895af6539", "score": "0.69882554", "text": "def make_text(chains, n):\n words = []\n current_gram = random.choice(chains.keys())\n\n while current_gram[0].lower() == current_gram[0]:\n current_gram = random.choice(chains.keys())\n \n 
words.extend(current_gram)\n while current_gram in chains:\n next_word = random.choice(chains[current_gram])\n words.append(next_word)\n current_gram = tuple(words[-n:])\n\n # print words\n\n return \" \".join(words)", "title": "" }, { "docid": "67da537fa4e19a1f1cde2df2c7d96f8e", "score": "0.69387275", "text": "def make_text(chains):\n\n statement = True\n\n while statement == True:\n key_names = choice(list(chains.keys()))\n\n words = [key_names[0], key_names[1]]\n\n if key_names[0].istitle():\n random_value = choice(chains[key_names])\n statement = False\n\n else:\n statement = True\n\n while random_value is not None:\n key_names = (key_names[1], random_value)\n words.append(random_value)\n random_value = choice(chains[key_names])\n \n return \" \".join(words)", "title": "" }, { "docid": "33ce856ed74dc53d9b1d32f50404c8da", "score": "0.6902121", "text": "def make_text(chains):\n\n #string to be used at very end\n text = \" \" \n\n #create empty list \"sentence\"\n sentence = [] \n #saves the inputted dictionary as a variable\n dict_of_words = chains\n #new dictionary to store all keys and values where the key has a capital letter. \n dict_of_capitals = {}\n #new list to put all the keys that will go into dict_of_capitals.\n list_of_keys = []\n #Get list of keys from dict_of_words to sort through. \n list_of_keys = dict_of_words.keys() \n #Iterating through the list of tuples\n for key in list_of_keys:\n #Assigning the first letter of the first word in the tuple to a variable. \n first_letter = key[0][0]\n #Check to see if that letter is capital. If it is, add the key and value to dict_of_capitals. \n if first_letter.isupper() == True:\n value = dict_of_words[key]\n dict_of_capitals[key] = value\n \n #randomly selected a key in the capitals dictionary\n first_tuple = choice(dict_of_capitals.keys())\n\n #add tuple[0] to our sentence\n sentence.append(first_tuple[0])\n #add tuple[1] to our sentence\n sentence.append(first_tuple[1])\n \n #randomly pick an item in the value's list\n random_third_word = choice(dict_of_words[first_tuple]) \n\n #add the new string to \"sentence\"\n sentence.append(random_third_word) \n \n\n while True:\n #Grabs last two items of sentence and creating new tuple to become our key\n end_tuple = tuple([sentence[-2],sentence[-1]]) \n\n #If the key is not in our dictionary, the loop breaks.\n if end_tuple not in dict_of_words: \n break\n #Otherwise, we randomly select another value from the list of values of \n #the key (random_tuple)\n else:\n #randomly pick an item in the value's list \n third_word = choice(dict_of_words[end_tuple]) \n #Add the third_word to the sentence list. 
\n sentence.append(third_word) \n\n #joining the sentence list together to create one string\n return text.join(sentence)", "title": "" }, { "docid": "2a21c8fd27c2b00cdf6aa27e4e643a88", "score": "0.6891029", "text": "def make_text(chains):\n # print(chains[0])\n open_file = open_and_read_file(input_path)\n string_of_file = open_file.split()\n print(string_of_file)\n words = []\n # while\n random_key = random.choice(list(chains.keys())) # Tuple key; first key\n random_key_word = random.choice(\n random_key\n ) # string pulled from tuple to be added to final text\n # words.append(random_key_word)\n # print(random_key)\n # print(random_key_word)\n\n random_value = random.choice(chains[random_key]) # Value from the tuple key\n # print(random_value)\n words.append(random_value)\n\n for key, value in chains.items():\n # print(key[1])\n if key != random_key:\n words.append(random.choice(list(key)))\n # words.append(random.choice(value))\n\n if key[1] == random_value:\n words.append(random.choice(value))\n\n # for key, value in chains.items():\n\n # print(words) # CHECKING IF IN LIST\n # your code goes here\n # link is a tuple/ key form our dictionary\n # the link also includes a random word from the value of that key (random single value)\n # Iterate over\n\n # for key, value in chains.items():\n # print(key, value)\n # first_key = random.choice(chains.keys())\n # first_value = random.choice(chains[key])\n # print(first_key)\n # words.append(first_key)\n # words.append(first_value)\n # break\n # print(first_key)\n # print(words)\n\n return \" \".join(words)", "title": "" }, { "docid": "60d3eddbe704326ddfbba8881c931fcc", "score": "0.6880405", "text": "def make_text(chains):\n\n words = [choice(chains.keys())]\n\n # your code goes here\n\n # for loop through chains dictionary\n # for tuple_key, text_list in chains.items():\n # #tuple_key = list(tuple_key)\n # tuple_key = tuple_key[1]\n # random_word = choice(text_list)\n # words.append(tuple_key)\n # words.append(random_word)\n\n\n\n random_word = choice(chains[tuple_key])\n new_key = (tuple_key[1], random_word)\n\n\n # words.append(tuple_key)\n # words.append(random_word)\n\n\n\n\n return \" \".join(words)", "title": "" }, { "docid": "196af2cfa09eaf073b80803ac4e2457b", "score": "0.68800515", "text": "def make_text(chains):\n\n words = []\n\n #Start with a random key from dictionary. 
Remember, this key is a tuple.\n chains_key = choice(list(chains))\n \n #Append the two words that compose the tuple to the list.\n words.append(str(chains_key[0]))\n words.append(str(chains_key[1]))\n \n #This while loop will run as long as the key is in the dictionary.\n while chains_key in chains.keys():\n \n #Get a random word from the list assigned to chains_key\n random_choice_from_key = choice(chains[chains_key])\n\n #Append that random word to the word list \n words.append(random_choice_from_key)\n\n #Now, make a new chains key that is composed of the last two \n #list items in the word list \n chains_key = (words[-2], words[-1])\n\n return \" \".join(words)", "title": "" }, { "docid": "2ad19388dfa239fc023cb2d725bccc3d", "score": "0.6815278", "text": "def generate_text(chains):\n\n output_text = \"\"\n\n # make key list and then choose the first key randomly\n keys = chains.keys()\n new_key = choice(keys)\n\n # add the first tuple key to the output_text\n output_text = output_text + new_key[0] + \" \" + new_key[1] + \" \"\n \n # continues to add keys until text is too long or the key isn't in dict\n \n # while len(output_text) < 130:\n while True:\n value = chains.get(new_key, None)\n if value:\n random_value = choice(value)\n else:\n break\n new_output_text = output_text + random_value+\" \"\n if len(new_output_text) < 140:\n output_text = new_output_text\n new_key = tuple([new_key[1], random_value])\n else:\n break\n return output_text", "title": "" }, { "docid": "e07261b105f0f7e2b58e6cb9715a4eff", "score": "0.67852384", "text": "def make_text(chains):\n\n start_tuples = make_start_tuples(chains)\n\n word_tuple = random.choice(start_tuples)\n text = list(word_tuple)\n\n while word_tuple in chains:\n\n next_word = np.random.choice(chains[word_tuple]['choices'], p=chains[word_tuple]['probabilities'])\n # print word_tuple, next_word\n text.append(next_word)\n\n word_tuple = word_tuple[1:] + (next_word,)\n\n if (\".\" in next_word or \"?\" in next_word or \"!\" in next_word) and len(text) > 25:\n break\n\n if word_tuple not in chains and len(text) < 3:\n word_tuple = random.choice(start_tuples)\n text.extend(word_tuple)\n\n return \" \".join(text)", "title": "" }, { "docid": "d2044bd8053dd1f55f040d8da4941d66", "score": "0.67459494", "text": "def make_text(chains):\n\n words = []\n\n # your code goes here\n # pull random word from dictionary, add to list\n words.extend(choice(chains.keys()))\n # access tuple, find value (rand choose), add to list\n\n while True:\n # key = (words[-2], words[-1]))\n # possible_next = chains[key]\n # next_word = chosie(possible_next)\n next_word = choice(chains[(words[-2], words[-1])])\n if next_word is None:\n break\n elif (len(\" \".join(words) + \" \" + next_word) > 140):\n break\n # and (\n # words[-1][-1] == \".\" or\n # words[-1][-1] == \"?\" or\n # words[-1][-1] == \"!\" or\n # words[-1][-1] == \"\\\"\" or\n # words[-1][-1] == \")\" or\n # words[-1][-1] == \"*\")):\n else:\n words.append(next_word)\n return \" \".join(words)\n # make (y, z)\n # search dictionary for y, z\n # repeat until reach none.\n # change list into string", "title": "" }, { "docid": "0e7b0fd393b710fd71e1c62ecab3d1e2", "score": "0.6715437", "text": "def make_text(chains):\n\n words = []\n words.extend(choice(chains.keys()))\n\n while True:\n bigram = (words[-2:])\n\n try:\n next_word = choice(chains[bigram])\n except KeyError:\n break\n\n words.append(next_word)\n\n return \" \".join(words)", "title": "" }, { "docid": "3f8e6c16726e3f539d9b7be91cb07b5b", "score": "0.6664983", 
"text": "def make_text(chains):\n # import random generator\n # have random generator pick a starting point\n # random generator will pick a random tuple\n # get random item in the list associated with the random tuple\n # shift the key by 1\n # get a random value associated with the new key\n # if the word None appears in the loop, then stop]\n import random\n key_list = chains.keys()\n first_key = random.choice(key_list)\n # print first_key\n random_value = chains.get(first_key)\n # print random_value\n first_value = random.choice(random_value)\n # print first_value\n the_second_word = first_key[1]\n\n new_key = (the_second_word, first_value)\n # print new_key\n\n new_words = \"\"\n while True:\n if chains.get(new_key) == None:\n break\n the_new_list = chains.get(new_key)\n # print \"the new list\", the_new_list\n new_random_value = random.choice(the_new_list)\n # print \"new_value\", new_random_value\n new_key = (new_key[1], new_random_value)\n # print \"new_key\", new_key\n\n new_words += new_random_value + ' '\n \n # your code goes here\n\n return new_words", "title": "" }, { "docid": "06c9c449cf8d9ac352a26c48dca2964c", "score": "0.66189545", "text": "def make_text(chains):\n\n words = []\n # keys = (list(chains.keys()))\n # key_capital = []\n # for key in keys:\n # if key[0][0].isupper():\n # key_capital.append(key)\n # key_start = choice(key_capital)\n key_capital = []\n keys = (list(chains.keys()))\n for key in keys:\n if key[0][0].isupper():\n key_capital.append(key)\n key_start = choice(key_capital)\n\n words.extend([key_start[0], key_start[1]])\n\n key = key_start\n\n while key in chains:\n value_list = chains.get(key)\n rand_value = choice(value_list)\n words.extend([rand_value])\n key = (key[1], rand_value)\n if len(words) > 40 and key[1][-1] in \"?.!\":\n break\n\n\n return \" \".join(words)", "title": "" }, { "docid": "3de37b6336727030d489a2ea98730868", "score": "0.65905106", "text": "def generate_markov_text(self, num_chars):\r\n # pick a random start character from the known characters in frequency table\r\n start_string = rand.choice(list(self.frequency_table.keys())) \r\n markov_text = \"\"\r\n\r\n current_char = start_string\r\n for i in range(num_chars):\r\n next_char = self.get_next_char(current_char)\r\n markov_text += next_char\r\n current_char = next_char\r\n\r\n return markov_text", "title": "" }, { "docid": "9642b4ea87be7dbe6dd3cd2859838d66", "score": "0.6458063", "text": "def make_text(chains, max_length):\n\n # grab a random key from the chains dict, this is a tuple. Could be the last key added to the dictionary, which would end the random_text_list before it reach the specified max_length\n seed = random.choice(chains.keys())\n\n # convert the tuple into a list and add it to the list random_text_list\n random_text_list = []\n random_text_list += list(seed)\n\n text_string = ' '.join(random_text_list)\n\n # while the random_text_list is not yet the max length specified...\n while len(text_string) < max_length:\n # to deal with if the key chosen is the last set, because then the value would be the last word in the text and likely can't be used to make a new key. 
If attempted new key is the last two words & has no value, choose a random new key to restart instead.\n choices = None\n while not choices:\n choices = chains.get(seed)\n if not choices:\n seed = random.choice(chains.keys())\n random_text_list += list(seed)\n\n # choose a random value from the list of values in the dictionary for the seed key\n next = random.choice(chains[seed])\n # append that new value (word) to the random_text_list\n random_text_list.append(next)\n # set new seed key based on previous seed, starting from the 2nd item to the end of the previous seed, and tack on the just added next word as part of the tuple\n seed = seed[1:] + (next,)\n\n text_string = ' '.join(random_text_list)\n # print \"length of text_string in while loop\", len(text_string)\n\n # this is to strip off the last few words (usually just the last word, but might be last few words in the txt file if the last key added was the choices key)\n while len(text_string) > max_length:\n last_word = random_text_list.pop()\n text_string = text_string.rstrip(last_word)\n text_string = text_string.rstrip(\" \")\n # print \"inside the stripping while loop\"\n\n print \"length of final text_string is\", len(text_string)\n\n return text_string", "title": "" }, { "docid": "ad399dbc383952896c6388af399d0068", "score": "0.64553213", "text": "def make_text(chains):\n\n keys = list(chains.keys())\n key = choice(keys)\n\n words = [key[0], key[1]]\n while key in chains:\n word = choice(chains[key])\n words.append(word)\n key = (key[1], word)\n\n return ' '.join(words)", "title": "" }, { "docid": "312f1df770896bbc6e22f8b26c2e79dd", "score": "0.63528776", "text": "def make_text(chains):\n\n text_list = []\n original_key = choice(chains.keys())\n original_value = choice(chains[original_key])\n text_list.append(original_key[0].title())\n text_list.append(original_key[1])\n text_list.append(original_value)\n\n while (text_list[-2], text_list[-1]) in chains:\n new_key = (text_list[-2], text_list[-1])\n new_value = choice(chains[new_key])\n text_list.append(new_value)\n\n\n return \" \".join(text_list)", "title": "" }, { "docid": "4c0e4b475c06531d5ca70842b247527e", "score": "0.6345328", "text": "def genchain(dictsource, length):\n print \"\\nGenerating Markov chain...\\n\"\n\n t = datetime.datetime.now()\n\n newtext = []\n word = choice(dictsource.keys())\n\n while length > 0:\n word = choice(dictsource[word])\n newtext.append(word)\n length -= 1\n\n print datetime.datetime.now() - t\n\n return \" \".join(newtext)", "title": "" }, { "docid": "3117bfc6c4d77d3a2886e1f3a068ce04", "score": "0.63336325", "text": "def make_text(chains):\n words = []\n\n\n first_line = random.choice(list(chains.keys()))\n\n first_link = random.choice(chains[first_line])\n words.extend(list(first_line))\n words.append(first_link)\n\n print(words)\n\n newkey = words[-2], words[-1]\n\n while \n words.extend(list(first_line))\n\n # first_line = first_line + (chains.values())\n\n # for key, value in chains.items():\n # if words == []:\n # words.extend(list(key))\n # newvalue = random.choice(value)\n # words.append(newvalue)\n # newkey = words[-2], words[-1]\n # key = newkey\n\n # print(words)\n\n # newkey = (key[1], newvalue)\n\n # key = newkey\n\n # ('Would', 'you'): ['could', 'could', 'could', 'could', 'like', 'like']\n # ('you', 'could'): ['you', 'you', 'you', 'you']\n # ('could', 'you'): ['in', 'with', 'in', 'with']\n # ('you', 'in'): ['a', 'a']\n # ('in', 'a'): ['house?', 'box?']\n # ('a', 'house?'): ['Would']\n\n # Would you like you with a house? 
Would you a mouse? \n # Would you Would you Would you them, eggs and ham? \n # Would you Sam I am?\n\n # Would you could you in a house? Would you a mouse? \n # Would you Would you Would you green eggs and ham? \n # Would you Sam I am?\n\n\n # Would you could you could you could you in you in a in a box? \n # a house? Would house? Would you you with a with a fox? a mouse? \n # Would mouse? Would you a box? Would box? Would you a fox? Would \n # fox? Would you you like them, like green eggs green eggs and eggs \n # and ham? and ham? Would ham? Would you like them, Sam them, Sam I \n # Sam I am?\n\n\n\n\n #pair key[1] with random list value\n #find this new pair in old list of tuple keys\n #repeat \n\n # ([1]) [randlist] random.choice(chains)\n\n\n # while loop:\n # newkey to chains[key]\n\n # key:you | value:could | key:could | value:you | key:you | value:in \n\n # key: you could you in a \n\n # for key in chains:\n # newvalue = random.choice(chains[key])\n # newkey = (key[1], newvalue)\n\n\n \n\n #key: [1] \n #value: random.choice(value)\n\n\n\n \n\n print(\" \".join(words))", "title": "" }, { "docid": "059edd6c46ccfe2eda5822122fe16018", "score": "0.63166463", "text": "def gen_text (lm, seed, nletters=1000):\n for k in lm.keys():\n order = len(k)\n\n if len(seed) < order:\n seed = ' ' * order + seed\n\n history = seed[-order:]\n out = []\n for i in range(nletters):\n if history not in lm:\n if history.lower() in lm:\n history = history.lower()\n break\n def find_suitable_replacement():\n for removed_letters in range (1, order):\n for k, v in lm.items():\n if k[-order+removed_letters:] == history[-order+removed_letters:]:\n return k\n history = find_suitable_replacement()\n c = generate_letter(lm, history, order)\n history = history[-order+1:] + c\n out.append(c);\n return \"\".join(out)", "title": "" }, { "docid": "3674d245f8a113b62adb7e81abae4618", "score": "0.63134927", "text": "def generate(self):\n blobs = [blob for blob in self.text.split('\\n') if blob]\n word = re.escape(random.choice(blobs).split(' ')[0])\n sentence = word\n still_chuggin = True\n while still_chuggin:\n matches = [(m.start(0), m.end(0)) for m in re.finditer(word + \" \", self.text)]\n if matches:\n match = random.choice(matches)\n word = self.text[match[0]:].split(' ')[1]\n if \"\\n\" in word:\n still_chuggin = False\n sentence = sentence + \" \" + word.split('\\n')[0]\n else:\n still_chuggin = False\n\n return sentence.replace('\\\\', '')", "title": "" }, { "docid": "df9582f60805d035a84e81dc2b36e4be", "score": "0.6278908", "text": "def make_text(chains):\n\n text = \"\"\n all_upper_tuples = []\n for a_tuple in chains.keys():\n if a_tuple[0][0].isupper():\n all_upper_tuples.append(a_tuple)\n current_tuple = choice(all_upper_tuples)\n for word in current_tuple:\n text += word + \" \"\n\n end_punctuation = [\".\", \"?\", \"!\"]\n while current_tuple in chains:\n rand_word = choice(chains[current_tuple])\n if len(text) + len(rand_word) < 140:\n text += rand_word + \" \"\n #loop in current_tuple to get the words from 1 to len(current_tuple) and add rand_word\n if len(text) > 8 and text[-2] in end_punctuation:\n return text \n new_tuple = list(current_tuple)\n new_tuple.pop(0)\n new_tuple.append(rand_word)\n current_tuple = tuple(new_tuple)\n\n else:\n return text", "title": "" }, { "docid": "678c32b2241fc96761a858e19678f178", "score": "0.62702984", "text": "def make_text(dict1, dict2):\n \n dictionaries = [dict1, dict2]\n pick_dict = random.choice(dictionaries)\n start = random.choice(pick_dict.keys())\n first_word = 
start[-1]\n next = random.choice(pick_dict[start])\n result = list(start)\n result.append(next)\n #print \" \".join(result)\n\n while next[-1] != \".\":\n new = tuple([first_word, next])\n pick_dict_again = random.choice(dictionaries)\n\n if new in pick_dict_again:\n next = random.choice(pick_dict_again[new])\n else:\n for dictionary in dictionaries:\n if dictionary != pick_dict_again:\n next = random.choice(dictionary[new])\n result.append(next)\n first_word = result[-2]\n next = result[-1]\n\n\n return \" \".join(result)", "title": "" }, { "docid": "544b907d9ad8d78d2e77f45afd330ea2", "score": "0.6269932", "text": "def first_order_markov_sentence(markov_dictionary):\n words_list = [] # O(1)\n # Choose start word at random from markov chain keys\n start_word = random.choice(list(markov_dictionary.keys()))\n words_list.append(start_word)\n\n last_word = start_word\n for word in range(0, 10):\n # get historgram of all words following start word\n histogram = markov_dictionary[last_word]\n # use histogram to sample next word\n next_word = sample_by_frequency(histogram)\n # add new random word into words list\n words_list.append(next_word)\n # reassign last word so when loop starts again, you start from new word\n last_word = next_word\n\n random_sentence = ' '.join(words_list) + '.'\n return random_sentence", "title": "" }, { "docid": "c22d31b6eed8304aeb683be3bd2e29e1", "score": "0.6259059", "text": "def print_mimic(mimic_dict, word):\n initial = word\n dct = mimic_dict\n phrase = []\n while len(phrase) <= 200:\n phrase.append(initial)\n initial = random.choice(dct[phrase[-1]])\n return ' '.join(phrase)", "title": "" }, { "docid": "eb5ec956f2f9b622a398ab05ce24717d", "score": "0.6184573", "text": "def make_text(chains):\n\n words = [list(chains)[0][0], list(chains)[0][1]]\n\n i = 0\n while '.' not in words[-1]:\n words.append(choice(chains[(words[i], words[i + 1])]))\n i += 1\n\n print(' '.join(words))", "title": "" }, { "docid": "d2003ec1efa431a18fb3dffc098d5f2c", "score": "0.6162602", "text": "def make_text(chains):\n\n key = choice(chains.keys())\n words = [key[0], key[1]]\n while key in chains:\n word = choice(chains[key])\n if len(\" \".join(words)) + len(word) < 140:\n words.append(word)\n key = (key[1], word)\n else:\n break\n\n return \" \".join(words)", "title": "" }, { "docid": "472f84346de60a2dfaafb9d2166a0976", "score": "0.6122254", "text": "def make_text(chains, n):\n\n words = []\n\n # random_key pulls a tuple from the .keys() in dictionary chains and converts to list. .keys() DOES NOT GENERATE A LIST. 
it generates an ITERABLE.\n random_key = choice(list(chains.keys()))\n # next_word randomly chooses from the values (which is list) associated with random_key\n next_word = choice(chains[random_key])\n\n # initializing counter variable for length of words list\n total_char = len(words)\n\n # while loop to make sure text won't exceed character limit \n while total_char < 250:\n\n len_words = sum(len(i) for i in words)\n spaces = len(words) - 1\n total_char = len_words + spaces\n \n \n # conditional statement to make sure random_key has a next_word available.\n if next_word is not None:\n # redefine random_key to be next key, first make into list so it's mutable then convert back to tuple.\n random_key_list = list(random_key[1:])\n random_key_list.append(next_word)\n random_key = tuple(random_key_list)\n\n # random_key (random_key[1], random_key[2], next_word)\n words.append(next_word)\n # redefine next_word to be from the values associated with new tuple\n next_word = choice(chains[random_key])\n\n # once we hit none, comes out of while loop\n else:\n words.pop()\n break\n # if total_char < 280:\n # break\n # else:\n # words.pop()\n\n print(\"character length is {}\".format(total_char))\n return \" \".join(words)", "title": "" }, { "docid": "18287a0b4a5ab41998881a0720d0f545", "score": "0.6104362", "text": "def make_text(chains: Dict[tuple, List[str]], first_words: Set[str]):\n\n words = []\n\n # We instantiate first word and then keep drawing a new one until an uppercase one is found.\n first_word = \"\"\n while first_word not in first_words:\n first_key = choice(list(chains.keys()))\n first_word = choice(chains[first_key])\n words.append(first_word)\n\n # The rest of the keys are generated based on the first key\n key = generate_key(first_key, first_word)\n\n # As long as we choose a key (word sequence) that is seen\n # in the corpus and thereby registered in the chains dict,\n # we will keep building our text (list of words).\n while key in chains:\n word_to_add = choice(chains[key])\n words.append(word_to_add)\n key = generate_key(key, word_to_add)\n\n # Return one long string by joining all words with a whitespace\n return \" \".join(words)", "title": "" }, { "docid": "78bed293a629697264313d971ba79940", "score": "0.6078891", "text": "def random_text(self):\n return choice(self.texts)", "title": "" }, { "docid": "7011a019738859a1cb04b9424f73dcc1", "score": "0.6074395", "text": "def make_text(chains, n_gram):\n\n words = []\n chosen_tuple = choice(list(chains.keys()))\n\n while chosen_tuple in chains:\n \n chosen_value = chains[chosen_tuple]\n\n last_word = choice(chosen_value)\n \n words.append(last_word)\n\n chosen_last_words = list(chosen_tuple[-n_gram+1:])\n chosen_last_words.append(words[-1])\n\n chosen_tuple = tuple(chosen_last_words)\n\n return \" \".join(words)", "title": "" }, { "docid": "bbdd918282669a08478919bc9aa85c00", "score": "0.6030232", "text": "def generate(self, limit = 30):\n seed = random.choice(self.markov_chain.keys())\n sentence = []\n for word in seed:\n sentence.append(word)\n sentence[0] = sentence[0].capitalize()\n count = self.order\n while count < limit:\n next_word = random.choice(self.markov_chain.get(seed))\n sentence.append(next_word)\n next_seed = tuple(sentence[-self.order:])\n if next_seed not in self.markov_chain:\n sentence = self._full_stop(sentence)\n seed = random.choice(self.markov_chain.keys())\n for word in seed:\n sentence.append(word)\n else:\n seed = next_seed\n count += 1\n sentence = \" \".join(self._full_stop(sentence))\n return 
sentence.decode('unicode_escape').encode('ascii','ignore')", "title": "" }, { "docid": "a48b9ed1a0599e622bf3528895eeaa0a", "score": "0.59879875", "text": "def generate_random_string(template_dict, key='start'):\n\n data = template_dict.get(key)\n\n #if isinstance(data, list):\n result = random.choice(data)\n #else:\n #result = random.choice(data.values())\n\n for match in token_regex.findall(result):\n word = generate_random_string(template_dict, match) or match\n result = result.replace('{{{0}}}'.format(match), word)\n\n return result", "title": "" }, { "docid": "81b27dc3db3ccd03fcf231f1921217e6", "score": "0.5970394", "text": "def generate_Markov_Chain(self):\n i = 0\n while i < len(self.sample_text):\n if i + self.S >= len(self.sample_text):\n break\n else:\n self.generated_chain.add_to_chain(\n self.sample_text[i : i + self.S],\n self.sample_text[i + self.S])\n i += 1", "title": "" }, { "docid": "2745c22bdf01256b628355a092f44500", "score": "0.59555256", "text": "def sample_markov_path(words, transitions, first=None, m=10, n=1):\n\n def split(a, n):\n \"\"\"\n\n Parameters\n ----------\n a :\n \n n :\n \n\n Returns\n -------\n\n \"\"\"\n\n parts = len(a) // n\n k, m = divmod(len(a), parts)\n return [a[i * k + min(i, m):(i + 1) * k + min(i + 1, m)] for i in range(parts)]\n\n def _sample_at_random(D):\n \"\"\"\n\n Parameters\n ----------\n D :\n \n\n Returns\n -------\n\n \"\"\"\n return random.choices(list(D.keys()), weights=D.values(), k=1)[0]\n\n import random\n if first is None:\n previous = _sample_at_random(words[n])\n else:\n previous = first\n\n N = len(previous) // n\n result = split(previous, N)\n for i in range(m):\n new_sample = _sample_at_random(transitions[n][previous])\n previous = tuple(new_sample)\n N = len(previous) // n\n result += split(new_sample, N)\n\n return result", "title": "" }, { "docid": "233b5473d294bef79db8b62aa011f16d", "score": "0.59406006", "text": "def generate_text(seed, model, max_length=10):\n assert len(seed) < max_length, \\\n \"Max length must be greater than the length of the seed\"\n sentence_finished = False\n\n while (not sentence_finished) and len(seed) <= max_length:\n probs = list(model[tuple(seed[-2:])].values())\n words = list(model[tuple(seed[-2:])].keys())\n seed.append(np.random.choice(words, p=probs))\n if seed[-2:] == ['</s>', '</s>']:\n sentence_finished = True\n return ' '.join([t for t in seed if t not in PADDING])", "title": "" }, { "docid": "cf48446eac5351e5defca8bb8dd5f2f9", "score": "0.59284294", "text": "def markovtalk_learn(text_line):\n text_line = msg_to_array(text_line)\n length = len(text_line)\n order = [TOKEN, ] * ORDER_K\n for i in range(length-1):\n order.insert(0, text_line[i])\n order = order[:ORDER_K]\n next_word = text_line[i+1]\n key = markovchains.setdefault(o2i(order), [])\n if not next_word in key: key.append(mw(next_word))", "title": "" }, { "docid": "8d68ddd928e71ae36a181678051376d2", "score": "0.5848186", "text": "def random_word(h):\n words = h.keys()\n cumulative_sum = []\n for word in words:\n cumulative_sum.append(h[word])\n\n n = cumulative_sum[-1]\n r = random.randint(0,n)", "title": "" }, { "docid": "a42dd8abbcc6211a5309156b59239152", "score": "0.57919097", "text": "def generate_text(inputs, generation_function, seed_txt, index_to_token,\n gen_txt_length=250):\n feed_dict = {inputs['X']: seed_txt}\n gen_txt = []\n for gen_iter in range(gen_txt_length):\n # Get the probability of each character\n gen_chars = generation_function(feed_dict=feed_dict)\n\n # Get the probability for each character for the 
first sample\n gen_chars = gen_chars[:, -1, 0]\n\n # Due to rounding errors, sum of softmax output could be very slightly above 1\n # This throws an error in np.random.multinomial\n # Scale softmax outputs so they sum up to 1\n gen_chars = gen_chars / (gen_chars.sum() + 1e-6)\n # Sample the next character from the scaled distribution\n pred_char = np.argmax(np.random.multinomial(1, gen_chars, 1))\n\n # Append the sampled char to the seed_txt's first sample\n seed_txt[0, :-1] = seed_txt[0, 1:]\n seed_txt[0, -1] = pred_char\n feed_dict = {inputs['X']: seed_txt}\n\n # Append the sampled character to generated text\n gen_txt.append(pred_char)\n\n # Convert integer index of tokens to actual tokens\n gen_txt = [index_to_token[i] for i in gen_txt]\n return gen_txt", "title": "" }, { "docid": "280a8ace1950172e8cc2131b9043e04c", "score": "0.5788433", "text": "def generate_markov_chain(mc, n_gram=2):\n text = brown.words(categories=[\"lore\", \"romance\", \"humor\"])\n regex = re.compile(\"^([A-Z])\\w+([a-zA-Z]+[-'][a-zA-Z]+)|([a-zA-Z]+\\.)|([a-zA-Z])+$\")\n text = [word for word in text if regex.fullmatch(word)]\n n_grams = nltk.ngrams(text, n_gram)\n ngram_counter = {}\n # Get the frequency of an n-gram in all generated n-grams from text\n for ng in n_grams:\n if ng in ngram_counter.keys():\n ngram_counter[ng] += 1\n else:\n ngram_counter[ng] = 1\n # Create the markov chain for each n-gram\n for ng in ngram_counter:\n current_subtree = mc\n for index in range(len(ng)):\n word = ng[index]\n if current_subtree.get(word):\n current_subtree = current_subtree[word]\n elif index is not len(ng) - 1:\n current_subtree[word] = {}\n current_subtree = current_subtree[word]\n else:\n current_subtree[word] = ngram_counter[ng]", "title": "" }, { "docid": "3a7710ecd260b9e46ae37f68eb65587a", "score": "0.5750511", "text": "def generate_phonetic_text(words, cmu_dict):\n phonetic_text = []\n for word in words:\n if word == ' ' or word == '\\n':\n phonetic_word = word\n else:\n phonetic_word = cmu_dict[word]\n if len(phonetic_word) >= 1:\n phonetic_word = phonetic_word[0]\n else:\n get_closest_phonetic_word(word, cmu_dict)\n for phoneme in phonetic_word:\n phonetic_text.append(phoneme)\n\n return phonetic_text", "title": "" }, { "docid": "7343765b9f983f458f08075c53469b2f", "score": "0.57477695", "text": "def chooseFromDictRandom( dictText, nUseLang = -1 ):\n nUseLang = getLanguageIdx( nUseLang );\n strCodeLang = speech.toLangAbbrev( nUseLang )\n try:\n strTexts = dictText[strCodeLang];\n if( isinstance( strTexts, list ) ):\n aText = strTexts; # alternate form\n else:\n aText = strTexts.split(\"/\");\n return aText[numeric.randomDifferent(0,len(aText)-1)];\n except BaseException, err:\n print( \"ERR: abcdk.translate.chooseFromDictRandom: while accessing word '%s' for lang idx %d: err: %s\" % (str(dictText), nUseLang, err ) );\n return getUndefined( nUseLang );", "title": "" }, { "docid": "e2606a903b4964e83bd99f6d1685a267", "score": "0.5738324", "text": "def generate_sentence(model):\n\n sentence=[]\n #sentences between 2 and 15 words\n length= random.randint(2,15)\n keys=model.keys()\n bigram=random.choice(keys)\n #iterate until sentence is correct length\n for i in range(0,length):\n matches=[]\n found=False\n while not found:\n \n #search in keys for key[0] to match the bigram[1]\n for key in keys:\n regex=re.compile(r\"\\b%s\\b\"%bigram[1])\n result=regex.match(key[0])\n if result:\n matches.append(key)\n found=True\n \n #if no match, choose another bigram to try\n if not found:\n bigram=random.choice(keys)\n \n 
#add second element of bigram to sentence list \n sentence.append(bigram[1])\n #choose next bigram from the list of matches\n bigram=random.choice(matches)\n \n #combine strings from list\n return \" \".join(sentence)", "title": "" }, { "docid": "0ea43d95f0dc2ad8288f78f6d27af1fc", "score": "0.57323045", "text": "def print_mimic(mimic_dict, word):\n # +++your code here+++\n # LAB(begin solution)\n allwords = \"\"\n for unused_i in range(200):\n allwords = allwords + \" \" + word\n nexts = mimic_dict.get(word) # Returns None if not found\n\n\n if not nexts:\n nexts = mimic_dict[''] # Fallback to '' if not found\n word = random.choice(nexts)\n print(allwords)\n\n\n # The 'unused_' prefix turns off the lint warning about the unused variable.\n # LAB(replace solution)\n # return\n # LAB(end solution)", "title": "" }, { "docid": "dc1702a888c65c28843d2d63b6d877e8", "score": "0.57283133", "text": "def generate_sentence(self, n):\n # Come up with the first word by using a possible word after a period\n current_word = random.choice(self.chain['.'])\n generated_text = current_word\n word_count = 1\n\n # Iterate through the markov chain until you hit a period\n # or the max number of words in a sentence\n while(word_count < n):\n next_word = random.choice(self.chain[current_word])\n if next_word == '.':\n generated_text = generated_text + \".\"\n return generated_text\n else:\n generated_text = generated_text + \" \" + next_word\n current_word = next_word\n word_count += 1\n\n # Make sure each sentence ends with a period\n if generated_text[-1] != '.':\n generated_text = generated_text + '.'\n\n return generated_text", "title": "" }, { "docid": "51a6fb35f5f6257703cc41f25dad98c3", "score": "0.5713863", "text": "def create_trigram(txt_location):\n my_file = \"\"\n if txt_location.startswith(\"https:\") or txt_location.startswith(\"http:\"):\n my_file = str(urllib.request.urlopen(txt_location).read(1000))\n else:\n with open(txt_location) as outputfile:\n my_file = outputfile.read()\n print(\"Original Text:\")\n print(my_file)\n print()\n my_file = clean_txt(my_file)\n txt_list = my_file.split()\n trigrams = []\n i = 0\n while i < len(txt_list) - 2:\n trigrams.append(list((txt_list[i], txt_list[i + 1], txt_list[i + 2])))\n i += 1\n print(\"Create List of Trigrams:\")\n print(trigrams)\n print()\n bigram_list = []\n for item in trigrams:\n bigram_list.append([\"{} {}\".format(item[0], item[1]), item[2]])\n print(\"Create List of Bigrams to Build Dictionary Keys:\")\n print(bigram_list)\n print()\n d = defaultdict(list)\n for k, v in bigram_list:\n d[k].append(v)\n starting_pt = random.choice(list(d.keys()))\n print(\"Create a Dictionary of Keys and Values Matched From Text:\")\n print(d.items())\n print()\n my_story = starting_pt + \" \"\n for i in range(10):\n for k, v in d.items():\n if k == starting_pt:\n rand_value = randint(0, len(v) - 1)\n # print(\"keys and values: {}, {}\".format(k, v))\n my_story += v[rand_value] + \" \"\n new_key = k.split(' ', 1)[1]\n # print(\"new_key: \")\n # print(new_key) # the list rearranges\n starting_pt = \"{} {}\".format(new_key, v[rand_value])\n # print(\"new starting point: \" + starting_pt)\n i += 1\n print(\"The New Trigram Story:\")\n print(my_story)", "title": "" }, { "docid": "c0241ad12c3956d645d3fa4b4b554a4a", "score": "0.56853807", "text": "def make_text(self):\n key = choice(self.chains.keys())\n words = [key[0], key[1]]\n num_chars = 0\n while key in self.chains and num_chars < self.max_output_size:\n # Keep looping until we have a key that isn't in the chains\n # 
(which would mean it was the end of our original text)\n #\n # Note that for long texts (like a full book), this might mean\n # it would run for a very long time.\n word = choice(self.chains[key])\n num_chars += len(word) + 1 #Add the length of the word and the space to the list counting characters\n words.append(word)\n key = (key[1], word)\n\n print \" \".join(words)", "title": "" }, { "docid": "a7aa00b9ea7d5aa3c97053d035aa04a4", "score": "0.56737167", "text": "def genText(size):\n #print(\"genText\")\n global words\n if(not randStart):\n seed = random.randint(0, len(words) - 3)\n seed_word, next_word = words[seed], words[seed + 1]\n a, b = seed_word, next_word\n else:\n seed = random.randint(0, len(startwords) - 3)\n seed_word, next_word = startwords[seed], words[index(startwords[seed]) + 1]\n a, b = seed_word, next_word\n\n gen_words = []\n endPunc = [\".\", \"!\", \"?\", '.\"', \".'\"]\n leng = 0\n while (not b[len(b) - 1] in endPunc or leng <= size):\n gen_words.append(a)\n try:\n a, b = b, random.choice(cache[(a, b)])\n except:\n a, b = b, cache[(a, b)][0]\n leng = len(gen_words)\n gen_words.append(b)\n for i in range(len(gen_words)): # Makes the text look better\n if (\".\" in gen_words[i] and i < len(gen_words) - 1):\n gen_words[i + 1] = gen_words[i + 1][0].upper() + gen_words[i + 1][1:]\n gen_words[0] = gen_words[0][0].upper() + gen_words[0][1:]\n\n return \" \".join(gen_words)", "title": "" }, { "docid": "db6b818e5ec742a38b543c2675f80eb2", "score": "0.56696665", "text": "def generate_words():\n url = requests.get(\"https://raw.githubusercontent.com/sindresorhus/mnemonic-words/master/words.json\")\n if url.status_code == 200:\n words = json.loads(url.text)\n return [choice(words) for n in range(4)]\n return None", "title": "" }, { "docid": "d8eaa676f0fde4803bf43f49ec88a9a3", "score": "0.5663053", "text": "def generate_abab(mc):\n\n # start with a random word that will follow the markov chain\n # traversing the path in the markov chain will reflect the POS sentence structure\n\n # keep track of the last word in the sentence\n # if second A or B, make sure the last word rhymes with the first\n # keep track of the number of syllables\n # might need backtracking to get the right number of syllables\n a1 = generate_sentence(mc)\n a_rhyme = a1[-1]\n b1 = generate_sentence(mc, a_rhyme=a_rhyme)\n b_rhyme = b1[-1]\n a2 = generate_sentence(mc, a_rhyme=a_rhyme, b_rhyme=b_rhyme)\n b2 = generate_sentence(mc, b_rhyme=b_rhyme)\n\n return ' '.join(a1).capitalize(), ' '.join(b1).capitalize(), ' '.join(a2).capitalize(), ' '.join(\n b2).capitalize(),", "title": "" }, { "docid": "f062d5a202266580797572b98d343f3e", "score": "0.5638802", "text": "def make_text(chains):\n\n words = []\n\n # your code goes here\n\n return \" \".join(words)", "title": "" }, { "docid": "0ca42af8afc60ac4a8bf3a5cd8fb1f1e", "score": "0.56342405", "text": "def create_chain_text(tri_dict, num = 60):\r\n trikeylist = list(tri_dict.keys())\r\n starter_pairs = list(filter(starter_pair, trikeylist))\r\n\r\n chain = [choice(starter_pairs)]\r\n\r\n for i in range(num):\r\n selection = choice(tri_dict[chain[-1]])\r\n chain.append((chain[-1][1], selection))\r\n\r\n chain_words = [chain[0][0]]\r\n for x, y in chain:\r\n chain_words.append(y)\r\n if y:\r\n if y[-1] == \".\":\r\n end_word = len(chain_words)\r\n ##finding the last period in the text sample\r\n try:\r\n chain_words = chain_words[:end_word]\r\n except:\r\n pass\r\n\r\n return chain_words", "title": "" }, { "docid": "cb96df2ba80e318309238f26122d7e3", "score": 
"0.56281716", "text": "def generate_sentence(mc, start_node=None, num_syllables=10, a_rhyme=None, b_rhyme=None):\n\n # Base case is when the number of syllables is reached\n if num_syllables is 0:\n return []\n\n # Get a random word to start the sentence\n start = random.choice(list(mc)) if start_node is None else start_node\n weights = calculate_weights(mc, start)\n # print(sent_struc) if start_node is None else ()\n redo = True\n chosen_words = [] # words that don't fulfill syllable requirement\n while redo: # keep looping until we find a word that does not exceed the syllable limit and satisfies the other conditions\n # find a random word from the markov chain\n choices = list(mc[start].keys())\n chosen_word = np.random.choice(choices, None, p=weights)\n chosen_word_pos = nltk.pos_tag(nltk.word_tokenize(chosen_word))[0][1]\n\n prev_word_pos = nltk.pos_tag(nltk.word_tokenize(start))[0][1]\n\n # If the word we chose is not in the rejected words list and in mc key\n if chosen_word not in chosen_words and chosen_word in mc.keys():\n # Get remaining number of syllables we need\n chosen_word_syllable = syllables(chosen_word)\n new_num_syllables = num_syllables - chosen_word_syllable\n # if the chosen word makes the total number of syllables > 10 or has the same POS as the previous word,\n # then choose another word\n if new_num_syllables >= 0 and chosen_word_pos is not prev_word_pos:\n redo = False\n\n # Check if we are generating the second sentence of A or B\n if new_num_syllables is 0:\n if a_rhyme is not None and b_rhyme is not None: # Second sentence of A\n chosen_word = get_rhyme_word(mc, a_rhyme, None, chosen_word_syllable)\n if a_rhyme is None and b_rhyme is not None: # Second sentence of B\n chosen_word = get_rhyme_word(mc, None, b_rhyme, chosen_word_syllable)\n # print(\"NEW WORD IS \" + chosen_word)\n chosen_words.append(chosen_word)\n # Case of only having one choice and it not being compatible for the sentence, get a new word to branch off of\n elif chosen_word not in mc.keys() or len(choices) is len(chosen_words):\n start = random.choice(list(mc))\n weights = calculate_weights(mc, start)\n chosen_words = []\n return [chosen_word] + generate_sentence(mc, start_node=chosen_word,\n num_syllables=new_num_syllables, a_rhyme=a_rhyme, b_rhyme=b_rhyme)", "title": "" }, { "docid": "d1ad00700efb4461339a65f40a1dbb3a", "score": "0.5614928", "text": "def make_text(chains):\n\n text = \"\"\n\n # your code goes here\n # end_var = 0\n # words = []\n sentence = []\n # print chains\n\n #this is a temp end point\n while len(text) < 140:\n key = choice(chains.keys())\n # print key\n\n while key[0] != key[0].title():\n key = choice(chains.keys())\n # print key\n #is there a better way to do this?\n sentence = [key[0], key[1]]\n\n value = choice(chains[key])\n\n if value is None:\n checked_length = check_concat_length(text, sentence)\n if checked_length is None:\n return text\n else:\n text = checked_length\n continue\n\n while True:\n sentence.append(value)\n if value[-1] in ['.', '!', '?']:\n checked_length = check_concat_length(text, sentence)\n if checked_length is None:\n return text\n else:\n text = checked_length\n\n key = (key[1], value)\n value = choice(chains[key])\n\n if value is None:\n break\n\n # while end_var < 50 and value[-1] not in ['.', '!', '?']:\n # if value[-1] in ['.', '!', '?']:\n # sentence.append(value)\n\n # value = choice(chains[key])\n\n # #here we want to be dealing with ends of sentences and making sure\n # #we start with beginnigs next time\n\n # #print value\n # 
words.append(value)\n # key = (key[1], value)\n # end_var += 1\n \n #break\n return text", "title": "" }, { "docid": "67d70b5216204aa24ce84cd3bcdf05e1", "score": "0.55619085", "text": "def markov_word(P, N):\n\n assert P.shape == (N+1,N+1)\n i = random.randint(0,N-1)\n word = string.ascii_lowercase[random.randint(0,N-1)]+string.ascii_lowercase[i]\n\n # Initial state\n v = np.zeros(N + 1)\n v[i] = 1.\n\n while True:\n # Compute transition probabilities for next state.\n v = P.dot(v)\n # Cumulative sums will aid in choosing next state.\n cumv = np.cumsum(v)\n #print cumv\n\n # We choose a random number and look for which state corresponds.\n i = bisect(cumv, np.random.rand())\n\n # Check if transition is nonword ending,\n if i<N:\n word += string.ascii_lowercase[i]\n v = np.zeros(N+1)\n v[i]=1.\n # or word ending.\n elif i == N:\n return word", "title": "" }, { "docid": "ded32595c3d8d2406291590569e470bf", "score": "0.550744", "text": "def sample_text(\n n_chars,\n network,\n vocabulary,\n initial_text=None,\n random_state=None,\n top_k=None,\n verbose=False,\n device=None,\n):\n device = device or torch.device(\"cpu\")\n network.eval()\n initial_text = initial_text or \"\"\n res = initial_text\n h, c = None, None\n\n iterable = range(n_chars)\n if verbose:\n iterable = tqdm.tqdm(iterable)\n\n if random_state is not None:\n np.random.seed(random_state)\n\n for _ in iterable:\n previous_chars = initial_text if res == initial_text else res[-1]\n new_ch, h, c = sample_char(\n network,\n vocabulary,\n h=h,\n c=c,\n previous_chars=previous_chars,\n top_k=top_k,\n device=device,\n )\n res += new_ch\n\n return res", "title": "" }, { "docid": "18be667c61b75923870f0f3f980e3ab1", "score": "0.5501808", "text": "def get_rand_trikey(trigram):\n return random.choice(list(trigram.keys()))", "title": "" }, { "docid": "f9bf571859b805f5d6f7d88c037aa03c", "score": "0.5492372", "text": "def print_mimic(mimic_dict, word):\n # +++your code here+++\n \n a=\"\"\n fname=filelist(filename)\n for x in mimic_dict:\n if word==x:\n a=a+mimic_dict[x][0]\n for x in fname:\n if len(mimic_dict[x])!=0:\n a=a+\" \"+random.choice(mimic_dict[x])\n print a\n return", "title": "" }, { "docid": "e650a445bf47755d39a5df630e8a739d", "score": "0.5490131", "text": "def generate_message(chain, seed=['END'], count=100, verbose_failure=True):\n print('Making markov chain...')\n finalmessage = \"\"\n attempts = 0\n while len(finalmessage) < 15 and attempts < 50:\n if len(seed) > 1:\n seedl = [x.lower() for x in seed]\n message = ' '.join(seedl)\n word1 = seedl[-1]\n else:\n word1 = seed[0]\n if word1 != 'END':\n word1 = word1.lower()\n message = word1\n\n ended = False\n while len(message.split(' ')) < count and not ended:\n if word1 in chain:\n word2 = random.choice(chain[word1])\n word1 = word2\n if word1 != 'END':\n if word1 in ['.',',', '!', '?', ';']:\n message += word2\n else:\n message += ' ' + word2\n count += 1\n else:\n ended = True\n else:\n if verbose_failure:\n return \"%s? 
that doesn't make any sense\" % word1\n else:\n return None\n\n attempts += 1\n\n finalmessage = message.replace('&&newline', '\\n')\n finalmessage = finalmessage.replace('END', '')\n\n if attempts == 50:\n if verbose_failure:\n return \"that doesn't make any sense at all.\"\n else:\n return None\n else:\n print('Made a markov chain: %s' % finalmessage)\n return finalmessage", "title": "" }, { "docid": "20561fb0302a0bfa700a2e7df0168a8d", "score": "0.5473496", "text": "def generate_text(prompt, pipeline, **kwargs):\n kwargs = kwargs.copy()\n\n # Make answers reproducible only if wanted\n seed = kwargs.pop('seed', None)\n if seed is not None:\n set_seed(seed)\n\n responses = pipeline(prompt, **kwargs)\n return list(map(lambda x: clean_text(x['generated_text'][len(prompt):]), responses))", "title": "" }, { "docid": "2322dcf2e7c5d400c6cfa74d0a64f342", "score": "0.54451394", "text": "def cluelist(sequencelist):\n words = [list(seq.wordset)[0] for seq in sequencelist]\n return [random.choice(vocabwithclues[word]) for word in words]", "title": "" }, { "docid": "c38f1f6769d08d0f09309de96272869a", "score": "0.5421231", "text": "def generate(self, count=30):\n first_word = random.choice(list(self.word_dict.keys())) # first word for our sentence\n # first_word = first_word.capitalize()\n sentence = []\n print(self.word_dict)\n print(\"first_word\", first_word)\n for i in range(count):\n second_word = self.word_dict[first_word]\n next_word = second_word.sample()\n first_word = next_word\n sentence.append(next_word)\n # check for keys that has only 1 key value pair and collect them as a candidate for end word\n end_words = []\n for index in range(len(self.token)-1):\n if len(self.word_dict[self.token[index]]) == 1:\n end_words.append(self.token[index])\n last_word = random.choice(end_words)\n sentence.append(last_word)\n sentence = ' '.join(sentence)\n sentence = sentence.capitalize()\n return sentence + \".\"", "title": "" }, { "docid": "9bea9e42906d5eab7a636fb43b9debe7", "score": "0.5419392", "text": "def build_txt_strand_from_chains(model):\n txt = ''\n for _, chain in model.items():\n txt += ''.join(chain)\n\n return txt", "title": "" }, { "docid": "9ceba285bc9b08a20558b9a1c7059f85", "score": "0.5417729", "text": "def build_text(word_pairs):\n word = random.choice(list(word_pairs.keys()))\n output_text = \"\"\n text_words = list(word)\n\n while len(text_words) < 100:\n pair = tuple(text_words[-2:])\n\n if pair in word_pairs:\n text_words.append(random.choice(word_pairs[pair]))\n else:\n text_words[-1] += \".\"\n alternate_pair = random.choice(list(word_pairs.keys()))\n text_words.extend(list(alternate_pair))\n\n output_text = \"START\\n\\n\" + \" \".join(text_words) + \"! 
\\\n \\n\\nEND\"\n return output_text", "title": "" }, { "docid": "b1d2fdc42dfb6cc06e81590ad1cea420", "score": "0.5414453", "text": "def make_chains(text_string):\n\n text_string = open_and_read_file(sys.argv[1])\n chains = {}\n i = 0\n\n while i in range(len(text_string) -1):\n bigram = (text_string[i] + ' ' + text_string[i + 1])\n bigram_value = text_string[i+2:i+3]\n chains[bigram] = bigram_value\n i = i + 1\n \n return chains", "title": "" }, { "docid": "59855fdb25f1e0e610f501ce7d8c0b96", "score": "0.54103154", "text": "def generateSentence(nChoices=10):\n\tsentence = [[vocabLines[152].split()[1]]]\n\tsentenceC = [[153]]\n\tx1 = NextFromBigram(153, nChoices)\n\tprob=1\n\tsentenceC.append([x1[1]])\n\tsentence.append([x1[0]])\n\tx = (-1, -1)\n\twhile x[1] != 152:\t\t\t#<\\s> (whose number is 153) is the stop condition\n\t\tx = NextFromTrigram(sentenceC[len(sentenceC) - 2][0], sentenceC[len(sentenceC) - 1][0], nChoices)\n\t\tsentenceC.append([x[1]])\n\t\tsentence.append([x[0]])\n\t\tprob*=x[2]\t\t\t#cumulative prior sample probability\n\n\trandSentence= ' '.join(word[0] for word in sentence)\n\treturn randSentence, round(prob, 7)", "title": "" }, { "docid": "f6b83d23fc79dd0324bb313856823a1e", "score": "0.540492", "text": "def __init__(self, texts):\n self.toxic_offsets = [[i for i, ch in enumerate(text) if random.random()>0.5] for text in texts]", "title": "" }, { "docid": "e602b52f23fc702b20b9bdd54c4e9804", "score": "0.5393141", "text": "def markov(self, length, randomness, text = ''):\n\n\t\t# If text is empty, we have to generate a seed.\n\t\ttries = 0\n\t\twhile not text:\n\t\t\ttext = self.genSeed(randomness)\n\t\t\ttries += 1\n\t\t\tif tries > 100:\n\t\t\t\tprint ('Couldn\\'t produce a seed. Try decreasing the randomness.')\n\t\t\t\texit(1)\n\t\tif len(text) >= length:\n\t\t\treturn self.prettify(text)\n\n\t\telse:\n\t\t\ttext.extend(self.genSeed(randomness))\n\t\t\treturn self.markov(length, randomness, text)", "title": "" }, { "docid": "e267acd8d3a1afe5bb99aa4745d33ee5", "score": "0.53691953", "text": "def random_proverb():\n langs = {'ses': u\"soŋay koyraboro šenni\", 'bm': u\"bamanakan\"}\n p = proverbs[random.randint(0, len(proverbs) - 1)]\n return (langs[p[0]], p[1], p[2])", "title": "" }, { "docid": "988db9e1019fba623b500432dc48e45d", "score": "0.53626263", "text": "def random_word() -> str:\n # Loading from JSON for better performance.\n with open(WORD_LIST) as infile:\n words = tuple(json.loads(infile.read()).keys())\n return random.choice(words)", "title": "" }, { "docid": "3515356109fb0a7781d7d10d36d973bf", "score": "0.5359282", "text": "def generate_text(lm, order, nletters=1000):\n history = \"~\" * order\n out = []\n for i in range(nletters):\n c = generate_letter(lm, history, order)\n history = history[-order:] + c\n out.append(c)\n return \"\".join(out)", "title": "" }, { "docid": "23f5c0c7c9207ce9248481bf5556452d", "score": "0.53554475", "text": "def trans(kana):\n if (random.random() < 0.5):\n return kana\n\n xmin = edge([sum([kana[y][x] for y in range(16)]) for x in range(16)])\n xmax = edge([sum([kana[y][15 - x] for y in range(16)]) for x in range(16)])\n ymin = edge([sum([kana[y][x] for x in range(16)]) for y in range(16)])\n ymax = edge([sum([kana[15 - y][x] for x in range(16)]) for y in range(16)])\n \n dx = random.randint(-xmin, xmax)\n dy = random.randint(-ymin, ymax)\n\n return [[kana[y + dy][x + dx] if 0 <= y + dy < 16 and 0 <= x + dx < 16 else 0 for x in range(16)] for y in range(16)]", "title": "" }, { "docid": "ff2567ce60495bd4903b307635ba9f05", 
"score": "0.5349105", "text": "def generate_text(trigram, num_words):\n # set the first two words as a random key in trigram\n word_list = list(get_rand_trikey(trigram))\n while len(word_list) < num_words:\n # if key exists add one trigram value to generated text\n if (word_list[-2], word_list[-1]) in trigram:\n potential_words = trigram[(word_list[-2], word_list[-1])]\n word_list.append(potential_words[\n random.randint(0, len(potential_words) - 1)])\n # if a trigram cannot be found select a random key and add the words\n else:\n rand_key = get_rand_trikey(trigram)\n word_list.append(rand_key[0])\n word_list.append(rand_key[1])\n # clear out any extra words that may have been made\n word_list = word_list[:num_words]\n return ' '.join(word_list)", "title": "" }, { "docid": "b7adba63d7f56ef632ce6518dd540d3f", "score": "0.5338673", "text": "def generate_phrase():\n buzz_terms = sample(parts.noun_phrases, 2)\n phrase = ' '.join([\n sample(parts.adjectives),\n buzz_terms[0],\n sample(parts.adverbs),\n sample(parts.verbs),\n buzz_terms[1]\n ])\n return phrase.title()", "title": "" }, { "docid": "803e7c0011e865060073dfd8ab01f36c", "score": "0.5332376", "text": "def build_random_sentence(chain, key_length, msg_len=25, tries=10):\n\n for i in range(tries):\n\n # Get a key from the words that begin a sentence\n words = random.choice(chain[BEGIN]).split(' ')\n sentence = words[0].capitalize()\n if key_length > 1:\n sentence += ' ' + ' '.join(words[1:])\n\n # Generate a maximum of msg_len words for the sentence\n invalid = False\n for i in range(msg_len - key_length):\n try:\n next_word = random.choice(chain[' '.join(words)])\n if next_word == END:\n if test_generated_sentence(sentence.split(' ')):\n return green(sentence)\n else:\n invalid = True\n break\n sentence += ' ' + next_word\n del words[0]\n words.append(next_word)\n except KeyError:\n # Print something to let user know an error occured\n print('t(\\'-\\')t', end='')\n\n # If here, reached msg_len OR invalid sentence\n # Make sure sentence ends with punctuation\n if not invalid:\n if not sentence[-1] in '.?!':\n sentence += random.choice(list('.?!'))\n if test_generated_sentence(sentence.split(' ')):\n return green(sentence)\n\n return red('UNABLE TO GENERATE ORIGINAL SENTENCE')", "title": "" }, { "docid": "476c2fa6e8e13b606c08703c90466557", "score": "0.5318961", "text": "def pass_phrase(args):\n debug(\"Reading dictionary {}\".format(args.dict))\n with open(args.dict) as f:\n words = f.readlines()\n min = args.min if args.min else 4\n max = args.max if args.max else 6\n length = random.randint(min, max)\n debug(\"Length is {}\".format(length))\n s = \" \".join([random.choice(words).strip() for i in xrange(length)])\n return s", "title": "" }, { "docid": "36e82acc43b6eb2dc74d0f5db003e0ce", "score": "0.5316658", "text": "def generate_random_codename(seed):\n random.seed(seed)\n with open('adjectives.txt', 'r') as adjectives_file:\n adjectives = adjectives_file.read()\n adjective = random.choice(adjectives.split('\\n'))\n with open('animals.txt', 'r') as animals_file:\n animals = animals_file.read()\n animal = random.choice(animals.split('\\n'))\n return '{} {}'.format(adjective, animal)", "title": "" }, { "docid": "d2b75abbb388c63df8a56aad822296eb", "score": "0.53145456", "text": "def generateMargChain(self, key_word):\n if not key_word:return None\n base_margs = Markov.select(Markov.q.second_wordID == key_word)\n base_margs = list(base_margs)\n if not base_margs:return None\n\n #base is random for now!!\n base = 
base_margs[int(random.random()*len(base_margs))]\n # Create the forward chain (keyword -> end)\n first_word = base.first_word.id\n second_word = base.second_word.id\n third_word = base.third_word.id\n \n second_half = [base.second_word.id]\n while third_word !=1 and len(second_half) < 12:\n #Loop until it hits EOF = id(1)\n second_half.append(third_word)\n #swap things forward\n first_word = second_word\n second_word = third_word\n #self.debug('first_word:%s second_word:%s' % (first_word, second_word))\n hits = Markov.select(AND(Markov.q.first_wordID == first_word, Markov.q.second_wordID == second_word))\n hits=list(hits)\n choice = hits[int(random.random()*len(hits))]\n #choice = self.pickBestChoice(hits)\n second_word = choice.second_word.id\n third_word = choice.third_word.id\n # Next the reverese chain keyword -> start\n first_half=[]\n first_word = base.first_word.id\n second_word = base.second_word.id\n while first_word !=1:\n first_half.append(first_word)\n hits = Markov.select(AND(Markov.q.second_wordID == first_word, Markov.q.third_wordID == second_word))\n hits = list(hits)\n choice = hits[int(random.random()*len(hits))]\n first_word = choice.first_word.id\n second_word= choice.second_word.id\n #Merge the halves together\n first_half.reverse()\n first_half.extend(second_half)\n #self.debug('first_half:%s' % first_half)\n return first_half", "title": "" }, { "docid": "8a9d660b35dd80b1e50fe3bbe5d1097b", "score": "0.53050673", "text": "def generator(set_path, output_path):\n random_select(set_path, output_path, 'short_pure.txt')\n random_select(set_path, output_path, 'short_pure_number.txt')\n random_select(set_path, output_path, 'short_pure_punctuation.txt')\n random_select(set_path, output_path, 'short_pure_number_punctuation.txt')", "title": "" }, { "docid": "6d48bc2de401292423b5f54880b26c5f", "score": "0.5301611", "text": "def make_chains(text_string):\n\n split_data = open_and_read_file(input_path).split()\n\n chains = {}\n\n # iterate over the split_data list of words\n # each iteration, we take word1 and word2, which will be a tuple that is\n # a dictionary key\n # the value will be word3\n # 1st iteration:\n # ('Would', 'you') : ['could']\n # ('you', 'could') : ['you']\n\n\n for i in range(0, len(split_data) - 1):\n key = split_data[i], split_data[i+1] #default type is tuple\n\n #if key exists, add to existing value\n # value = chains.get(key, []).append(split_data[i+2])\n # chains[key] = value\n if i == (len(split_data) - 2):\n break\n if key in chains: #checks for duplicates\n value = chains.get(key)\n value.append(split_data[i+2])\n else:\n value = []\n value.append(split_data[i+2])\n chains[key] = value\n \n # for key, value in chains.items():\n # print(f'{key}: {value}')\n\n# Would you could you in a house?\n# Would you could you with a mouse?\n# Would you could you in a box?\n# Would you could you with a fox?\n# Would you like green eggs and ham?\n# Would you like them, Sam I am?\n\n return chains", "title": "" }, { "docid": "40580f142e99de269c15d3e779a5052b", "score": "0.5294502", "text": "def generate_text():\n for i in range (0,30):\n generate_sentence()\n print \" \".join(rihannsu_text) #print the entire paragraph", "title": "" }, { "docid": "b5e8a1bd058b1531e1fb5693acc42dd5", "score": "0.5290841", "text": "def generate_text_from_chain(self):\n text_length = input(\n \"Your desired length of text.\\n\\tType int: \")\n self.generated_text = self.sample_text[0 : self.S]\n for i in range(text_length - self.S):\n self.generated_text += self.generated_chain.\\\n 
generate_proceeding_character(\n self.generated_text[i : i + self.S])", "title": "" }, { "docid": "f2eb932712f4bd427af0e1a9676b0b88", "score": "0.5287191", "text": "def create_rnd_txt_wm ():\n\n prints = list(string.printable)[0:84]\n #open all of the images from the VOC2008 dataset as jpegs\n\n font_size = np.random.randint(low = 50, high = 350)\n \n #create the watermark font for the image\n font = ImageFont.truetype(\"arial.ttf\", font_size) \n \n #generate image to hold the watermark text object\n img_temp = Image.new('L', (350,350))\n \n #create the watermark text, of random length, using random printable characters\n text_str = np.random.choice(prints, np.random.randint(low=5, high = 14))\n text_str = \"\".join(text_str)\n \n #draw on temporary image with text\n draw_temp = ImageDraw.Draw(img_temp) \n \n #generate a random integer for the opacity argument (fill)\n opac = np.random.randint(low= 70, high=150)\n \n #insert text onto the temporary image\n draw_temp.text((0, 0), text_str, font=font, fill=opac)\n \n #generate a random integer for rotation:\n rot_int = np.random.randint(low = 0, high = 180)\n \n #rotate the text on the temporary image\n rotated_text = img_temp.rotate(rot_int, expand=1)\n \n #generate a random location for the watermark on the image\n #merge the temporary image with text with the image passed in \n #third tuple also needs to be random: controls the location of the img\n return rotated_text", "title": "" }, { "docid": "dd6ae69856997e17329d5edc2394c316", "score": "0.52827543", "text": "def generate_text(model, data, n, nwords=20, k=3):\n x = np.zeros((1,n-1), dtype=int)\n # iword = 0\n iword = random.randint(1, data.nvocab)\n words = []\n for i in range(nwords):\n x = np.roll(x,-1) # flattens array, rotates to left, and reshapes it\n # print(x)\n if len(x[0])>0: # for n=1, x[0] will always be empty\n x[0,-1] = iword # insert new word\n probs = model.predict_proba(x, verbose=0)\n iword_probs = get_best_iword_probs(probs, k)\n iwords = choose_iwords(iword_probs, 1) # choose randomly\n iword = iwords[0]\n try: # in case iword is out of bounds - eg for tiny vocabulary\n word = data.iword_to_word[iword]\n words.append(word)\n except:\n pass\n sentence = ' '.join(words)\n return sentence", "title": "" }, { "docid": "39cc9ef3aa5a6caaa2731fa237c361ff", "score": "0.5275154", "text": "def generate(self):\n\n return \" \".join(self.random.sample(self.words, self.phrase_length))", "title": "" }, { "docid": "d45611397eff552fab385d52e78530b1", "score": "0.52718776", "text": "def create_sentence(corpora, person_name=None):\n if not person_name:\n person_name = list(corpora.keys())[0]\n\n mm = MarkovMaker(corpora, person_name)\n sentence = mm.create_sentence()\n return {\"name\": person_name,\n \"sentence\": sentence}", "title": "" }, { "docid": "55ba10d9bcc6c14fad189969627ee6ce", "score": "0.52537096", "text": "def test_generate_text(text, special_dict, num_words):\n from trigrams import generate_text\n assert len(generate_text(text, special_dict, num_words)) == num_words", "title": "" }, { "docid": "f6cd6b7b8a016554d761d1cf18d0377e", "score": "0.52485406", "text": "def fakedata(phrase, seed=42):\n\n phrase = phrase.split()\n\n phrase_fake_human = phrase\n phrase_fake_machine = phrase\n\n random.seed(seed)\n loss_human = np.random.uniform(0, 0.12) # values were chosen arbitrarily\n random.seed(seed)\n loss_machine = np.random.uniform(0.1, 0.3)\n\n length_phrase = len(phrase)\n\n # Choosing a few words to be forgotten\n words_loss_human = sorted(np.random.choice(range(0, 
length_phrase),\n int(np.floor(length_phrase * loss_human))), reverse=True)\n words_loss_machine = sorted(np.random.choice(range(0, length_phrase),\n int(np.floor(length_phrase * loss_machine))), reverse=True)\n\n phrase_fake_human = [phrase for i, phrase in enumerate(phrase_fake_human) if i not in words_loss_human]\n phrase_fake_machine = [phrase for i, phrase in enumerate(phrase_fake_machine) if i not in words_loss_machine]\n\n random.seed(seed)\n loss_machine = np.random.uniform(0.05, 0.1)\n random.seed(seed)\n words_change = np.random.choice(range(0, len(phrase_fake_machine)),\n int(np.floor(len(phrase_fake_machine) * loss_machine)),replace=False)\n random.seed(seed+1)\n move_to = np.random.choice(range(0, len(phrase_fake_machine)),\n int(np.floor(len(phrase_fake_machine) * loss_machine)),replace=False)\n\n # Changing a few words of place.\n for i in range(0, len(move_to)):\n aux = phrase_fake_machine[move_to[i]]\n phrase_fake_machine[move_to[i]] = phrase_fake_machine[words_change[i]]\n phrase_fake_machine[words_change[i]] = aux\n\n phrase_fake_human = ' '.join(phrase_fake_human)\n phrase_fake_machine = ' '.join(phrase_fake_machine)\n\n\n return phrase_fake_human, phrase_fake_machine", "title": "" }, { "docid": "22e654bb2fc90b44260d37740e4ef887", "score": "0.5248509", "text": "def random_paragraph():\n length = random.randint(3, 15)\n return (\" \".join(random_sentence() for i in xrange(length)))", "title": "" }, { "docid": "0792c78ff8b3fdd75e57f3feaaef1707", "score": "0.52370006", "text": "def make_text(chains):\n\n words = []\n start = random.choice(chains.keys())\n\n for word in tuple:\n words.append(word)\n\n next_word = random.choice(chains[start])\n words.append(next_word)\n\n # test to see if the last item in words ends with a period\n final_word = words[-1]\n end = final_word.endswith(\".\") \n\n ########START HERE###############\n while (end == False) and (len(words) < 30):\n new_tuple = (words[-3], words[-2], words[-1])\n print new_tuple\n next_word = random.choice(chains[new_tuple])\n words.append(next_word)\n final_word = words[-1] # this wasn't working b/c we weren't updating this\n end = final_word.endswith(\".\") \n\n words[0] = words[0].capitalize()\n return ' '.join(words)", "title": "" }, { "docid": "252fd9e85e4d09a49d8fd838b9e209d4", "score": "0.52256477", "text": "def prepositionalPhrase():\n return random.choice(prepositions) + \" \" + nounPhrase()", "title": "" }, { "docid": "239d8fa47905722ff0b2ed340ef11930", "score": "0.52162707", "text": "def generate_sentence(dictionary, length):\n result = \"\"\n word1, word2 = pick_first_two_words(dictionary)\n for i in range(length):\n result += '{} '.format(word1)\n possible_words = dictionary.get((word1, word2), False)\n if possible_words:\n word1, word2 = word2, random.choice(possible_words)\n else:\n word1, word2 = pick_first_two_words(dictionary)\n return result", "title": "" }, { "docid": "54a18fe4cb2505560700d324a76ac419", "score": "0.52137524", "text": "def mutate(txt: str, hamming_dist: int) -> str:\n length = len(txt)\n random_pos = sample(range(0,length),hamming_dist)\n random_pos.sort() # To ensure the for loop below doesnot increase the length of txt\n mutated_txt = \"\"\n i = 0\n for pos in random_pos:\n mutated_txt += txt[i:pos]\n i = pos+1\n if(txt[pos] == '1'):\n mutated_txt += '0'\n else:\n mutated_txt += '1'\n \n mutated_txt += txt[i:]\n return mutated_txt", "title": "" }, { "docid": "385b50afc9725d0dd855e2386ece020d", "score": "0.52123934", "text": "def generate_effect():\n\n#\tkey = choice(keys)\n#\tkey 
= ('someone', 'nearby')\n#\teffect = chain.generate_sentences(sentences=1, key=key, newlines=False)\n\teffect = chain.generate_sentences(sentences=1, key=None, newlines=False)\n\twhile 'caster' not in effect:\n\t\tprint('.', end='')\n\t\teffect = chain.generate_sentences(sentences=1, key=None, newlines=False)\n\tprint(effect)", "title": "" }, { "docid": "2e2adfba77b236ef166784b6b0bae2b1", "score": "0.52100533", "text": "def demo(word_graph):\n for word_length in range(3, 8):\n words = [w for w in word_graph if len(w) == word_length]\n for _ in range(3):\n initial, goal = random.sample(words, 2)\n print_word_chain(initial, goal, word_graph)\n print()", "title": "" }, { "docid": "327b1da26eb21f837caf9022d3ef3e40", "score": "0.5206194", "text": "def markov(self,text,):\n corpus = self.tokenize_sentence(text)\n x = 0\n for sentence in corpus: #For every sentence Add START STOP to create seperate chain\n sentence = sentence.split()\n x = 0\n self['START'].add_count(tuple(sentence[0:self.order]))\n # From the first to the to last word\n while x< len(sentence)-self.order:\n word = tuple(sentence[x:x+self.order])\n next_word = tuple(sentence[x+1:self.order+x+1])\n # print('{} | {}'.format(word,next_word))\n if word not in self.keys():\n self[word]= Dictogram()\n self[word].add_count(next_word)\n x+=1\n # Adding a stop token to the final word\n last_word= tuple(sentence[-self.order:])\n self[last_word] = Dictogram()\n self[last_word].add_count('STOP')", "title": "" } ]
2ad03de18bd34be8e2e96f5b72d4875f
Creates a target for each lastframe's pipe, any pipe starting
[ { "docid": "817c7e2943762f2215d581068f220a73", "score": "0.58443964", "text": "def _create_previous_pipes(self):\r\n if self.previous_pipes:\r\n self._prev_stage = UpdatePreviousPipesStage(self.pipeline)\r\n for prev_pipe, prev_tex in iteritems(self.previous_pipes):\r\n\r\n if prev_pipe not in self.pipes:\r\n self.error(\"Attempted to use previous frame data from pipe\",\r\n prev_pipe, \"- however, that pipe was never created!\")\r\n return False\r\n\r\n # Tell the stage to transfer the data from the current pipe to\r\n # the current texture\r\n self._prev_stage.add_transfer(self.pipes[prev_pipe], prev_tex)\r\n self._prev_stage.create()\r\n self.stages.append(self._prev_stage)", "title": "" } ]
[ { "docid": "660e16fa0b2a156010fe710e43475764", "score": "0.5539378", "text": "def genTarget():\n global loopCount\n lab = '.L' + str(loopCount); loopCount += 1\n putLab(lab)\n return lab", "title": "" }, { "docid": "dc17514220c7aae098f7252107767b6d", "score": "0.5448055", "text": "def CreateLogTarget(self):", "title": "" }, { "docid": "e301c0229794e5d63a91cc4b35f50e6f", "score": "0.5326749", "text": "def CreatePipe(*args, **kwargs): # real signature unknown\r\n pass", "title": "" }, { "docid": "431ce93c3a85878f13a0e08bd4b32571", "score": "0.5300913", "text": "def _create_pipe(self):\n try:\n r, w = os.pipe()\n self._output_pipe_w = eventlet.greenio.GreenPipe(w, 'wb', 0)\n self._output_pipe_r = eventlet.greenio.GreenPipe(r, 'rb', 0)\n except (ImportError, NotImplementedError):\n # Support for Windows that doesn't support pipes\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.bind(('localhost', 0))\n sock.listen(1)\n csock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n csock.connect(('localhost', sock.getsockname()[1]))\n nsock, addr = sock.accept()\n sock.close()\n self._output_pipe_w = nsock.makefile('wb', 0)\n gsock = eventlet.greenio.GreenSocket(csock)\n self._output_pipe_r = gsock.makefile('rb', 0)", "title": "" }, { "docid": "1435f4e3a425918ad4f5572513122809", "score": "0.5282156", "text": "def prepareTarget(self):\n\n self.numNonTarget = range(1,len(self.dictImgNames)+1)\n self.numTarget = self.numNonTarget.pop(rnd.randint(0,len(self.numNonTarget)-1))\n self.l.debug('Target Image: ' + str(self.numTarget) +\n ' Name: ' + self.dictImgNames[self.numTarget])\n self.l.debug('NonTarget Images: ' + str(self.numNonTarget))\n self.bufferTrigger = icfb.TRIG_IMG + self.numTarget\n info = json.load(open(os.path.join(self.data_path,\n self.dictImgNames[self.numTarget],\n 'info.json')))\n self.pic_w = 2*info['size'][0]\n self.pic_h = 2*info['size'][1]", "title": "" }, { "docid": "9fd75375d7b004399c12d17409e8df14", "score": "0.5204373", "text": "def build_pipeline(self):\n object = self.source\n for pipe in self.pipeline:\n keywords = set(pipe.class_trait_names())\n keywords.remove('trait_added')\n keywords.remove('trait_modified')\n this_kwargs = {}\n for key, value in self.kwargs.items():\n if key in keywords:\n this_kwargs[key] = value\n object = pipe(object, **this_kwargs)._target\n return object", "title": "" }, { "docid": "d1860ec8eb906a57b43d50b406ef8fa8", "score": "0.518722", "text": "def spawn_pipes(self):\n spawn_height = np.random.randint(220, 490)\n self.pipes.append(\n Pipe(\n SCREEN_WIDTH+100,\n spawn_height,\n 'pipeUp.png'\n )\n )\n # dont let the pipe go through the floor\n self.pipes[-1].rect.height = FLOOR - self.pipes[-1].y\n\n # make sure there is a distance between the pipe spawns\n spawn_height -= np.random.randint(SCREEN_HEIGHT+50,\n SCREEN_HEIGHT+100)\n self.pipes.append(\n Pipe(\n SCREEN_WIDTH+100,\n spawn_height,\n 'pipeDown.png'\n )\n )", "title": "" }, { "docid": "9f29b5686e965e065f306acf27dbcb77", "score": "0.51525", "text": "def create_target(self):\n\n # TODO: Decide what type of target to create and append it to the list\n target = Target()\n t1 = Strong()\n t2 = Safe()\n # r = [target,t1,t2]\n # r1 = random.uniform()\n\n #self.targets.append(r1)\n\n self.targets.append(t1)\n self.targets.append(t2)\n self.targets.append(target)", "title": "" }, { "docid": "f9e0cfe8194d22b7aa1478e0b1507c0e", "score": "0.51405036", "text": "def _bind_pipes_to_stage(self, stage):\r\n for pipe in stage.required_pipes:\r\n\r\n # Check if there is an input block 
named like the pipe\r\n if pipe in self.input_blocks:\r\n self.input_blocks[pipe].bind_to(stage)\r\n continue\r\n\r\n if pipe.startswith(\"PreviousFrame::\"):\r\n # Special case: Pipes from the previous frame. We assume those\r\n # pipes have the same size as the window and a format of\r\n # F_rgba16. Could be subject to change.\r\n pipe_name = pipe.split(\"::\")[-1]\r\n if pipe_name not in self.previous_pipes:\r\n tex_format = \"RGBA16\"\r\n\r\n # XXX: Assuming we have a depth texture whenever \"depth\"\r\n # occurs in the textures name\r\n if \"depth\" in pipe_name.lower():\r\n tex_format = \"R32\"\r\n\r\n pipe_tex = Image.create_2d(\r\n \"Prev-\" + pipe_name, Globals.resolution.x,\r\n Globals.resolution.y, tex_format)\r\n pipe_tex.clear_image()\r\n self.previous_pipes[pipe_name] = pipe_tex\r\n stage.set_shader_input(\"Previous_\" + pipe_name, self.previous_pipes[pipe_name])\r\n continue\r\n\r\n elif pipe.startswith(\"FuturePipe::\"):\r\n # Special case: Future Pipes which are not available yet.\r\n # They will contain the unmodified data from the last\r\n # frame.\r\n pipe_name = pipe.split(\"::\")[-1]\r\n self.debug(\"Awaiting future pipe\", pipe_name)\r\n self.future_bindings.append((pipe_name, stage))\r\n continue\r\n\r\n if pipe not in self.pipes:\r\n self.fatal(\"Pipe '\" + pipe + \"' is missing for\", stage)\r\n return False\r\n\r\n pipe_value = self.pipes[pipe]\r\n if isinstance(pipe_value, list) or isinstance(pipe_value, tuple):\r\n stage.set_shader_input(pipe, *pipe_value)\r\n else:\r\n stage.set_shader_input(pipe, pipe_value)\r\n return True", "title": "" }, { "docid": "09f6113e6b321b5a16ec55cc4f745985", "score": "0.50547105", "text": "def __init__(self):\n self._process_pipe = []\n self._gen = None", "title": "" }, { "docid": "87d1846641851115378442b7336cd5a1", "score": "0.50002015", "text": "def preprocess_targets(targets, i):\n # _shard_features called to ensure that the variable names match\n targets = self._shard_features({\"targets\": targets})[\"targets\"]\n with tf.variable_scope(target_modality.name + '/targets'):\n targets = target_modality.targets_bottom_sharded(targets, dp)[0]\n targets = common_layers.flatten4d3d(targets)\n\n targets = tf.cond(\n tf.equal(i, 0), lambda: tf.zeros_like(targets), lambda: targets)\n\n if hparams.pos == \"timing\":\n targets += timing_signal[:, i:i + 1]\n return targets", "title": "" }, { "docid": "210eff8b1e01fc0ec3f0295cb9865d87", "score": "0.4971457", "text": "def tail(num):\n \n assert num >= 0\n\n @filters\n def _dagpype_internal_fn_act(target):\n try:\n i, es = 0, []\n \n while len(es) < num:\n es.append((yield))\n if len(es) == 0:\n target.close()\n return\n \n while True:\n es[i] = (yield)\n i += 1\n if i == len(es):\n i = 0\n except GeneratorExit: \n for _ in es:\n target.send(es[i])\n i += 1\n if i == len(es):\n i = 0\n target.close() \n\n return _dagpype_internal_fn_act", "title": "" }, { "docid": "888093410dd454766dc942a1e1d74c8", "score": "0.4960165", "text": "def __call__(self, frame):\n for step in self:\n frame = step(frame)\n return frame", "title": "" }, { "docid": "2410fb2403d04a547c66a2994c6f7235", "score": "0.4960044", "text": "def __init__(self, frames):\n self._frames = frames\n 
self._out = None", "title": "" }, { "docid": "d89db7cf5460104a06de07322490e5da", "score": "0.4922473", "text": "def prepare_target(self, target_df):\n pass", "title": "" }, { "docid": "fa091da7ca730cbcde6e3b7c151ad75c", "score": "0.49066618", "text": "def spawn(self):\n self.check_spawn_valid()\n new_frame = Frame(\"spawn\")\n new_frame.worker = self\n new_frame.attach(self.deque.youngest_frame)\n new_stacklet = Stacklet(new_frame)\n self.deque.push(new_stacklet)", "title": "" }, { "docid": "8d9c9412ea1e36a628eaef77d3f95fc0", "score": "0.49056745", "text": "def createLoop(self):\n loopName = [self.le_LoopName.text()]\n pipes = []\n while self.tree_LoopPipes.topLevelItemCount() > 0:\n pipe = self.tree_LoopPipes.takeTopLevelItem(0)\n pipes.append([pipe.text(1)])\n loop = qtw.QTreeWidgetItem(loopName)\n loop.setFlags(qtc.Qt.ItemIsSelectable | qtc.Qt.ItemIsEditable | qtc.Qt.ItemIsDragEnabled | qtc.Qt.ItemIsUserCheckable | qtc.Qt.ItemIsEnabled)\n for p in pipes:\n itm = qtw.QTreeWidgetItem(p)\n itm.setFlags(qtc.Qt.ItemIsSelectable | qtc.Qt.ItemIsEditable | qtc.Qt.ItemIsDragEnabled | qtc.Qt.ItemIsUserCheckable | qtc.Qt.ItemIsEnabled)\n loop.addChild(itm)\n self.tree_Loops.addTopLevelItem(loop)\n self.Controller.addLoop(loop, PN=self.Model)", "title": "" }, { "docid": "fe7ee19e506a0d5cf2c2ae609271c0a8", "score": "0.49001798", "text": "def generate_targets(self, targets, plot_samples=False):\n print('Generating targets...')\n target_images = []\n target_poses = []\n if \"Billiard\" in self.env_tag:\n self.env.env.params.RANDOM_BALL_INIT_POSE = True\n #elif \"Ant\" in self.env_tag:\n # self.env.render()\n\n with progressbar.ProgressBar(max_value=targets) as bar:\n for k in range(targets): # Generate target datapoints\n obs = self.env.reset()\n\n if \"Ant\" in self.env_tag:\n for step in range(300):\n self.env.step(self.env.action_space.sample())\n CoM = np.array([self.env.env.data.qpos[:2]])\n t_pose = CoM\n if np.any(np.abs(CoM) >= np.array([3, 3])):\n break\n elif 'Billiard' in self.env_tag:\n t_pose = obs[0]\n\n tmp = self.env.render(mode='rgb_array')\n target_images.append(tmp)\n target_poses.append(t_pose)\n\n bar.update(k)\n\n target_images = np.stack(target_images)\n target_poses = np.stack(target_poses)\n\n if plot_samples:\n fig, ax = plt.subplots(4, 5)\n k = 0\n for i in range(4):\n for j in range(5):\n a = target_images[k]\n ax[i, j].imshow(a)\n ax[i, j].set_title(k)\n ax[i, j].set_ylabel(target_poses[k][1])\n ax[i, j].set_xlabel(target_poses[k][0])\n k += 1\n plt.subplots_adjust(left=0.02, right=.99, top=0.95, bottom=0.02, wspace=0.4)\n plt.show()\n\n if \"Billiard\" in self.env_tag:\n self.env.env.params.RANDOM_BALL_INIT_POSE = False\n print('Done.')\n\n return target_images, np.squeeze(target_poses)", "title": "" }, { "docid": "5de735ee56d95a6cde19a28d824d24c8", "score": "0.48799157", "text": "def end_pipe_positions(self):\n for y, row in enumerate(self.get_board_layout()):\n for x, tile in enumerate(row):\n position = (y, x)\n if tile.get_name() == START_PIPE:\n self._starting_point = position\n elif tile.get_name() == END_PIPE:\n self._ending_point = position", "title": "" }, { "docid": "39099f679018cc9116c79530af11010f", "score": "0.4872783", "text": "def __init__(self, sources=[], outputs=[], delay=10):\n self.outputs = outputs\n self.sources = sources\n self.delay = delay", "title": "" }, { "docid": "e60cdf5e039e43b8fdf3f24b8faf7f74", "score": 
"0.4857962", "text": "def preprocess_targets(targets, i):\n\t\t\t# _shard_features called to ensure that the variable names match\n\t\t\ttargets = self._shard_features({\"targets\": targets})[\"targets\"]\n\t\t\twith tf.variable_scope(target_modality.name):\n\t\t\t\ttargets = target_modality.targets_bottom_sharded(targets, dp)[0]\n\t\t\ttargets = common_layers.flatten4d3d(targets)\n\n\t\t\t# TODO(llion): Explain! Is this even needed?\n\t\t\ttargets = tf.cond(\n\t\t\t\ttf.equal(i, 0), lambda: tf.zeros_like(targets), lambda: targets)\n\n\t\t\tif hparams.pos == \"timing\":\n\t\t\t\ttargets += timing_signal[:, i:i + 1]\n\t\t\treturn targets", "title": "" }, { "docid": "0bd50979ed84a9e7360f1a66796a0105", "score": "0.48340333", "text": "def __init__(self, *args):\n this = _digital_swig.new_digital_ofdm_frame_sink_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "title": "" }, { "docid": "50ab1d1c83b76f819dbbf9f49751ff0b", "score": "0.48298928", "text": "def grab_frames(env):\n frames = []\n for i in range(4):\n frames.append(grab_frame(env, camera_id=i))\n \n # stack frames\n frame = np.concatenate(\n (np.concatenate((frames[0], frames[1]), axis=0),\n np.concatenate((frames[2], frames[3]), axis=0),),\n axis=1)\n return frame", "title": "" }, { "docid": "6234ed46928fdd7fdb2556c7070ec60d", "score": "0.48042303", "text": "def _pass_pipe(self):\n\n for pipe in self.pipes:\n\n # If the pipe is passed create a new one\n if self.birds and (not pipe.passed and pipe.x < self.birds[0].x):\n # Update score\n self.game.score += 1\n pipe.passed = True\n self.pipes.append(Pipe(x=INITIAL_PIPE_X))\n\n # Increase the fitness score of the genome (bird) still alive\n for genome in self.genomes:\n genome.fitness += FITNESS_INCREASE_PASS_PIE", "title": "" }, { "docid": "9fba9492744d666f905576c08359da98", "score": "0.47645664", "text": "def add_target_states(graph, resource, targets, to_df=False, final_state=None):\n targets = targets or []\n key_column = resource.spec.entities[0].name\n timestamp_key = resource.spec.timestamp_key\n features = resource.spec.features\n table = None\n\n for target in targets:\n driver = get_target_driver(target, resource)\n table = driver.get_table_object() or table\n driver.update_resource_status()\n driver.add_writer_state(\n graph,\n target.after_state or final_state,\n features=features,\n key_column=key_column,\n timestamp_key=timestamp_key,\n )\n if to_df:\n # add dataframe target, will return a dataframe\n driver = DFTarget()\n driver.add_writer_state(\n graph,\n final_state,\n features=features,\n key_column=key_column,\n timestamp_key=timestamp_key,\n )\n\n return table", "title": "" }, { "docid": "1831b8d19ed88228a208590694a86516", "score": "0.4739814", "text": "def create(name: str) -> ReceptionTarget:\n recep_target = ReceptionTarget()\n recep_target.name = name\n recep_target.save()\n return recep_target", "title": "" }, { "docid": "3c6624ae45243ca4d68d16c1608ffd37", "score": "0.47231865", "text": "def build_video_model(self, all_frames, all_actions, latent):\n hparams = self.hparams\n\n res_frames = []\n internal_states = [None] * 7\n\n pred_image = all_frames[0]\n for i in range(self.video_len):\n cur_action = all_actions[i]\n\n done_warm_start = (i >= hparams.video_num_input_frames)\n if done_warm_start:\n if self.is_training:\n cur_frame = self.scheduled_sample_prob(all_frames[i], pred_image)\n else:\n cur_frame = pred_image\n else:\n cur_frame = all_frames[i]\n\n with tf.variable_scope(\"main\", reuse=tf.AUTO_REUSE):\n pred_image, 
internal_states, _ = self.construct_predictive_tower(\n cur_frame, cur_action, None, internal_states, latent)\n res_frames.append(pred_image)\n return res_frames", "title": "" }, { "docid": "7547398a9d3125b038805bd76bef6086", "score": "0.4718499", "text": "def create_signal_target(data, events, fs, name_to_start_codes, epoch_ival_ms,\n name_to_stop_codes=None):\n if name_to_stop_codes is None:\n return _create_signal_target_from_start_and_ival(\n data, events, fs, name_to_start_codes, epoch_ival_ms)\n else:\n return _create_signal_target_from_start_and_stop(\n data, events, fs, name_to_start_codes, epoch_ival_ms,\n name_to_stop_codes)", "title": "" }, { "docid": "9ad83052451f70b1c2e906addd91b9f5", "score": "0.47161567", "text": "def build_pipe(self, hash_size = 100):\n \n self.read_yaml_file()\n self.read_csv()\n self.fill_na()\n self.data.drop(['msisdn'],axis=1,inplace=True)\n self.hash_list()\n self.pipeline(hash_size)\n \n self.full_pipeline = ColumnTransformer(\n transformers=[\n ('num', self.num_pipeline, self.num),\n ('cat', self.cat_pipeline, self.low_cat),\n ('hash', self.hash_pipeline, self.hash_features)\n ])\n \n self.X = self.data\n \n self.full_pipeline.fit(self.X)\n \n self.X = self.full_pipeline.transform(self.X)\n \n print(self.X.shape)\n return self.X, self.full_pipeline", "title": "" }, { "docid": "fbb9f4d77345ae96b56489dd6837cde8", "score": "0.46932665", "text": "def targets(self):\r\n\r\n\r\n if self.verbose:\r\n print (\"Tilt: \" + str(self.tilt) + \" Angle: \" + str(self.final_angle) + \" Resolution: \" + str(self.resolution))\r\n\r\n points = []\r\n costilt = math.cos(math.radians(self.tilt))\r\n sinangle = math.sin(math.radians(self.final_angle))\r\n cosangle = math.cos(math.radians(self.final_angle))\r\n for i, dist in enumerate(self.properties[\"TargetDistances\"]):\r\n # iterate to create the Points for the targets\r\n points.append( \r\n ( self.final_x + (dist[0] * cosangle * costilt + dist[1] * sinangle) / self.resolution,\r\n self.final_y + (dist[1] * cosangle - dist[0] * sinangle * costilt) / self.resolution ) )\r\n\r\n if self.show_images:\r\n pyplot.set_cmap(pyplot.gray())\r\n pyplot.imshow(self.image_final)\r\n pyplot.title(\"Final image with \" + \r\n (\"target points in green\" if self.values_are_ground_truth else \r\n \"assumed target points in blue\"))\r\n pyplot.scatter(\r\n tuple(p[0] for p in points), \r\n tuple(p[1] for p in points), \r\n marker='x', s=50, \r\n color='green' if self.values_are_ground_truth else 'blue')\r\n pyplot.show()\r\n\r\n if self.properties[\"Mode\"] == \"end-to-end\":\r\n points.append((self.final_angle, self.tilt, self.resolution))\r\n return points", "title": "" }, { "docid": "082ad9ab85c7b913c09bda9f92b28d0c", "score": "0.46888518", "text": "def test_frame_output(self):\n # Requires: mobs.Sequence object with video output capabilities\n # mobs.Event object with src frame to rec frame conversion\n # ability to create multiple frame grabs from single video frame\n sequence = ryglist.input_edls(self.edls)\n sequence.mediapath = get_test_files('ryg_three_tracks.mxf')\n outputdir = tempfile.TemporaryDirectory()\n sequence.output_event_posters(outputdir=outputdir.name, offset='middle')\n for event in sequence.flatten():\n jpeg = f'{event.ref_name}.jpg'\n output_file = os.path.join(outputdir.name, jpeg)\n self.assertTrue(os.path.isfile(output_file))\n self.assertEqual(os.stat(output_file).st_size,\n os.stat(get_control(jpeg)).st_size)", "title": "" }, { "docid": "a065361fd340017a41fc2f621bd0bcec", "score": "0.4680712", "text": 
"def make_pipeline():\n \n # Base universe set to the Q500US\n base_universe = Q1500US()\n\n # Factor of yesterday's close price.\n yesterday_close = USEquityPricing.close.latest\n \n pipe = Pipeline(\n screen = base_universe,\n columns = {\n 'close': yesterday_close,\n }\n )\n return pipe", "title": "" }, { "docid": "bd5d8d54b5362de3551eca22604f2678", "score": "0.46780723", "text": "def create_pipe(net, from_bus, to_bus, length_m, diameter_m, name, material=\"steel\", in_service=True):\n _try_existing_bus(net, from_bus)\n _try_existing_bus(net, to_bus)\n _check_level(net, from_bus, to_bus)\n\n idx = len(net.pipe.index)\n net.pipe.loc[idx] = [name, from_bus, to_bus, length_m, diameter_m, material, in_service]\n return name", "title": "" }, { "docid": "6bd6c6d55635be3dbf7b7568ad67cef3", "score": "0.4671852", "text": "def makepipebranch(idf, bname):\n # make the pipe component first\n pname = \"%s_pipe\" % (bname,)\n apipe = makepipecomponent(idf, pname)\n # now make the branch with the pipe in it\n abranch = idf.newidfobject(\"BRANCH\", Name=bname)\n abranch.Component_1_Object_Type = \"Pipe:Adiabatic\"\n abranch.Component_1_Name = pname\n abranch.Component_1_Inlet_Node_Name = apipe.Inlet_Node_Name\n abranch.Component_1_Outlet_Node_Name = apipe.Outlet_Node_Name\n abranch.Component_1_Branch_Control_Type = \"Bypass\"\n return abranch", "title": "" }, { "docid": "4918379e101a917d88c26f55250538f3", "score": "0.4666622", "text": "def get_synapse_targets(self):", "title": "" }, { "docid": "3848c7fe8f8c6af5152f515883cbd34a", "score": "0.46491164", "text": "def build_merged_model(self, all_frames, all_actions, all_rewards, latent):\n hparams = self.hparams\n\n res_frames, res_rewards = [], []\n internal_states = [None] * 7\n\n pred_image = all_frames[0]\n pred_reward = all_rewards[0]\n for i in range(self.video_len):\n cur_action = all_actions[i]\n\n done_warm_start = (i >= hparams.video_num_input_frames)\n if done_warm_start:\n if self.is_training:\n cur_frame = self.scheduled_sample_prob(all_frames[i], pred_image)\n cur_reward = self.scheduled_sample_prob(all_rewards[i], pred_reward)\n else:\n cur_frame = pred_image\n cur_reward = pred_reward\n else:\n cur_frame = all_frames[i]\n cur_reward = all_rewards[i]\n\n with tf.variable_scope(\"main\", reuse=tf.AUTO_REUSE):\n pred_image, internal_states, mids = self.construct_predictive_tower(\n cur_frame, cur_action, cur_reward, internal_states, latent)\n if hparams.reward_model_stop_gradient:\n mids = [tf.stop_gradient(x) for x in mids]\n pred_reward = self.reward_prediction(mids)\n\n res_frames.append(pred_image)\n res_rewards.append(pred_reward)\n\n return [res_frames, res_rewards]", "title": "" }, { "docid": "4ab57fb936d17c796e937df740d6fe7e", "score": "0.46416813", "text": "def __init__(self, pipeline, sink):\n self.pipeline = pipeline\n self.sink = sink", "title": "" }, { "docid": "4ab57fb936d17c796e937df740d6fe7e", "score": "0.46416813", "text": "def __init__(self, pipeline, sink):\n self.pipeline = pipeline\n self.sink = sink", "title": "" }, { "docid": "57cf261bb44b54162d6333631bed5c65", "score": "0.46412307", "text": "def source(env, mean_ia_time, mean_srv_time, server, delays, number, trace):\r\n for i in range(number):\r\n ia_time = random.expovariate(1.0 / mean_ia_time)\r\n srv_time = random.expovariate(1.0 / mean_srv_time)\r\n pkt = packet(env, 'Packet-%d' % i, server, srv_time, delays, trace)\r\n env.process(pkt)\r\n yield env.timeout(ia_time)", "title": "" }, { "docid": "5947e1433689088e93a3992c59d9c214", "score": "0.46404457", "text": 
"def __init__(self,filename):\n\n self.built=False\n self.filename=filename\n Target.instances.append(self)", "title": "" }, { "docid": "b3b9644e198bbe09c8e60f3d3622b5ba", "score": "0.46402997", "text": "def create_pipeline(self):\n\n # Here the video capture pipeline gets created. Elements that are\n # referenced in other places in the application are given a name, so\n # that they could later be retrieved.\n #\n # The pipeline consists of the following elements:\n #\n # tcambin: This is the main capture element that handles all basic\n # operations needed to capture video images from The Imaging Source\n # cameras.\n #\n # queue: The queue is a FIFO buffer element. It is set to a capacity of\n # 2 buffers at maximum to prevent it from filling up indifinitely\n # should the camera produce video frames faster than the host computer\n # can handle. The creates a new thread on the downstream side of the\n # pipeline so that all elements coming after the queue operate in\n # separate thread.\n #\n # videoconvert: This element converts the videoformat coming from the\n # camera to match the specification given by the \"capsfilter\" element\n # that comes next in the pipeline\n #\n # capsfilter: This element specifies the video format. This example just\n # specifies a BGRx pixel format which means that we just want a color\n # image format without any preferences on width, height or framerate.\n # The tcambin will automatically select the biggest image size\n # supported by the device and sets the maximum frame rate allowed for\n # this format. If the camera only supports monochrome formats they get\n # converted to BGRx by the preceeding 'videoconvert' element.\n #\n # videoconvert: The second videoconvert element in the pipeline converts\n # the BGRx format to a format understood by the video display element.\n # Since the gtksink should natively support BGRx, the videoconvert\n # element will just pass the buffers through without touching them.\n #\n # gtksink: This element displays the incoming video buffers. It also\n # stores a reference to the last buffer at any time so it could be\n # saved as a still image\n pipeline = Gst.parse_launch(\n 'tcambin name=src ! queue max_size_buffers=2 ! videoconvert ! capsfilter caps=\"video/x-raw,format=BGRx\" ! videoconvert ! gtksink name=sink')\n\n # Enable the \"last-sample\" support in the sink. 
This way the last buffer\n # seen by the display element could be retrieved when saving a still\n # image is requested\n sink = pipeline.get_by_name(\"sink\")\n sink.set_property(\"enable-last-sample\", True)\n\n return pipeline", "title": "" }, { "docid": "8840320c92cc98ad56ff847e73f3bb12", "score": "0.4637885", "text": "def make_bird_jump(bird, pipes):\n index = 0\n if len(pipes) > 1 and bird.x > (pipes[index].x + pipes[index].PIPE_TOP.get_width()):\n index = 1\n value = bird.W1*(pipes[index].x - bird.x) + bird.W2*(pipes[index].bottom - bird.y) + bird.W3*(bird.y - pipes[index].height) + bird.B\n if math.tanh(value) > 0:\n bird.jump()", "title": "" }, { "docid": "dff039568f37758b461d4ccfc45446f3", "score": "0.46234575", "text": "def generate_from_template_emitter(target, source, env):\n base = splitext(pbasename(str(source[0])))[0]\n t = pjoin(pdirname(str(target[0])), base)\n return ([t], source)", "title": "" }, { "docid": "82c2a685b3b1f0d2c3bd0c79c6fbbb88", "score": "0.46191368", "text": "def __init__(self, targetname=None, list_datasets=None, list_pointings=None,\n pointing_table=None, pointing_table_format='ascii', pointing_table_folder='',\n folder_config=\"\", rc_filename=None, cal_filename=None,\n suffix=\"\", name_offset_table=None, folder_offset_table=None,\n log_filename=\"MusePipeCombine.log\", verbose=True, debug=False, **kwargs):\n # Verbose option\n self.verbose = verbose\n self._debug = debug\n if self._debug:\n upipe.print_warning(\"In DEBUG Mode [more printing]\")\n check = kwargs.pop(\"check\", True)\n\n # Warnings for astropy\n self.warnings = kwargs.pop(\"warnings\", 'ignore')\n if self.warnings == 'ignore':\n warnings.simplefilter('ignore', category=AstropyWarning)\n\n # Setting the default attibutes --------------------------------\n self.targetname = targetname\n self.__phangs = kwargs.pop(\"PHANGS\", False)\n if self.__phangs:\n self.filter_list = kwargs.pop(\"filter_list\",\n default_PHANGS_filter_list)\n else:\n self.filter_list = kwargs.pop(\"filter_list\",\n default_filter_list)\n\n self.combined_folder_name = kwargs.pop(\"combined_folder_name\", \"Combined\")\n self.vsystemic = float(kwargs.pop(\"vsystemic\", 0.))\n\n # Including or not the masked Pixtables in place of the original ones\n self.prefix_masked_pixtables = kwargs.pop(\"prefix_masked_pixtables\", \"tmask\")\n self.use_masked_pixtables = kwargs.pop(\"use_masked_pixtables\", False)\n\n # Setting other default attributes -------------------------------\n if log_filename is None:\n log_filename = \"log_{timestamp}.txt\".format(timestamp=upipe.create_time_name())\n upipe.print_info(\"The Log file will be {0}\".format(log_filename))\n self.log_filename = log_filename\n self.suffix = suffix\n self.add_targetname = kwargs.pop(\"add_targetname\", True)\n # Checking input datasets and pixtables\n self._pixtab_in_comb_folder = kwargs.pop(\"pixtab_in_comb_folder\", True)\n self._pixtable_type = kwargs.pop(\"pixtable_type\", \"REDUCED\")\n # Using scipost or exp_combine\n self.use_scipost = kwargs.pop(\"use_scipost\", True)\n\n # End of parameter settings =======================================\n\n # Init of the subclasses\n PipeRecipes.__init__(self, **kwargs)\n SofPipe.__init__(self)\n\n # ---------------------------------------------------------\n # Setting up the folders and names for the data reduction\n # Can be initialised by either an rc_file, \n # or a default rc_file or hardcoded defaults.\n self.pipe_params = InitMuseParameters(folder_config=folder_config,\n rc_filename=rc_filename,\n 
cal_filename=cal_filename,\n verbose=verbose)\n\n # Setting up the relative path for the data, using Galaxy Name + Dataset\n self.pipe_params.data = \"{0}/{1}/\".format(self.targetname,\n self.combined_folder_name)\n\n self.pipe_params.init_default_param(dict_combined_folders)\n self._dict_combined_folders = dict_combined_folders\n # List of datasets to process\n self.list_datasets = self._check_list_datasets(list_datasets)\n\n # Setting all the useful paths\n self.set_fullpath_names()\n self.paths.log_filename = joinpath(self.paths.log, log_filename)\n\n # and Recording the folder where we start\n self.paths.orig = os.getcwd()\n # END Set up params =======================================\n\n # =========================================================== \n # Create the Combined folder\n # Making the output folders in a safe mode\n if self.verbose:\n upipe.print_info(\"Creating directory structure\")\n upipe.safely_create_folder(self.paths.data, verbose=verbose)\n\n # Go to the Combined Folder\n self.goto_folder(self.paths.data)\n\n # Now create full path folder \n for folder in self._dict_combined_folders:\n upipe.safely_create_folder(self._dict_combined_folders[folder], verbose=verbose)\n\n # Setting of pointing table ---------------------------------------\n if check:\n self.get_all_pixtables()\n self.assign_pointing_table(input_table=pointing_table,\n table_format=pointing_table_format,\n folder=pointing_table_folder)\n self.list_pointings = self._check_list_pointings(list_pointings)\n self.filter_pixtables_with_list(overwrite=False)\n\n # Checking input offset table and corresponding pixtables\n self._check_offset_table(name_offset_table, folder_offset_table)\n\n # Going back to initial working directory\n self.goto_origfolder()", "title": "" }, { "docid": "5502e73fb445c7b876d34ba614f9bf9b", "score": "0.46120638", "text": "def gen_bp_target(imgs, model, target_index=None, classes=get_imagenet_classes(), device='cuda', prep=True):\n\n # Get model and forward pass \n bp, probs, ids, images = gen_model_forward(imgs, model, device=device, prep=prep, type='bp')\n \n ids_ = torch.LongTensor([[x] for x in target_index]).to(device)\n bp.backward(ids=ids_)\n gradients = bp.generate()\n masks = []\n for j in range(len(images)):\n mask = save_gradient(\n gradient=gradients[j],\n )\n mask /= np.max(mask)\n masks += [mask]\n if len(masks) == 1:\n return masks[0]\n return masks", "title": "" }, { "docid": "b509af9e9f1dbf3cfe1a3d971f032b28", "score": "0.46111888", "text": "def _newpipe(encoder, decoder):\n r, w = os.pipe()\n return (_GIPCReader(r, decoder), _GIPCWriter(w, encoder))", "title": "" }, { "docid": "29e32ea50c627b9ecc46686c68950a09", "score": "0.46067962", "text": "def test_sequencing(self):\n os.mkdir(\"res/\")\n \n py_init_proc_c()\n py_start_proc_c()\n py_sequence_init(0)\n \n for frame in xrange(497, 598):\n for cam in xrange(4):\n img = imread(\n \"scene83_event1/cam%d_Scene83_%04d\" % (cam + 1, frame))\n py_set_img(img, cam)\n py_sequence_loop(0, frame)\n \n self.failUnless(compare_directories(\"res/\", \"after_sequencing/\"))\n self.failUnless(compare_directories(\n \"scene83_event1/\", \"after_sequencing_targets/\",\n mask=re.compile(\"_targets$\")))", "title": "" }, { "docid": "776b7667624b8756ff79ae8b0795b3a3", "score": "0.4603603", "text": "def _frame_worker_target_gen(img_queue, rec_queue, send_queue, ms_per_frame, frame_size,\r\n dpi, scenes, traj, s, markers, scalar_mapping, norm, cmap, logfile):\r\n worker = GenFrameWorker(\r\n myq.ZeroMQQueue.deser(img_queue), 
myq.ZeroMQQueue.deser(rec_queue),\r\n myq.ZeroMQQueue.deser(send_queue), ms_per_frame, frame_size, dpi, scenes, traj, s,\r\n mutils.get_fixed_single(markers)(), mutils.get_fixed_single(scalar_mapping)(),\r\n mutils.get_fixed_single(norm)(), cmap\r\n )\r\n\r\n try:\r\n worker.do_all()\r\n except:\r\n traceback.print_exc()\r\n with open(logfile, 'w') as outfile:\r\n traceback.print_exc(file=outfile)\r\n raise", "title": "" }, { "docid": "9ab4ea2216cd613a8349227affa46d64", "score": "0.4597896", "text": "def makepipecomponent(idf, pname):\n apipe = idf.newidfobject(\"Pipe:Adiabatic\".upper(), Name=pname)\n apipe.Inlet_Node_Name = \"%s_inlet\" % (pname,)\n apipe.Outlet_Node_Name = \"%s_outlet\" % (pname,)\n return apipe", "title": "" }, { "docid": "278f17b310feff9f34945aa702b346dd", "score": "0.4588126", "text": "def create_bolt(self, bolt):\n bolt._nFrames = 3\n bolt._framesPerSecond = 7\n bolt._frame = 1\n bolt._world_bound = False\n bolt.get_current_frame()", "title": "" }, { "docid": "d141f1f6a66bbf239d2edcc410d4c953", "score": "0.45780647", "text": "def pipeline():\n samples = getInputSamples()\n model = buildModel()\n history = trainModel(model, samples)", "title": "" }, { "docid": "6d90e7086957da6540c171e42c8c7d73", "score": "0.45568675", "text": "def prepare_test_frames(self, idx):\n results = copy.deepcopy(self.video_infos[idx])\n img_key = results['img_key']\n\n results['filename_tmpl'] = self.filename_tmpl\n results['modality'] = self.modality\n results['start_index'] = self.start_index\n results['timestamp_start'] = self.timestamp_start\n results['timestamp_end'] = self.timestamp_end\n results['proposals'] = self.proposals[img_key][:self.num_max_proposals]\n return self.pipeline(results)", "title": "" }, { "docid": "3bdad4aa02750954ec5ee1b6ab19159d", "score": "0.4553781", "text": "def to(self, target=None, *args, **kws):\n if target is None:\n target = Frame\n return target(self, *args, **kws)", "title": "" }, { "docid": "f29a32e7648eb4a09588471c856f26b6", "score": "0.45524555", "text": "def __init__(self):\n self.target = None", "title": "" }, { "docid": "3ad548162600b4cdb2ef99c9821cff7e", "score": "0.45519668", "text": "def __init__(self, env):\n super(Skip4FramesAndReturnMaxFrom2FramesWrapper, self).__init__(env)\n self.frames_buffer = collections.deque(maxlen=2)", "title": "" }, { "docid": "791845ec0d1b014cb04a2464360c1be0", "score": "0.4550828", "text": "def logFrameTask(self, t):\r\n\r\n if self.lastEyeSample:\r\n eyesample = \"[%.3f,%.3f]\\n\" % (self.lastEyeSample[0], self.lastEyeSample[1])\r\n self.replayLog.logEvent(\"E:\" + eyesample)\r\n\r\n # grab all parachutes (name,value) and save their position\r\n for p_name, par in self.parachutes.items():\r\n np = par.modelNP\r\n if (np.getX() != -1000 and par.hitted == False ):\r\n position = \"[\\'%s\\',%.3f,%.3f,%.3f]\\n\" % (p_name, np.getX(), np.getY(), np.getZ())\r\n self.replayLog.logEvent(\"P:\" + position)\r\n # grab heading and pitch of the cannon\r\n hpr = self.cannon.getHpr()\r\n if (self.lastHpr != hpr):\r\n hpr_str = \"[%.3f,%.3f]\\n\" % (hpr[0], hpr[1])\r\n self.replayLog.logEvent(\"C:\" + hpr_str)\r\n self.lastHpr = hpr\r\n\r\n crossPos = self.crosshairNP.getPos()\r\n if (self.lastCrossHairPos != crossPos):\r\n crossPos_str = \"[%.3f,%.3f,%.3f]\\n\" % (crossPos[0], crossPos[1], crossPos[2])\r\n self.replayLog.logEvent(\"T:\" + crossPos_str)\r\n self.lastCrossHairPos = crossPos\r\n\r\n\r\n #for b in range(len(self.bullets)):\r\n # np = self.bullets[b]\r\n # if (np.getX() != 0):\r\n # position = 
\"[%d,%.7f,%.7f,%.7f]\" % (b,np.getX(),np.getY(),np.getZ())\r\n # self.replayLog.logEvent(\"B:\"+position+\"\\n\", curr_t)\r\n return Task.cont", "title": "" }, { "docid": "0873e6be484ca3868ce51953e1d64e14", "score": "0.4548634", "text": "def rule_create_beam(self, frame, length, width, height,name):\n self.new_beam = Beam(frame, length, width, height,name)\n self.beams.append(self.new_beam)\n return self.new_beam", "title": "" }, { "docid": "cff009272e14dfe499a29ec64fba2820", "score": "0.4545301", "text": "def test_backtracking(self):\n shutil.copytree(\"after_tracking/\", \"res/\")\n for fname in glob.iglob(\"after_tracking_targets/*\"):\n shutil.copy(fname, \"scene83_event1/\")\n \n py_init_proc_c()\n py_start_proc_c()\n py_trackback_c()\n \n self.failUnless(compare_directories(\"res/\", \"after_backtracking/\"))\n self.failUnless(compare_directories(\n \"scene83_event1/\", \"after_backtracking_targets/\",\n mask=re.compile(\"_targets$\")))", "title": "" }, { "docid": "9fb3d75d76a3c73d14e9b61c222fdd69", "score": "0.45413336", "text": "def create_data_flow():\n streams_to_send_top_on = []\n camera_setups = create_camera_setups()\n # Creates a dataflow stream for each camera.\n camera_streams = {}\n for name in camera_setups:\n camera_streams[name] = erdos.IngestStream()\n # Creates a stream on which the agent sends the high-level route the\n # agent must follow in the challenge.\n global_trajectory_stream = erdos.IngestStream()\n # Creates a stream on which the agent sends the open drive stream it\n # receives when it executes in the MAP track.\n open_drive_stream = erdos.IngestStream()\n # Creates a stream on which the agent sends point cloud messages it\n # receives from the LiDAR sensor.\n point_cloud_stream = erdos.IngestStream()\n imu_stream = erdos.IngestStream()\n gnss_stream = erdos.IngestStream()\n route_stream = erdos.IngestStream()\n time_to_decision_loop_stream = erdos.LoopStream()\n\n if FLAGS.localization:\n # Pylot localization is enabled. Add the localization operator to\n # the dataflow. The operator receives GNSS and IMU messages, and\n # uses an Extended Kalman Filter to compute poses.\n pose_stream = pylot.operator_creator.add_localization(\n imu_stream, gnss_stream, route_stream)\n else:\n # The agent either directly forwards the poses it receives from\n # the challenge, which are noisy, or the perfect poses if the\n # --perfect_localization flag is set.\n pose_stream = erdos.IngestStream()\n\n # Stream on which the obstacles are sent when the agent is using perfect\n # detection.\n perfect_obstacles_stream = erdos.IngestStream()\n if FLAGS.simulator_obstacle_detection:\n # Execute with perfect perception. In this configuration, the agent\n # directly gets the location of the other agents from the simulator.\n # This configuration is meant for testing and debugging.\n obstacles_stream = perfect_obstacles_stream\n elif any('efficientdet' in model\n for model in FLAGS.obstacle_detection_model_names):\n # Add an operator that runs EfficientDet to detect obstacles.\n obstacles_stream = pylot.operator_creator.\\\n add_efficientdet_obstacle_detection(\n camera_streams[CENTER_CAMERA_NAME],\n time_to_decision_loop_stream)[0]\n if not (FLAGS.evaluate_obstacle_detection\n or FLAGS.evaluate_obstacle_tracking):\n streams_to_send_top_on.append(perfect_obstacles_stream)\n else:\n # Add an operator that uses one of the Tensorflow model zoo\n # detection models. 
By default, we use FasterRCNN.\n obstacles_stream = pylot.operator_creator.add_obstacle_detection(\n camera_streams[CENTER_CAMERA_NAME],\n time_to_decision_loop_stream)[0]\n if not (FLAGS.evaluate_obstacle_detection\n or FLAGS.evaluate_obstacle_tracking):\n streams_to_send_top_on.append(perfect_obstacles_stream)\n\n # Stream on which the traffic lights are sent when the agent is\n # using perfect traffic light detection.\n perfect_traffic_lights_stream = erdos.IngestStream()\n if FLAGS.simulator_traffic_light_detection:\n # In this debug configuration, the agent is using perfectly located\n # traffic lights it receives directly from the simulator. Therefore,\n # there's no need to a traffic light detector.\n traffic_lights_stream = perfect_traffic_lights_stream\n camera_streams[TL_CAMERA_NAME] = erdos.IngestStream()\n streams_to_send_top_on.append(camera_streams[TL_CAMERA_NAME])\n else:\n # Adds a traffic light detector operator, which uses the camera with\n # the small fov.\n traffic_lights_stream = \\\n pylot.operator_creator.add_traffic_light_detector(\n camera_streams[TL_CAMERA_NAME], time_to_decision_loop_stream)\n # Adds an operator that finds the world location of the traffic lights.\n # The operator synchronizes LiDAR point cloud readings with camera\n # frames, and uses them to compute the depth to traffic light bounding\n # boxes.\n traffic_lights_stream = \\\n pylot.operator_creator.add_obstacle_location_finder(\n traffic_lights_stream, point_cloud_stream, pose_stream,\n camera_setups[TL_CAMERA_NAME])\n # We do not send perfectly located traffic lights in this\n # configuration. Therefore, ensure that the stream is \"closed\"\n # (i.e., send a top watermark)\n streams_to_send_top_on.append(perfect_traffic_lights_stream)\n\n vehicle_id_stream = erdos.IngestStream()\n if not (FLAGS.perfect_obstacle_tracking or FLAGS.perfect_localization):\n # The vehicle_id_stream is only used when perfect localization\n # or perfect obstacle tracking are enabled.\n streams_to_send_top_on.append(vehicle_id_stream)\n\n # Adds an operator for tracking detected agents. The operator uses the\n # frames from the center camera, and the bounding boxes found by the\n # obstacle detector operator.\n obstacles_tracking_stream = pylot.component_creator.add_obstacle_tracking(\n camera_streams[CENTER_CAMERA_NAME],\n camera_setups[CENTER_CAMERA_NAME],\n obstacles_stream,\n depth_stream=point_cloud_stream,\n vehicle_id_stream=vehicle_id_stream,\n pose_stream=pose_stream,\n ground_obstacles_stream=perfect_obstacles_stream,\n time_to_decision_stream=time_to_decision_loop_stream)\n\n if FLAGS.execution_mode == 'challenge-sensors':\n # The agent is running is sensors-only track. Therefore, we need\n # to add a lane detector because the agent does not have access to\n # the OpenDrive map.\n lanes_stream = pylot.operator_creator.add_lanenet_detection(\n camera_streams[LANE_CAMERA_NAME])\n else:\n # The lanes stream is not used when running in the Map track.\n # We add the stream to the list of streams that are not used, and\n # must be manually \"closed\" (i.e., send a top watermark).\n lanes_stream = erdos.IngestStream()\n streams_to_send_top_on.append(lanes_stream)\n\n # The agent uses a linear predictor to compute future trajectories\n # of the other agents.\n prediction_stream, _, _ = pylot.component_creator.add_prediction(\n obstacles_tracking_stream,\n vehicle_id_stream,\n time_to_decision_loop_stream,\n pose_stream=pose_stream)\n\n # Adds a planner to the agent. 
The planner receives the pose of\n # the ego-vehicle, detected traffic lights, predictions for other\n # agents, the route the agent must follow, and the open drive data if\n # the agent is executing in the Map track, or detected lanes if it is\n # executing in the Sensors-only track.\n waypoints_stream = pylot.component_creator.add_planning(\n None, pose_stream, prediction_stream, traffic_lights_stream,\n lanes_stream, open_drive_stream, global_trajectory_stream,\n time_to_decision_loop_stream)\n\n if pylot.flags.must_visualize():\n # Adds a visualization dataflow operator if any of the\n # --visualize_* is enabled. The operator creates a pygame window\n # in which the different sensors, detections can be visualized.\n control_display_stream, ingest_streams = \\\n pylot.operator_creator.add_visualizer(\n pose_stream=pose_stream,\n camera_stream=camera_streams[CENTER_CAMERA_NAME],\n tl_camera_stream=camera_streams[TL_CAMERA_NAME],\n point_cloud_stream=point_cloud_stream,\n obstacles_stream=obstacles_stream,\n traffic_lights_stream=traffic_lights_stream,\n tracked_obstacles_stream=obstacles_tracking_stream,\n waypoints_stream=waypoints_stream,\n lane_detection_stream=lanes_stream,\n prediction_stream=prediction_stream)\n streams_to_send_top_on += ingest_streams\n else:\n control_display_stream = None\n\n # Adds a controller which tries to follow the waypoints computed\n # by the planner.\n control_stream = pylot.component_creator.add_control(\n pose_stream, waypoints_stream)\n # The controller returns a stream of commands (i.e., throttle, steer)\n # from which the agent can read the command it must return to the\n # challenge.\n extract_control_stream = erdos.ExtractStream(control_stream)\n\n pylot.component_creator.add_evaluation(vehicle_id_stream, pose_stream,\n imu_stream)\n\n # Operator that computes how much time each component gets to execute.\n # This is needed in Pylot, but can be ignored when running in challenge\n # mode.\n time_to_decision_stream = pylot.operator_creator.add_time_to_decision(\n pose_stream, obstacles_stream)\n time_to_decision_loop_stream.set(time_to_decision_stream)\n\n return (camera_streams, pose_stream, route_stream,\n global_trajectory_stream, open_drive_stream, point_cloud_stream,\n imu_stream, gnss_stream, extract_control_stream,\n control_display_stream, perfect_obstacles_stream,\n perfect_traffic_lights_stream, vehicle_id_stream,\n streams_to_send_top_on)", "title": "" }, { "docid": "1e29e6b5cadaa86cd8a59e4d6ef77ea1", "score": "0.45318547", "text": "def _build_meta(meta: str, pipelines: Iterable[\"Pipeline\"]) -> \"Pipeline\":\n return Pipeline(\n protocol=[\n {\n \"meta\": meta,\n \"pipelines\": [pipeline.protocol for pipeline in pipelines],\n },\n ],\n )", "title": "" }, { "docid": "ad88980c9b9de464209d534a7819c9c1", "score": "0.4524872", "text": "def create_target(self):\n random_target = random.randint(1, 3)\n # TODO: Decide what type of target to create and append it to the list\n '''if random_target == 1:\n self.targets.append(StandardTarget())\n elif random_target == 2:\n self.targets.append(StrongTarget())\n else:\n self.targets.append(SafeTarget())\n '''\n self.targets.append(Target())", "title": "" }, { "docid": "132fbef10497af84701fe2d945e8e1f0", "score": "0.45136467", "text": "def ant_spawn(dico_1) :", "title": "" }, { "docid": "6356581431af201bc6a1b1727790ec15", "score": "0.45120698", "text": "def create_target_for_point(cls, route, point, relative_arrival_delta):\n pass", "title": "" }, { "docid": "c858899cfe696b716d2a077556322d03", 
"score": "0.45109668", "text": "def target(self):", "title": "" }, { "docid": "44901cff115d274aac51940c70926cce", "score": "0.45102447", "text": "def phastcons_pipeline_wrapper(target, args):\n if args.target_genomes is None:\n args.target_genomes = extract_model_tree(args.model)\n if args.ref_fasta_path is None:\n args.ref_fasta_path = get_ref_genome_fasta(args.hal, args.ref_genome, target.getGlobalTempDir())\n if args.pre_extracted is None:\n tmp_ss_path = os.path.join(target.getGlobalTempDir(), 'extracted_sub_alignments')\n target.addChildTargetFn(subset_hal_pipeline, args=(args, tmp_ss_path))\n split_ss_dict = read_subalignment_dir(tmp_ss_path)\n else:\n split_ss_dict = read_subalignment_dir(args.pre_extracted)\n target.setFollowOnTargetFn(phastcons_estimate_models_wrapper, args=(args, split_ss_dict))", "title": "" }, { "docid": "2f0cfc434a79473a1d6e15658a715656", "score": "0.45069364", "text": "def agent_step(self, state_new, command_new, decoder_obs_new):", "title": "" }, { "docid": "bae0f34efdbe9bcfaf8125f62c7fb3aa", "score": "0.4506864", "text": "def create_pipeline(input_model, reference_files):\n exp_type = input_model.meta.exposure.type.lower()\n pipeline = exp_type2transform[exp_type](input_model, reference_files)\n log.info(\"Creating a FGS {0} pipeline with references {1}\".format(\n exp_type, reference_files))\n return pipeline", "title": "" }, { "docid": "2842f6fc5eb3b5ee053e0543a1cdfd5c", "score": "0.45035422", "text": "def _make_stage(self, i_ch, o_ch, num_blocks, stride=1):\n layers = []\n layers.append(Bottleneck(i_ch, o_ch, stride)) # only the first stage in the module need stride=2\n for i in range(1, num_blocks):\n layers.append(Bottleneck(o_ch, o_ch))\n return nn.Sequential(*layers)", "title": "" }, { "docid": "27e1505ad4772cadeb76d21b01fcfff3", "score": "0.4501311", "text": "def generate(cls):\n\n while True:\n cls.stream = cls.video_camera.get_frame()\n cls.video_camera = None\n cls.stream = None\n cls.thread = None", "title": "" }, { "docid": "dab8ddfba18cd1805eed078ac0914078", "score": "0.44943553", "text": "def create_model_target(self):\n outputs, reg = self.nn(self.input_placeholder)\n\n self.predictions_target = outputs", "title": "" }, { "docid": "c2e4e1087cb396a94b5f7c6968e2a02c", "score": "0.44930854", "text": "def create_loops(filenames):\n current = get_loops()\n def create_loop(filename):\n \"\"\"\n Creates or identifies a loop device corresponding to a file.\n \"\"\"\n if file in current:\n return current[filename]\n else:\n return check_output([\"losetup\", \"-f\", \"--show\", filename]).rstrip()\n return map(create_loop, filenames)", "title": "" }, { "docid": "e698c989e5c0b554087f7a42fe5244f6", "score": "0.44848165", "text": "def target(self, *targets):\n self.targets = targets", "title": "" }, { "docid": "e698c989e5c0b554087f7a42fe5244f6", "score": "0.44848165", "text": "def target(self, *targets):\n self.targets = targets", "title": "" }, { "docid": "06e9c46e57af07fc62975204ecede52c", "score": "0.4484183", "text": "def new_target(i):\n targets[i][0] = randint(100, 700)\n targets[i][1] = randint(100, 500)\n targets[i][2] = randint(10, 20)\n targets[i][3] = randint(10, 30)\n targets[i][4] = randint(10, 30)\n targets[i][5] = COLORS[randint(0, 5)]", "title": "" }, { "docid": "13b81c76707e2c89bf27f42c4056d63b", "score": "0.44788545", "text": "def animate_lane(lane, framecount, net_group):\n for sprite in lane.sprites():\n if isinstance(sprite, TurtleSinker):\n if not sprite.animation_started and framecount - sprite.last_animation >= 30:\n 
sprite.start_animation(framecount, net_group)\n\n else:\n sprite.next_frame(framecount, net_group)\n\n elif not isinstance(sprite, Player):\n sprite.next_frame(framecount)", "title": "" }, { "docid": "03f5c1736bbdc13147c59757297bb214", "score": "0.44759056", "text": "def run(self):\n\n bundle_processing_name, add_hf_uuid = self.bundle_outputs()[0] # @UnusedVariable\n bundle_hframe_file = self.output()[PipeBase.HFRAME].path\n managed_path = os.path.dirname(bundle_hframe_file)\n\n if os.path.isdir(self.input_path):\n \"\"\" With a directory, add all files under one special frame \"\"\"\n abs_input_path = os.path.abspath(self.input_path)\n files = [urllib.parse.urljoin('file:', os.path.join(abs_input_path, f)) for f in os.listdir(abs_input_path)]\n file_set = DataContext.copy_in_files(files, managed_path)\n frames = [FrameRecord.make_link_frame(add_hf_uuid, constants.FILE, file_set, managed_path), ]\n presentation = hyperframe_pb2.TENSOR\n elif os.path.isfile(self.input_path):\n if str(self.input_path).endswith('.csv') or str(self.input_path).endswith('.tsv'):\n bundle_df = pd.read_csv(self.input_path, sep=None) # sep=None means python parse engine detects sep\n frames = DataContext.convert_df2frames(add_hf_uuid, bundle_df, managed_path=managed_path)\n presentation = hyperframe_pb2.DF\n else:\n \"\"\" Other kinds of file \"\"\"\n abs_input_path = os.path.abspath(self.input_path)\n files = [urllib.parse.urljoin('file:', abs_input_path)]\n file_set = DataContext.copy_in_files(files, managed_path)\n frames = [FrameRecord.make_link_frame(add_hf_uuid, constants.FILE, file_set, managed_path), ]\n presentation = hyperframe_pb2.TENSOR\n else:\n raise RuntimeError('Unable to find input file or path {}'.format(self.input_path))\n\n \"\"\" Make a single HyperFrame output for an add \"\"\"\n\n if 'taskname' in self.tags or 'presentable' in self.tags:\n print(\"Unable to add bundle {}: tags contain reserved keys 'taskname' or 'presentable'\".format(self.output_bundle))\n # Todo: Delete temporary bundle here\n return\n\n tags = {'taskname': 'add', 'presentable': 'True', 'root_task':'True'}\n\n tags.update(self.tags)\n\n task_hfr = self.make_hframe(frames, add_hf_uuid, self.bundle_inputs(),\n self.pipeline_id(), self.pipe_id(), self,\n tags=tags,\n presentation=presentation)\n\n self.pfs.get_curr_context().write_hframe(task_hfr)", "title": "" }, { "docid": "8411e79b9d4819cac9e280880a096112", "score": "0.44748867", "text": "def stream_frames(fps):\n counter = 0\n while CAMERA.isOpened():\n counter += 1\n eel.setFrame(get_frame())\n print(f\"Sent the frame to the frontend ({counter})!\")\n eel.sleep(1/fps)", "title": "" }, { "docid": "0041615bfaf2b61afa7b1c70b792729b", "score": "0.44745582", "text": "def _add_output_command_stream(self):\n self.command_read_fd, self.command_write_fd = os.pipe()\n self.fds.append(self.command_write_fd)\n md = dict()\n md['type'] = \"COMMAND_FD\"\n self.fdmd.append(md)", "title": "" }, { "docid": "2c1489d81b92ce7fe817a5eb187de59c", "score": "0.44681868", "text": "def IM871A_pipe():\n pipe_path = './'\n return pipe_path", "title": "" }, { "docid": "367d762f90705839e61f9ea1098fc3ee", "score": "0.44596827", "text": "def __init__(self, target):\n self.target = target", "title": "" }, { "docid": "75d4416ed036fb0045e95a4ab4246a22", "score": "0.44423506", "text": "def extract_data(sample_dir, start_timestamp, end_timestamp):\n
skeleton_data_path = os.path.join(sample_dir, 'skeleton.txt')\n rgb_video_path = os.path.join(sample_dir, 'rgb.avi')\n depth_video_path = os.path.join(sample_dir, 'depth.avi')\n infrared_video_path = os.path.join(sample_dir, 'infrared.avi')\n\n multi_modal = dict()\n\n # parse skeleton\n with open(skeleton_data_path, 'r') as fp:\n lines = fp.readlines()\n skeletons = [line.strip().split(' ') for line in lines]\n interested_frame = skeletons[start_timestamp : end_timestamp + 1]\n multi_modal['skeleton'] = preprocess_skeleton_frames(interested_frame)\n \n # parse rgb and optical flow\n rgb_video_capture = cv2.VideoCapture(rgb_video_path)\n rgb_index = 0\n _, rgb_frame = rgb_video_capture.read()\n while rgb_index < start_timestamp:\n rgb_index += 1\n _, rgb_frame = rgb_video_capture.read()\n # optical flow needed\n prvs = cv2.cvtColor(rgb_frame, cv2.COLOR_BGR2GRAY)\n hsv = np.zeros_like(rgb_frame)\n hsv[..., 1] = 255\n # sample frames\n sample_rgb_frames, sample_optical_flow_frames = [], []\n current_sample_count = 0\n while rgb_index < end_timestamp:\n # optical flow related\n next_frame = cv2.cvtColor(rgb_frame, cv2.COLOR_BGR2GRAY)\n flow = cv2.calcOpticalFlowFarneback(prvs, next_frame, None, 0.5, 3, 15, 3, 5, 1.2, 0)\n mag, ang = cv2.cartToPolar(flow[...,0], flow[...,1])\n hsv[...,0] = ang * 180 / np.pi / 2\n hsv[...,2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)\n bgr = cv2.cvtColor(hsv,cv2.COLOR_HSV2BGR)\n prvs = next_frame\n # sampled frame\n if (current_sample_count + 1) % 16 == 0:\n sample_rgb_frames.append(rgb_frame)\n sample_optical_flow_frames.append(bgr)\n current_sample_count, rgb_index = current_sample_count + 1, rgb_index + 1\n _, rgb_frame = rgb_video_capture.read()\n multi_modal['rgb'] = random.choice(sample_rgb_frames)\n multi_modal['optical_flow'] = random.choice(sample_optical_flow_frames)\n \n # parse depth, infrared and depth\n depth_video_capture = cv2.VideoCapture(depth_video_path)\n infrared_video_capture = cv2.VideoCapture(infrared_video_path)\n video_index = 0\n _, depth_frame = depth_video_capture.read()\n _, infrared_frame = infrared_video_capture.read()\n while video_index < start_timestamp:\n video_index += 1\n _, depth_frame = depth_video_capture.read()\n _, infrared_frame = infrared_video_capture.read()\n sample_depth_frames, sample_infrared_frames, sample_infrared_depth_frames = [], [], []\n current_sample_count = 0\n while video_index < end_timestamp:\n if (current_sample_count + 1) % 16 == 0:\n sample_depth_frames.append(depth_frame)\n sample_infrared_frames.append(infrared_frame)\n # infrared + depth process\n infrared_depth_frame = np.zeros_like(np.array(infrared_frame))\n infrared_frame = np.uint8(np.clip((10 * infrared_frame + 50), 0, 255))\n depth_grayscale = cv2.cvtColor(depth_frame, cv2.COLOR_BGR2GRAY)\n depth_grayscale = np.uint8(np.clip((1.5 * depth_grayscale + 20), 0, 255))\n for x in range(len(depth_grayscale)):\n for y in range(len(depth_frame[0])):\n if 33 <= depth_grayscale[x][y] <= 37:\n infrared_depth_frame[x][y] = infrared_frame[x][y]\n else:\n infrared_depth_frame[x][y] = [0] * 3\n infrared_depth_frame = cv2.cvtColor(infrared_depth_frame, cv2.COLOR_BGR2GRAY)\n sample_infrared_depth_frames.append(infrared_depth_frame)\n current_sample_count, video_index = current_sample_count + 1, video_index + 1\n _, depth_frame = depth_video_capture.read()\n _, infrared_frame = infrared_video_capture.read()\n multi_modal['depth'] = random.choice(sample_depth_frames)\n multi_modal['infrared'] = random.choice(sample_infrared_frames)\n 
multi_modal['infrared_depth'] = random.choice(sample_infrared_depth_frames)\n\n return multi_modal", "title": "" }, { "docid": "babbb0ac311b4c59a89886d69d1c54ae", "score": "0.4442254", "text": "def __init__(self,tello,outputpath): \n\n self.tello = tello # videostream device\n self.outputPath = outputpath # the path that save pictures created by clicking the takeSnapshot button \n self.frame = None # frame read from h264decoder and used for pose recognition \n self.thread = None # thread of the Tkinter mainloop\n self.stopEvent = None \n \n # control variables\n self.distance = 0.1 # default distance for 'move' cmd\n self.degree = 30 # default degree for 'cw' or 'ccw' cmd\n \n # start a thread that constantly pools the video sensor for\n # the most recently read frame\n self.stopEvent = False\n self.thread = threading.Thread(target=self.videoLoop, args=())\n self.thread.start()\n self.isBusy = False\n\n # the sending_command will send command to tello every 5 seconds\n self.sending_command_thread = threading.Thread(target = self._sendingCommand)", "title": "" }, { "docid": "2109fa5f14b17fa6b90b7b48133cfbbc", "score": "0.4436141", "text": "def __init__(self, tree, name):\n self.pipe_buffer = b\"\"\n super(OutputPipeBytes, self).__init__(tree, name)", "title": "" }, { "docid": "0e6b3336bb9f41d96c2c9b7d81a35486", "score": "0.44305664", "text": "def build_image_sequence(frames):\n return [process_image(x, (224, 224, 3)) for x in frames]", "title": "" }, { "docid": "38adf371d90ee2c2196e7d7b76516ce4", "score": "0.44300538", "text": "def _callback_target(self, metadata, name, tables):\n prv_sources = self._paths_from_node(tables.sources, self.bld.srcnode)\n prv_includes = self._paths_from_node(tables.prv_includes, self.bld.srcnode)\n\n # Convert the name into a path relative to the source node\n name = Globitool.GlobifestLib.Util.get_abs_path(name, metadata.prj_dir)\n name = self._paths_from_node(name, self.bld.srcnode)\n # Then remove all escapes\n name = \"_\".join(os.path.normpath(name).split(os.sep))\n\n self._debug([\n \"TARGET\",\n \"package={}\".format(name),\n \"prv_includes={}\".format(prv_includes),\n \"prv_defines={}\".format(tables.prv_defines),\n \"sources={}\".format(prv_sources)\n # Aux files are not part of the build system, so not used\n ])\n\n tgt_name = \"_GFT_{}_{}\".format(self.target, name)\n\n # Add a taskgen for this target, copying the target params\n params = copy.copy(self.tgt_params)\n params[\"target\"] = tgt_name\n params[\"source\"] = prv_sources\n params[\"includes\"] = prv_includes + self.pub_includes\n params[\"export_includes\"] = self.pub_includes\n if \"defines\" not in params:\n params[\"defines\"] = []\n if \"use\" not in params:\n params[\"use\"] = \"\"\n if hasattr(self, \"use\"):\n params[\"use\"] += \" \" + self.use\n params[\"defines\"] += tables.prv_defines + self.pub_defines\n Logs.info(\"globitool: Target taskgen {}\".format(tgt_name))\n Logs.debug(\"globitool: {}\".format(params))\n tg = self.bld(**params)\n self.taskgens.append(tg)", "title": "" }, { "docid": "bca342e9c5a69f2966daa1206347337f", "score": "0.44295985", "text": "def addPipeToLoop(self):\n for p in self.tree_Pipes.selectedItems():\n name = p.text(0)\n rows = str(self.tree_LoopPipes.topLevelItemCount())\n itm = qtw.QTreeWidgetItem((rows, name))\n itm.setFlags(qtc.Qt.ItemIsSelectable | qtc.Qt.ItemIsEditable | qtc.Qt.ItemIsDragEnabled | qtc.Qt.ItemIsUserCheckable | qtc.Qt.ItemIsEnabled)\n self.tree_LoopPipes.addTopLevelItem(itm)\n rows = self.tree_LoopPipes.topLevelItemCount()", "title": 
"" }, { "docid": "2b1d2ae43c73845904b54ec6f9093f12", "score": "0.44166884", "text": "def on_target(self, target):\n pass", "title": "" }, { "docid": "e983757f5914bf5167d639dbbc6b6732", "score": "0.44098032", "text": "def spawn(self):\n\t\tpass", "title": "" }, { "docid": "2ea9ca59b84cc598351cfaa8cc5969bb", "score": "0.44094416", "text": "def gen(camera):\n i = 0\n while True:\n # if i == 10000:\n # break\n frame = camera.get_frame()\n # response = requests.post(test_url,\n # data=frame,\n # headers=headers)\n # print(response.text)\n yield(b'--frame\\r\\n'\n b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n')", "title": "" }, { "docid": "c636d70455417d62b4be0d016cdff718", "score": "0.4399161", "text": "def getRandomPipe():\n pipeHeight = gameSprites['pipe'][0].get_height()\n offset = screenHeight/3\n # pipeWidth = gameSprites['pipe'][1].get_width()\n y2 = offset + rd.randrange(0, int(screenHeight - gameSprites['base'].get_height() - 1.2*offset))\n y1 = pipeHeight - y2 + offset\n pipeX = screenWidth+10\n pipe = [\n {'x': pipeX, 'y': -y1},\n {'x': pipeX, 'y': y2}\n ]\n return pipe", "title": "" }, { "docid": "cdf98672ff80d819b65e0fe5d9277d37", "score": "0.43934953", "text": "def _populate_targets() -> List[TargetPrimitive]:\n targets: List[TargetPrimitive] = [TargetPrimitive(name=t.name) for t in CardTarget]\n commit()\n return targets", "title": "" }, { "docid": "afb376f01b409ff88e3f59d8fc1cf57d", "score": "0.439148", "text": "def initialize_target_nets(self):\n self.perform_targ_hard_updates({})", "title": "" }, { "docid": "a175ca4f5a5e3a79a4329e4ab1aa293a", "score": "0.43865225", "text": "def action(self, framer, frame, index=None, **kwa):\n clone = self.store.house.cloneFramer(framer, index.value)\n frame.addAux(clone)\n index.value += 1\n\n return None", "title": "" }, { "docid": "d5e29fe1f5dcb3f839251577bbaf7a1a", "score": "0.43858552", "text": "def _make_one(self, *args, **kwargs):\n return self._get_target()(*args, **kwargs)", "title": "" }, { "docid": "d5e29fe1f5dcb3f839251577bbaf7a1a", "score": "0.43858552", "text": "def _make_one(self, *args, **kwargs):\n return self._get_target()(*args, **kwargs)", "title": "" }, { "docid": "d5e29fe1f5dcb3f839251577bbaf7a1a", "score": "0.43858552", "text": "def _make_one(self, *args, **kwargs):\n return self._get_target()(*args, **kwargs)", "title": "" } ]
c90645975d58916657ebdd4881216503
If applicable unselect this option
[ { "docid": "dec4f83937a4ff047c32f156f0aa0fa8", "score": "0.81006885", "text": "def unselectOption(self):\r\n return self.client.nowait('browser.unselectOption', (self.element,))", "title": "" } ]
[ { "docid": "c884c47bec55f667bce84d4465de8573", "score": "0.8056833", "text": "def unselectOption(self):\n return self.client.nowait('browser.unselectOption', (self.element,))", "title": "" }, { "docid": "f7ffa51ac93c005096355e5ae813837c", "score": "0.7896933", "text": "def unselectOption(self, selector):\r\n self.query(selector).unselectOption()\r\n return self", "title": "" }, { "docid": "d0761ae27fe186c03474957b3fc5ccaf", "score": "0.7745869", "text": "def unselectOption(self, selector):\n self.query(selector).unselectOption()\n return self", "title": "" }, { "docid": "5e3ffe627b0fc2c4b1908a935052cb13", "score": "0.74817795", "text": "def unselect(self) -> bool:\n return self.select(None)", "title": "" }, { "docid": "8a883f5facb6987c77186e00f69f18b7", "score": "0.7161555", "text": "def unselect(self, value):\r\n return self.browser.unselect(self.element, value)", "title": "" }, { "docid": "07955cf154c218936d01fb5afe69d496", "score": "0.70513767", "text": "def unselect(self, value):\n return self.browser.unselect(self.element, value)", "title": "" }, { "docid": "a4b887f11868c3395693d8f7818c11e9", "score": "0.68834114", "text": "def unselect_job(self):\n result = super().unselect_job()\n if result.is_ok():\n self._print_action(result.jobname, \"unselected\", more_title=\"unselect\")\n else:\n self._print_error(result.jobname, result.status)", "title": "" }, { "docid": "b6286f44d76976ad70861eb2b365fc74", "score": "0.68604916", "text": "def deselectAll():\n for obj in bpy.context.selected_objects:\n if obj.select:\n obj.select = False", "title": "" }, { "docid": "3fc33da9fbfbdc49cc07be864face879", "score": "0.6828886", "text": "def unselect_pdf_agrupamiento(self):\n self.checkbox.uncheck(emision_liquidacion_historicas_catalog.CHBOX_PDF_AGRUPAMIENTO)", "title": "" }, { "docid": "395bb7ab632e0ad48db429f244aa137d", "score": "0.6732614", "text": "def deactivate(self):\n self.choice_cbk = None\n if self.active:\n self.active = 0\n self.deactivateAll()", "title": "" }, { "docid": "c1fe6e1bc9f34ca292987544f4ab0b18", "score": "0.670402", "text": "def clear_selected(self):\n self.state.selected = []", "title": "" }, { "docid": "a4e18f45ffa76237fe9a3a67078440a5", "score": "0.6605303", "text": "def ClearSelection(self):", "title": "" }, { "docid": "bc771f1f814fa587107ed2b1c588ec4f", "score": "0.6598857", "text": "def uncheck(self):\n self.set_checked(False)", "title": "" }, { "docid": "69f28027101147c699202a6951955cdd", "score": "0.6593936", "text": "def ToggleSelect(self):\n self.isSelected = not self.isSelected", "title": "" }, { "docid": "4eb8ed61b09d7f7ef466a4421ecc9436", "score": "0.6589447", "text": "def deselect_all(self):\n self.list_model.deselect_all()", "title": "" }, { "docid": "1d3cad218ab2c28b174c212f62b397ac", "score": "0.6555972", "text": "def deselect(objList):\n # confirm objList is a list of objects\n objList = confirmList(objList)\n # select/deselect objects in list\n for obj in objList:\n if obj is not None and obj.select_get():\n obj.select_set(False)", "title": "" }, { "docid": "e597c1f325684bb7bbc09e6f086d06cc", "score": "0.6554246", "text": "def clear_select_fields(self):\r\n self.select = []", "title": "" }, { "docid": "517810071f3d370478f002b94af3a315", "score": "0.6522489", "text": "def deselect(objList):\n # confirm objList is a list of objects\n objList = confirmList(objList)\n # select/deselect objects in list\n for obj in objList:\n if obj is not None and obj.select:\n obj.select = False", "title": "" }, { "docid": "e277ee9265d583b77743b085b7083ad5", "score": 
"0.6504648", "text": "def uncheck(self):\n return self.browser.uncheck(self.element)", "title": "" }, { "docid": "5853e06442b977a502fa3cca2f7ffcbe", "score": "0.64769506", "text": "def deactivate(self):\n if self.is_active():\n SelectGramBase.deactivate(self)\n self.active = 0", "title": "" }, { "docid": "008fb7c32b70988cee1762a9dcb55892", "score": "0.6462555", "text": "def uncheck(self):\r\n return self.browser.uncheck(self.element)", "title": "" }, { "docid": "c733b1869d20fec088173f4c3940078a", "score": "0.6449415", "text": "def remove_selected_item(self) -> None:\n\n del self._selected_item_dict[self.get()]\n super().remove_selected_item()", "title": "" }, { "docid": "72e12f0a3af2fa7a31359e3e18a90553", "score": "0.6444308", "text": "def deselectAllClicked(self):\n\n for n in range(self.firstrow, self.nextrow):\n selected = self.filetypes_grid.itemAtPosition(n, 0).widget()\n selected.setChecked(False)", "title": "" }, { "docid": "12c2a423ddd477fefa716f6d4445d18f", "score": "0.643059", "text": "def clear(self):\n self._options = []\n self._combo_menu.tk.delete(0, END)\n self._selected.set(\"\")", "title": "" }, { "docid": "60ffe59c23f4963af948028f6b8cbc0e", "score": "0.6390072", "text": "def DeselectRow(self, row):", "title": "" }, { "docid": "37cc9f7b9f1e11fb543381da9ca83100", "score": "0.6345775", "text": "def _drop_selection(self):\n if self._selected_item:\n self._selected_item.clear_selection()\n self._selected_item = None", "title": "" }, { "docid": "031429c4731a33328100345312888ff0", "score": "0.6307776", "text": "def DeselectCol(self, col):", "title": "" }, { "docid": "4945edcce13fb52b5a543ba99394ea5f", "score": "0.62926024", "text": "def unchecked_selected_field(self):\n if self.turn == BLACK:\n self.board_button[self.last_clicked_button].background_normal = BLACK_PAWN\n self.last_clicked_button = None\n self.reset_fields_on_board()\n else:\n self.board_button[self.last_clicked_button].background_normal = WHITE_PAWN\n self.last_clicked_button = None\n self.reset_fields_on_board()", "title": "" }, { "docid": "b7d2e1db6bbc3d820856f81ad5bd9766", "score": "0.6240054", "text": "def clearSelection(self):\n self.unitIsSelected = False\n self.selection = None\n self.clearMovementRange()", "title": "" }, { "docid": "98621afe1cc125df053815957818ea3d", "score": "0.6205199", "text": "def clearSelection(self):\n\t\tom.MGlobal.clearSelectionList()\n\t\treturn True", "title": "" }, { "docid": "8acecdfae90195da742775033eeae988", "score": "0.6196327", "text": "def untag_selected_labels(self, tag):\n self._collection.select_labels(\n labels=self.selected_labels\n ).untag_labels(tag)", "title": "" }, { "docid": "e2a3c373abfe7647b2f02def46ae9f5b", "score": "0.61520123", "text": "def remove_option(self, optionname):\n if optionname in self.options:\n del self.options[optionname]", "title": "" }, { "docid": "9c0f8917b3023b99f1dbb364ee4ad36d", "score": "0.61380535", "text": "def unselect_envia_liquidaciones_por_mail(self):\n self.checkbox.uncheck(emision_liquidacion_historicas_catalog.CHBOX_ENVIA_POR_MAIL)", "title": "" }, { "docid": "e5c68a5fceef8058e0b2a2f3a68ec80e", "score": "0.6136075", "text": "def uncheck(self, name):\n try:\n self.buttons[name].off()\n except KeyError:\n pass", "title": "" }, { "docid": "e5c68a5fceef8058e0b2a2f3a68ec80e", "score": "0.6136075", "text": "def uncheck(self, name):\n try:\n self.buttons[name].off()\n except KeyError:\n pass", "title": "" }, { "docid": "a6ea5bf6aa0b9b9a65f8f7606ad6b2a2", "score": "0.6135845", "text": "def onSelectWithoutButtonClick(self, event):\n 
checked_items = self.items_checkList.GetCheckedStrings()\n self._selected_items = [item for item in self.items_checkList.GetItems() if item not in checked_items]\n self.EndModal(wx.ID_OK)\n event.Skip()", "title": "" }, { "docid": "6ca7f06fc4df7584f4e0731a2fb06505", "score": "0.61270183", "text": "def on_mousedrop(self, event):\n self.selected = None\n self.mouseclick = False", "title": "" }, { "docid": "551fff87c2b172a1618f7e41492c89d0", "score": "0.60998577", "text": "def deselectPackage(self, pkg):\r\n self.to_deselect.append(pkg)", "title": "" }, { "docid": "e8af9bb1c6244d5551e7f8cfc4346261", "score": "0.6096858", "text": "def unselect_no_emite_liquidaciones_neto_0(self):\n self.checkbox.uncheck(emision_liquidacion_historicas_catalog.CHBOX_NO_EMITE_NETO_0)", "title": "" }, { "docid": "eedb9e3588ae0fa4ffb22cf5ef5e3273", "score": "0.60656357", "text": "def deactivate():\r\n if hasattr(_active, \"value\"):\r\n del _active.value", "title": "" }, { "docid": "eedb9e3588ae0fa4ffb22cf5ef5e3273", "score": "0.60656357", "text": "def deactivate():\r\n if hasattr(_active, \"value\"):\r\n del _active.value", "title": "" }, { "docid": "823114ad23dc2bc5b9030f5b22e65541", "score": "0.6053703", "text": "def clear_selected_labels(self):\n self.state.selected_labels = []", "title": "" }, { "docid": "fcba7d631c872904c56b0d0de28fccab", "score": "0.6019569", "text": "def clearSelection(self):\n flag = \"filterObject-itemList\"\n elem = self.getElement(self.locator)\n ulElem = elem.find_element_by_id(flag)\n liElems = ulElem.find_elements_by_tag_name(\"li\")\n if len(liElems) > 1:\n liElem = liElems[0]\n flag = \"x-tagfield-item-close\"\n divElem = liElem.find_element_by_class_name(flag)\n divElem.click()\n self.clearSelection()", "title": "" }, { "docid": "e93695b96f3290510e7beb44372ec2e3", "score": "0.6009066", "text": "def DeselectCell(self, row, col):", "title": "" }, { "docid": "3330bc6135ac2f22208655832444dfb7", "score": "0.6000478", "text": "def clear_selection(self):\n self.tree_view.selectionModel().clearSelection()", "title": "" }, { "docid": "49dc4c0c5c4de1c760cc428a615d9853", "score": "0.59409326", "text": "def clear_selection(self):\n if self.data.has_key('DNA_selection'):\n del self.data['DNA_selection']\n\n # Clear display\n if self.data.has_key('sel_objs'):\n for obj in self.data['sel_objs'].keys():\n self.seqframe.delete(obj)\n return", "title": "" }, { "docid": "c11c674b0754cc3a85905b61d96f9f1e", "score": "0.5935112", "text": "def deselect_all_vendors(self):\n for i in range(self.vendor_list_model.rowCount()):\n self.vendor_list_model.item(i).setCheckState(Qt.Unchecked)", "title": "" }, { "docid": "c11c674b0754cc3a85905b61d96f9f1e", "score": "0.5935112", "text": "def deselect_all_vendors(self):\n for i in range(self.vendor_list_model.rowCount()):\n self.vendor_list_model.item(i).setCheckState(Qt.Unchecked)", "title": "" }, { "docid": "31a1fdbb0aa9d257a3995b3b9791df03", "score": "0.5924704", "text": "def deselectToolsActions(self):\n self.actionZoom_Cursor.setChecked(False)\n self.actionRectangular_Cursor.setChecked(False)\n self.actionPointer_Cursor.setChecked(False)", "title": "" }, { "docid": "e7766a11dfbf14156cbfbbc08252123b", "score": "0.5916343", "text": "def deselect_all(self):\n for i in self.installed_apps:\n if i.isChecked():\n i.setChecked(False)\n self.enable_buttons()", "title": "" }, { "docid": "47f06b4deccdcec22121981c604586d6", "score": "0.59090644", "text": "def resetPackageSelections(self):\n for txmbr in self.ayum.tsInfo:\n self.ayum.tsInfo.remove(txmbr.pkgtup)\n 
self.ayum.tsInfo.conditionals.clear()\n for grp in self.ayum.comps.groups:\n grp.selected = False", "title": "" }, { "docid": "4c248107a460cacf3c7d11fac76d42f1", "score": "0.590468", "text": "def resetSelected(self):\r\n\r\n def _resethelp(button):\r\n #All button groups must be set to non exclusive for setChecked to work\r\n button.setAutoExclusive(False)\r\n button.setChecked(False)\r\n button.setAutoExclusive(True)\r\n\r\n for button in self.ttypes:\r\n _resethelp(button)\r\n for button in self.tclasses:\r\n _resethelp(button)\r\n for button in self.colors:\r\n _resethelp(button)\r\n\r\n for button in self.ptypes:\r\n _resethelp(button)\r\n for button in self.plocations:\r\n _resethelp(button)\r\n\r\n self.tattoogroup.setChecked(False)\r\n self.piercinggroup.setChecked(False)", "title": "" }, { "docid": "baae82fd1dfc1e5ab5c698ff186565cb", "score": "0.58991444", "text": "def clear(self):\n if self.selected:\n self.clear_event.send(self, x=self.x, y=self.y)\n self.selected = False\n return True\n return False", "title": "" }, { "docid": "0bec856e94ef6aba5eee3ea359fcf7df", "score": "0.5877415", "text": "def unselect_checkbox(driver, by, value):\n checkbox = driver.find_element(by=by, value=value)\n if checkbox.is_selected():\n checkbox.click()\n else:\n \"The element is already unchecked.\"", "title": "" }, { "docid": "06150963f8152f2e45aaff342f01fb9a", "score": "0.58536303", "text": "def deselectAll():\n try:\n selected_objects = bpy.context.selected_objects\n except AttributeError:\n selected_objects = [obj for obj in bpy.context.view_layer.objects if obj.select_get()]\n deselect(selected_objects)", "title": "" }, { "docid": "9bcad4f0121b045e68bf7548d9b97254", "score": "0.58452266", "text": "def do_unset(self, args):\n if len(args.split()) != 1:\n print 'Usage: unset <option>'\n return False\n option = args.upper()\n if self.set_query_options.get(option):\n print 'Unsetting %s' % option\n del self.set_query_options[option]\n else:\n print \"No option called %s is set\" % args\n return True", "title": "" }, { "docid": "76315f8e65c5b810a82f473b85a8d179", "score": "0.58273107", "text": "def unset_option(self, option):\n self._check_option(option)\n levels = option.split('.')\n attr = levels.pop()\n cur = self._config['options']\n for level in levels:\n if level not in cur:\n return\n cur = cur[level]\n if attr in cur:\n del cur[attr]\n # Clean up any empty sub-sections\n self._remove_empty_dicts(self._config['options'])", "title": "" }, { "docid": "6b41639d1562bda4323c08625ccfa5ba", "score": "0.5779279", "text": "def selected(self, selected):\n value = bool(selected)\n if value:\n CCAPI.add_option_to_product(\n range_id=self.product_range.id, option_id=self.id\n )\n else:\n CCAPI.remove_option_from_product(\n range_id=self.product_range.id, option_id=self.id\n )\n for product in self.product_range:\n product._options = None\n self._selected = value", "title": "" }, { "docid": "10bc9d0188caf8af17ea13990be54eef", "score": "0.5774093", "text": "def deactivate(self):\n self.label.setTextColor('darkgrey')\n self.rect.setWidth(1)\n self.active = False", "title": "" }, { "docid": "5e68f58b1f6bff20faebb6b6d6607068", "score": "0.57739484", "text": "def remove_option(self, ref):\r\n real_ref = 'opt_' + ref\r\n obj = self._get(real_ref)\r\n if not isinstance(obj, Option):\r\n raise TypeError('Trying to remove option with ref %r, but object with ref %r is not an instance of Option (%s instead)' % (ref, real_ref, type(obj)))\r\n self._remove(real_ref)", "title": "" }, { "docid": 
"f1e101c5b8c447acd01ac330f8edfe1b", "score": "0.5766753", "text": "def tryToClearSelection(self):\n\n self.__networkManager.clearSelectionWithoutSavingHistory()", "title": "" }, { "docid": "6a065457362f2cec4cf98e585e734d4b", "score": "0.5753168", "text": "def deselectAll(self):\n tables = [self.ui.auto_table, self.ui.assist_table, self.ui.train_table]\n for table in tables:\n for row in range(table.rowCount()):\n chk = table.cellWidget(row, 0)\n chk.setChecked(False)", "title": "" }, { "docid": "7059526b5b506eba6d88c7a227d6da14", "score": "0.574025", "text": "def deselect_all_report_types(self):\n for i in range(self.report_type_list_model.rowCount()):\n self.report_type_list_model.item(i).setCheckState(Qt.Unchecked)", "title": "" }, { "docid": "c5026bdab2b10761aa3b2ab7062ee389", "score": "0.5733948", "text": "def delete_selected(self):\n selection = self.__treeview.get_selection()\n model, iter = selection.get_selected()\n model.remove(iter)", "title": "" }, { "docid": "3200a694d387b88c903b23318cf9bb67", "score": "0.57334054", "text": "def removeToggle(self, model):\r\n\t\tif model in self.toggleDict.keys():\r\n\t\t\t# remove toggle from group\r\n\t\t\tself.toggleDict[model].detachModel()\r\n\t\t\tdel self.toggleDict[model]\r\n\t\t\t# update current selected\r\n\t\t\tif (model == self.getValue()):\r\n\t\t\t\tif (len(self.toggleDict) > 0):\r\n\t\t\t\t\tself.toggleDict.keys()[0].toggle()\r\n\t\t\t\telse:\r\n\t\t\t\t\tself.value = None\r\n\t\t\t\t\tself._notify()\r\n\t\telse:\r\n\t\t\traise KeyError\r\n\t\treturn", "title": "" }, { "docid": "f6447a708e80d3419e7a9435e8f06b17", "score": "0.5675803", "text": "def deselect(self):\r\n for cell in self.move_cells.values():\r\n cell.opacity = 0", "title": "" }, { "docid": "c1cfe91d02b2de30d344ccb017502060", "score": "0.5675657", "text": "def select_deselect_combobox_layout(layout, is_checked):\n if layout.count() > 0:\n for idx in range(layout.count()):\n cb = get_widget_of_layout(layout.itemAt(idx), QtGui.QCheckBox)\n if cb:\n cb.setCheckState(is_checked)", "title": "" }, { "docid": "5b1b016d0d847b14ba50e785309f28b8", "score": "0.5675107", "text": "def remove(self, option):\n if option in self._options:\n if len(self._options) == 1:\n # this is the last option in the list so clear it\n self.clear()\n else:\n self._options.remove(option)\n self._refresh_options()\n # have we just removed the selected option?\n # if so set it to the first option\n if option == self.value:\n self._set_option(self._options[0])\n return True\n else:\n return False", "title": "" }, { "docid": "3edffc279899e38069e75d5ed7602d2a", "score": "0.56717104", "text": "def uncheckAll(self):\n for x in range(len(self.checkList)):\n op1 = self.checkList[x]\n op1.setChecked(False)", "title": "" }, { "docid": "bf0dc378a7101224afedcf7fb26a3dfa", "score": "0.5657949", "text": "def remove_selected(self):\n index = copy(self.selected_data)\n index.sort()\n if len(index) > 0:\n self._sizes = np.delete(self._sizes, index, axis=0)\n for i in index[::-1]:\n del self.edge_colors[i]\n del self.face_colors[i]\n if self._hover_point in self.selected_data:\n self._hover_point = None\n self.selected_data = []\n self.data = np.delete(self.data, index, axis=0)", "title": "" }, { "docid": "f959472737247d87d5b3c1f346fbf4f5", "score": "0.56573313", "text": "def unchoose_all(self) -> None:\n assert not self.spectator, \"Trying to unchoose for spectator\"\n for hcard in self._hand.values():\n hcard.chosen = None", "title": "" }, { "docid": "90b2dce4f3a1fb1b49ff621f2ea9f9fe", "score": "0.56542385", 
"text": "def deactivate(self):\n self.label.setFill('darkgrey')\n self.rect.setWidth(1)\n self.active = 0", "title": "" }, { "docid": "4edc35858019191b06f3eef1cc2e92e0", "score": "0.5653738", "text": "def deactivate(self):\n self.label.setFill(\"darkgrey\")\n self.circ.setWidth(1)\n self.active = False", "title": "" }, { "docid": "5b9655350fe266adaefe404c716d6dfe", "score": "0.565233", "text": "def deselect_all_items(self):\n\n self.wait.until(EC.element_to_be_clickable((By.NAME, self.select_all_button_name)))\n\n if not self.SELECT_ALL_BUTTON().is_selected():\n scroll_to_element(self.browser, self.SELECT_ALL_BUTTON())\n self.SELECT_ALL_BUTTON().click() # Invoked twice to ensure that all documents\n self.SELECT_ALL_BUTTON().click() # are deselected\n else:\n scroll_to_element(self.browser, self.SELECT_ALL_BUTTON())\n self.SELECT_ALL_BUTTON().click()", "title": "" }, { "docid": "0e9614e30da51a1b01b7ff2abfd6e4fc", "score": "0.56375396", "text": "def deactivate(self):\n self.active = False", "title": "" }, { "docid": "0e9614e30da51a1b01b7ff2abfd6e4fc", "score": "0.56375396", "text": "def deactivate(self):\n self.active = False", "title": "" }, { "docid": "0e9614e30da51a1b01b7ff2abfd6e4fc", "score": "0.56375396", "text": "def deactivate(self):\n self.active = False", "title": "" }, { "docid": "218b633ee7e48ac567308398d13573ed", "score": "0.5635341", "text": "def on_deselect_display_fcn(self, old_fcn):\n pass", "title": "" }, { "docid": "5ea1e570c87ba579d16c5232ded40e22", "score": "0.5631801", "text": "def cancel(self):\n\t\tself.select_index = 0\n\t\tself.bound_top_left = [-1,-1]\n\t\tself.bound_bottom_right = [-1,-1]\n\t\tself.drawGUI()", "title": "" }, { "docid": "aea0763c4605347e9e62c4e38c66db67", "score": "0.5627106", "text": "def uncheck(self, element):\n raise NotImplementedError(\n \"This browser doesn't support unchecking elements\")", "title": "" }, { "docid": "0a0f414b1c0c9588f2671b5025e880e2", "score": "0.5624509", "text": "def clearSelectionWithoutSavingHistory(self):\n self.historyWriter.entryWithoutHistory(lambda state: self.clearSelection())", "title": "" }, { "docid": "32b575cd2d5033616ba172851536b96b", "score": "0.5620127", "text": "def select_none(self):\r\n if DEBUG:\r\n print(\"select_none\")\r\n\r\n self.__select_brick = dict(idx=-1, brk=None, xy=(0, 0), rc=(0, 0))\r\n self.statusbar_set_text(\"Nessuna posizione selezionata\")", "title": "" }, { "docid": "380fa22a70fad6c5602be5326d7bc250", "score": "0.5585611", "text": "def toggle(self):\n assert self.form == 'int',\"'%s' not an int type var\"%(self.name)\n \n mc.optionVar(iv = (self.name,not self.value))\n self.value = not self.value\n guiFactory.report(\"'%s':%s\"%(self.name,self.value))", "title": "" }, { "docid": "08eec41ccf656f77e8c500e3851fc160", "score": "0.5585042", "text": "def remove(self, items):\n items = coerce_to_list(items)\n new_options = {k: v for k, v in self.options.items() if v not in items}\n self.widget.options = new_options\n self.widget.param.trigger(\"options\")", "title": "" }, { "docid": "0042f9ddda28ebad823071722ca0aac6", "score": "0.5584276", "text": "def conditional_clear_selection(self, command):\n if not self.selection_cache:\n return\n if command not in self.selection_commands:\n self.selection_cache = \"\"\n self.debug(\"Cleared selection\")", "title": "" }, { "docid": "c96812a16d8e0113824adc335e6127c4", "score": "0.5577427", "text": "def choose_antey_no_setting(self):\r\n option = self.driver.find_element(*MainPageLocators.ANTEY_OPTION_NO)\r\n option.click()", "title": "" }, { "docid": 
"6b26149bd1d0af40fb4c815b9dc9a4cb", "score": "0.55692446", "text": "def remove_selected_item(self) -> None:\n\n if len(self._view_items) == 0:\n return\n self._logger.debug(f'Removing {str(self._view_items[self._selected_item])}')\n del self._view_items[self._selected_item]\n if self._selected_item >= len(self._view_items) and self._selected_item > 0:\n self._selected_item = self._selected_item - 1", "title": "" }, { "docid": "d569f8825331986ee18f15c0017815af", "score": "0.5538323", "text": "def deactivate(self):\n if self.active:\n self.active = 0\n self.deactivateAll()", "title": "" }, { "docid": "d569f8825331986ee18f15c0017815af", "score": "0.5538323", "text": "def deactivate(self):\n if self.active:\n self.active = 0\n self.deactivateAll()", "title": "" }, { "docid": "879c313ec4aa4af300b0ff2745c17f69", "score": "0.55254865", "text": "def get_selected_option(self):\n option = self.selected_option\n self.selected_option = None\n return option", "title": "" }, { "docid": "cfbc5ee4bcc50d80a6e02a9b71d15a75", "score": "0.552425", "text": "def toggle_select(self):\n\n photos = self.ids['photos']\n photos.toggle_select()\n self.update_selected()", "title": "" }, { "docid": "cfbc5ee4bcc50d80a6e02a9b71d15a75", "score": "0.552425", "text": "def toggle_select(self):\n\n photos = self.ids['photos']\n photos.toggle_select()\n self.update_selected()", "title": "" }, { "docid": "b21ab6900e9b1c6e113c6c17eb664957", "score": "0.5514463", "text": "def untag_selected_samples(self, tag):\n self._collection.select(self.selected).untag_samples(tag)", "title": "" }, { "docid": "b273dff607a124a7cc2c654f207db8b8", "score": "0.5499122", "text": "def handle_option_action(self, opt_name):\n config.config['options'][opt_name] = not config.config['options'][opt_name]", "title": "" }, { "docid": "3ea5ec015347464108e17b2824ba0ff4", "score": "0.54959273", "text": "def decrement_selected_button(self):\n \n if len(self.button_list) == 0:\n self.selected_index = -1\n return\n\n if self.selected_index == 0:\n self.selected_index = len(self.button_list) - 1\n else:\n self.selected_index -= 1", "title": "" }, { "docid": "0976d6e67de37b322d136897de083510", "score": "0.54744893", "text": "def tryToDeleteSelection(self):\n\n self.__networkManager.deleteSelectedLayers()", "title": "" }, { "docid": "ad500d9b296e4b42203aa41e57208a3c", "score": "0.54742354", "text": "def testDeselect(self):\n self.ctrl.get_item(1).select()\n self.ctrl.get_item(4).select()\n\n self.ctrl.get_item(3).deselect()\n self.ctrl.get_item(4).deselect()\n\n self.assertRaises(IndexError, self.ctrl.get_item(23).deselect)\n\n self.assertEqual(self.ctrl.get_selected_count(), 1)", "title": "" }, { "docid": "0198190f220030afe39411ab2bbb0db4", "score": "0.5471926", "text": "def select_obj(self, obj):\n bpy.ops.object.select_all(action='DESELECT')\n obj.select = True\n bpy.context.scene.objects.active\n bpy.context.scene.update()", "title": "" }, { "docid": "18cd94e3e80853084854a76f9477f804", "score": "0.54692614", "text": "def deleteSelectedItem(self):\n # Assure this is indeed wanted\n delete_msg = \"This operation will remove the selected data sets \" +\\\n \"and all the dependents from the data explorer.\" +\\\n \"\\nDo you want to continue?\"\n reply = QtWidgets.QMessageBox.question(self,\n 'Warning',\n delete_msg,\n QtWidgets.QMessageBox.Yes,\n QtWidgets.QMessageBox.No)\n\n if reply == QtWidgets.QMessageBox.No:\n return\n\n indices = self.current_view.selectedIndexes()\n self.deleteIndices(indices)", "title": "" }, { "docid": "142b1bdcf3d681dafc034a9225e003eb", 
"score": "0.5465647", "text": "def deactivate(self):\n debug.trace('SelectWinGramNL.deactivate', 'self.buff_name=%s' % self.buff_name)\n if self.is_active():\n SelectGramBase.deactivate(self)\n self.active = 0", "title": "" } ]
c7467a234036812d5cf5ee5b85aa6b78
Performs a single optimization step.
[ { "docid": "d3d7589b0ec1178a28a65494b84c26a3", "score": "0.0", "text": "def step(self, closure=None):\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n for p in group[\"params\"]:\n if p.grad is None:\n continue\n grad = p.grad.data\n if grad.dtype in {torch.float16, torch.bfloat16}:\n grad = grad.float()\n if grad.is_sparse:\n raise RuntimeError(\"Adafactor does not support sparse gradients.\")\n\n state = self.state[p]\n grad_shape = grad.shape\n\n factored, use_first_moment = self._get_options(group, grad_shape)\n\n grad_square = grad ** 2\n\n # State Initialization\n if len(state) == 0:\n state[\"step\"] = 0\n if use_first_moment: # Always True\n # Exponential moving average of gradient values\n state[\"exp_avg\"] = torch.zeros_like(grad)\n if factored:\n state[\"exp_avg_sq_row\"] = torch.zeros(grad_shape[:-1]).to(grad)\n state[\"exp_avg_sq_col\"] = torch.zeros(\n grad_shape[:-2] + grad_shape[-1:]\n ).to(grad)\n else:\n state[\"exp_avg_sq\"] = torch.zeros_like(grad)\n\n state[\"RMS\"] = 0\n\n # Bias Corrected Initialization\n if use_first_moment: # Always True\n state[\"exp_avg\"].copy_(grad)\n if factored:\n state[\"exp_avg_sq_row\"].add_(grad_square.mean(dim=-1))\n state[\"exp_avg_sq_col\"].add_(grad_square.mean(dim=-2)) \n else:\n state[\"exp_avg_sq\"].copy_(grad_square)\n\n else:\n if use_first_moment:\n state[\"exp_avg\"] = state[\"exp_avg\"].to(grad)\n if factored:\n state[\"exp_avg_sq_row\"] = state[\"exp_avg_sq_row\"].to(grad)\n state[\"exp_avg_sq_col\"] = state[\"exp_avg_sq_col\"].to(grad)\n else:\n state[\"exp_avg_sq\"] = state[\"exp_avg_sq\"].to(grad)\n\n p_data_fp32 = p.data\n if p.data.dtype in {torch.float16, torch.bfloat16}:\n p_data_fp32 = p_data_fp32.float()\n\n state[\"step\"] += 1\n state[\"RMS\"] = self._rms(p_data_fp32)\n group[\"lr\"] = self._get_lr(group, state)\n\n beta2t = 1.0 - math.pow(state[\"step\"], group[\"decay_rate\"]) # Increasing Decay Parameter\n\n beta1 = group[\"beta1\"]\n beta2 = group[\"beta2\"]\n #print(beta2)\n\n bias_correction1 = 1 - beta1 ** state['step']\n bias_correction2 = 1 - beta2 ** state['step']\n\n second_moment = grad_square + group[\"eps\"][0]\n if factored:\n exp_avg_sq_row = state[\"exp_avg_sq_row\"]\n exp_avg_sq_col = state[\"exp_avg_sq_col\"]\n\n exp_avg_sq_row.mul_(beta2).add_(\n second_moment.mean(dim=-1), alpha=1.0 - beta2\n )\n exp_avg_sq_col.mul_(beta2).add_(\n second_moment.mean(dim=-2), alpha=1.0 - beta2\n )\n\n # Approximation of exponential moving average of square of gradient\n second_moment = self._approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col)\n denominator = second_moment.norm().sqrt() + group[\"epsilon\"]\n else:\n exp_avg_sq = state[\"exp_avg_sq\"]\n\n exp_avg_sq.mul_(beta2).add_(second_moment, alpha=1.0 - beta2)\n denominator = exp_avg_sq.sqrt().norm() + group[\"epsilon\"]\n\n nominator = grad\n\n if use_first_moment:\n exp_avg = state[\"exp_avg\"]\n exp_avg.mul_(group[\"beta1\"]).add_(nominator, alpha=1 - group[\"beta1\"])\n nominator = exp_avg\n\n if group[\"weight_decay\"] != 0:\n p_data_fp32.add_(\n p_data_fp32, alpha=-group[\"weight_decay\"] * group[\"lr\"]\n )\n\n nominator.mul_(group[\"lr\"])\n update = nominator/denominator\n\n\n # Update Clipping\n update.div_(\n (self._rms(update) / group[\"clip_threshold\"]).clamp_(min=1.0)\n )\n\n p_data_fp32.add_(-update)\n\n if p.data.dtype in {torch.float16, torch.bfloat16}:\n p.data.copy_(p_data_fp32)\n\n return loss", "title": "" } ]
[ { "docid": "fc8f599bd19d33e08ee4918b1dff69f6", "score": "0.7238263", "text": "def optimize():\n raise NotImplementedError", "title": "" }, { "docid": "e8b16b8d3c9751f70ce08ce92102ce02", "score": "0.68457", "text": "def optimize(self):\n raise NotImplementedError", "title": "" }, { "docid": "e8b16b8d3c9751f70ce08ce92102ce02", "score": "0.68457", "text": "def optimize(self):\n raise NotImplementedError", "title": "" }, { "docid": "e973dee096b4dd5af2f9efd8e191a5a6", "score": "0.67128915", "text": "def optimize(self):\n pass", "title": "" }, { "docid": "41deaf06bd53cf366270a55530d6d60f", "score": "0.66316366", "text": "def run_single(self):\n # Processing and wall time for pyomo\n start_preproc_pyomo_pt = time.clock()\n start_preproc_pyomo_wt = time.time()\n\n # Instantiate the optModel\n self.optModel.preprocess()\n\n elapsed_preproc_pyomo_pt = str(timedelta(seconds = time.clock() - start_preproc_pyomo_pt))\n elapsed_preproc_pyomo_wt = str(timedelta(seconds = time.time() - start_preproc_pyomo_wt))\n\n #---- Solve the model ----\n #- Solve the optModel (tee=True shows the solver output) -\n try:\n # Processing and wall time for the solver\n start_solver_pt = time.clock()\n start_solver_wt = time.time()\n optSoln = self._optSolver.solve(self.optModel,tee=False)\n solver_flag = 'normal'\n\n # In the case of an error switch the solver\n except Exception, e:\n solver_flag = 'solverError'\n if self.warnings:\n print '**WARNING (FORCE)! {} failed with the following error: \\n{} \\n'.format(self.optimization_solver,e)\n\n elapsed_solver_pt = str(timedelta(seconds = time.clock() - start_solver_pt))\n elapsed_solver_wt = str(timedelta(seconds = time.time() - start_solver_wt))\n\n if solver_flag == 'normal' and str(optSoln.solver.termination_condition).lower() == 'optimal':\n\n exit_flag = 'globallyOptimal'\n\n # Load the results\n self.optModel.load(optSoln)\n\n # Optimal value of the objective function\n opt_objValue = self.optModel.objectiveFunc()\n\n # Reactions that must be knocked out, up-regulated or down-regulated\n yX_one_rxns = [j for j in self.optModel.J if abs(self.optModel.yX[j].value - 1) <= mip_integrality_tol]\n yL_one_rxns = [j for j in self.optModel.J if abs(self.optModel.yL[j].value - 1) <= mip_integrality_tol]\n yU_one_rxns = [j for j in self.optModel.J if abs(self.optModel.yU[j].value - 1) <= mip_integrality_tol]\n\n # If there was a solver error or if an optimal solution was not returned \n else:\n opt_objValue = None\n if solver_flag == 'solverError':\n exit_flag = solver_flag\n else:\n exit_flag = str(optSoln.solver.termination_condition)\n \n # Reactions that must be knocked out, up-regulated or down-regulated\n yX_one_rxns = []\n yL_one_rxns = []\n yU_one_rxns = []\n\n # Store the solution\n self._curr_soln = {'exit_flag':exit_flag,'objective_value':opt_objValue,'interven_num': len(yX_one_rxns) + len(yL_one_rxns) + len(yU_one_rxns),'X_rxns':yX_one_rxns,'L_rxns':yL_one_rxns,'U_rxns':yU_one_rxns}\n\n # Print the results on the screen \n if self.stdout_msgs_details:\n print '\\nObjective value = {}, Optimality status = {}, Solution status = {}, Solver run status = {}'.format(opt_objValue, optSoln.solver.termination_condition, optSoln.Solution.status, solver_flag)\n print 'Took (hh:mm:ss) {}/{} of processing/walltime to create a pyomo model, {}/{} to preprcoess the model and {}/{} to solve the model\\n'.format(self._elapsed_create_optModel_pt, self._elapsed_create_optModel_wt, elapsed_preproc_pyomo_pt,elapsed_preproc_pyomo_wt, elapsed_solver_pt,elapsed_solver_wt)", "title": "" }, 
{ "docid": "63f2f307625f908b2b02599f2248cfdb", "score": "0.6482195", "text": "def optimize(self, loss, update_step):\n self.state_feat_encoder_optim.zero_grad()\n self.policy_optim.zero_grad()\n if self.train_traj_feature:\n self.traj_feature_extractor_opt.zero_grad()\n if self.train_traj_sampler:\n self.traj_feature_sampler_opt.zero_grad()\n\n loss.backward(retain_graph=self.re_sampler_step)\n self.policy_optim.step()\n if self.train_feature:\n self.state_feat_encoder_optim.step()\n\n if self.train_traj_feature:\n self.traj_feature_extractor_opt.step()\n\n if self.train_traj_sampler:\n self.traj_feature_sampler_opt.step()", "title": "" }, { "docid": "f320a126b7d7348048df0e51e80e0c6f", "score": "0.64440686", "text": "def step_par_1():\n logging.info(\"par 1\")\n optimizer = create_optimizer()\n optimizer.apply_parallelism()\n optimizer.apply_cache()\n optimizer.update_plumber()\n optimizer.apply_parallelism()\n return optimizer", "title": "" }, { "docid": "e5fcfa518cdcae3382b1616e43b64b0d", "score": "0.64010715", "text": "def minimize():", "title": "" }, { "docid": "622e6815df50b756634d3a369709e85b", "score": "0.6331547", "text": "def optimization_step(self):\n time_pass_start = time.time()\n\n # 1./0\n\n ## choose an index to update\n indx = self.get_target_index()\n\n # self.logger.info('Working on data batch {}'.format(indx))\n\n if self.display > 2:\n print(\"||dtheta|| {0},\".format(np.sqrt(np.sum((self.theta - self.theta_prior_step)**2))))\n print(\"index {0}, last f {1},\".format(indx, self.hist_f[indx,0]))\n print(\"step scale {0},\".format(self.step_scale))\n\n # events are for debugging -- eg, so that the user supplied objective can check\n # for step failure itself\n self.events['step_failure'] = False\n\n # evaluate subfunction value and gradient at new position\n f, df_proj = self.f_df_wrapper(self.theta, indx)\n # self.logger.debug('Objective {}'.format(f))\n # self.logger.debug('Gradient norm {}'.format(np.linalg.norm(df_proj)))\n\n # check for a failed update step, and adjust f, df, and self.theta\n # as appropriate if one occurs.\n step_failure, f, df_proj = self.handle_step_failure(f, df_proj, indx)\n # if step_failure:\n # self.logger.warning('Step failure!')\n # self.logger.debug('Modified objective {}'.format(f))\n # self.logger.debug('Modified gradient norm {}'.format(np.linalg.norm(df_proj)))\n\n # add the change in theta and the change in gradient to the history for this subfunction\n self.update_history(indx, self.theta_proj, f, df_proj)\n\n # increment the total distance traveled using the last update\n self.total_distance += np.sqrt(np.sum((self.theta - self.theta_prior_step)**2))\n # self.logger.debug('Total distance {}'.format(self.total_distance))\n\n # the current contribution from this subfunction to the total Hessian approximation\n H_pre_update = np.real(np.dot(self.b[:,:,indx], self.b[:,:,indx].T))\n # self.logger.debug('H_pre_update: {}'.format(H_pre_update.shape))\n\n ## update this subfunction's Hessian estimate\n self.update_hessian(indx)\n # self.logger.debug('updated hessian')\n\n # the new contribution from this subfunction to the total approximate hessian\n H_new = np.real(np.dot(self.b[:,:,indx], self.b[:,:,indx].T))\n # self.logger.debug('computed H_new {}'.format(H_new.shape))\n\n # update total Hessian using this subfunction's updated contribution\n self.full_H += H_new - H_pre_update\n # self.logger.debug('added to full_H {}'.format(self.full_H.shape))\n\n # calculate the total gradient, total Hessian, and total function value at the current 
location\n full_df = 0.\n # self.logger.debug('computing the total objective')\n for i in range(self.N):\n dtheta = self.theta_proj - self.last_theta[:,[i]]\n bdtheta = np.dot(self.b[:,:,i].T, dtheta)\n Hdtheta = np.real(np.dot(self.b[:,:,i], bdtheta))\n Hdtheta += dtheta*self.min_eig_sub[i] # the diagonal contribution\n full_df += Hdtheta + self.last_df[:,[i]]\n # self.logger.debug('\\tcomputed batch {}'.format(i))\n\n # add in the ADMM augmented Hessian term\n full_H_combined = self.get_full_H_with_diagonal() + np.eye(self.K_max) * self.admm_rho\n # self.logger.debug('added the augmented Hessian. about to invert!')\n\n # ev = np.linalg.eigvals(full_H_combined)\n # self.logger.debug('Minimum eigenvalue: {}'.format(np.min(ev)))\n # self.logger.debug('Maximum eigenvalue: {}'.format(np.max(ev)))\n\n # Use cholesky factorization to solve\n cho = cho_factor(full_H_combined)\n\n # TODO - Use Woodbury identity instead of recalculating full inverse\n # full_H_inv = np.linalg.inv(full_H_combined)\n # self.logger.debug('finished inverting the Hessian!')\n\n # update subspace with ADMM previous theta location\n assert ('theta_admm_prev' in self.__dict__), \"Please set theta_admm_prev before calling optimize\"\n self.update_subspace(self.theta_admm_prev)\n theta_admm_prev_subspace = np.dot(self.P.T, self.theta_admm_prev)\n # self.logger.debug('updated subspace')\n\n # add in the ADMM augmented gradient term\n full_df += (self.theta_proj - theta_admm_prev_subspace) * self.admm_rho\n # self.logger.debug('added the augmented hessian')\n\n # calculate an update step\n # dtheta_proj = -np.dot(full_H_inv, full_df) * self.step_scale\n dtheta_proj = -cho_solve(cho, full_df) * self.step_scale\n # self.logger.debug('calculating the quasi-newton step')\n\n dtheta_proj_length = np.sqrt(np.sum(dtheta_proj**2))\n # self.logger.debug('projected step length {}'.format(dtheta_proj_length))\n\n if dtheta_proj_length < self.minimum_step_length:\n # self.logger.warning('projected length below the minimum. 
forcing minimum')\n dtheta_proj *= self.minimum_step_length/dtheta_proj_length\n dtheta_proj_length = self.minimum_step_length\n if self.display > 3:\n print(\"forcing minimum step length\")\n\n if self.eval_count_total > self.N and dtheta_proj_length > self.eps:\n # only allow a step to be up to a factor of max_step_length_ratio longer than the\n # average step length\n avg_length = self.total_distance / float(self.eval_count_total)\n length_ratio = dtheta_proj_length / avg_length\n ratio_scale = self.max_step_length_ratio\n if length_ratio > ratio_scale:\n # self.logger.warning('step length is too big')\n if self.display > 3:\n print(\"truncating step length from %g to %g\"%(dtheta_proj_length, ratio_scale*avg_length))\n dtheta_proj_length /= length_ratio/ratio_scale\n dtheta_proj /= length_ratio/ratio_scale\n\n # the update to theta, in the full dimensional space\n # self.logger.debug('about to compute the update to theta in the full space')\n dtheta = np.dot(self.P, dtheta_proj)\n\n # backup the prior position, in case this is a failed step\n self.theta_prior_step = self.theta.copy()\n\n # self.logger.debug('Step length {}'.format(np.linalg.norm(dtheta)))\n\n # update theta to the new location\n self.theta += dtheta\n self.theta_proj += dtheta_proj\n # the predicted improvement from this update step\n self.f_predicted_total_improvement = 0.5 * np.dot(dtheta_proj.T, np.dot(full_H_combined, dtheta_proj))\n\n ## expand the set of active subfunctions as appropriate\n # self.expand_active_subfunctions(full_H_inv, step_failure)\n self.expand_active_subfunctions(cho, step_failure)\n\n # record how much time was taken by this learning step\n time_diff = time.time() - time_pass_start\n self.time_pass += time_diff\n\n # self.logger.debug('Step time {}'.format(time_diff))\n # self.logger.debug('Elapsed time {}'.format(self.time_pass))", "title": "" }, { "docid": "6aed6b6df47bc39f7fa66f45db134e02", "score": "0.63196516", "text": "def optimizer(self):\n pass", "title": "" }, { "docid": "4b4a77ad4888ecb561fd824f6dfdab33", "score": "0.63138056", "text": "def _optimization_backend(self, x0, minimizer_kwargs, niter):\n pass", "title": "" }, { "docid": "d1947a40f8e67d79a1397151e711833f", "score": "0.6274566", "text": "def perform_optimization(self):\n self.train_op = tf.contrib.layers.optimize_loss(loss=self.loss,\n global_step=tf.train.get_global_step(),\n learning_rate=self.learning_rate_ph,\n optimizer='Adam',\n clip_gradients=1.0)", "title": "" }, { "docid": "08b6ed4cad3485a7dd8dad805866d1db", "score": "0.62529653", "text": "def step(self) -> None:\n if self.blocking_parameter_updates:\n self.torch_optimizer.step()\n else:\n self.update_next = True", "title": "" }, { "docid": "46c8cc1f28c208e1e80779faded47236", "score": "0.6247873", "text": "def step(self, closure=None):\n self.optimizer.step(closure)", "title": "" }, { "docid": "c1a4a843177bd14a8672a9f02b6fdd91", "score": "0.62027717", "text": "def optimize(opt: Optimizer, loss: Tensor) -> None:\n opt.zero_grad()\n loss.backward()\n opt.step()", "title": "" }, { "docid": "de7fb336378abfd25855869f5b7aa345", "score": "0.61987907", "text": "def optimize(self):\n x = self.owner\n # print \"STOP 3\\n\", self.myVector().keys()\n for y in self.myVector().keys():\n if y != x:\n minCost = self.bFord(x, y)\n self.set_cell(x, y, minCost)", "title": "" }, { "docid": "79aeca4cfdae5f56dc7e7a73d9a35096", "score": "0.6177578", "text": "def optimizer_step(self):\n self.optimizer.step()\n if self.lr_scheduler:\n self.lr_scheduler.step()", "title": "" }, { "docid": 
"a96568f0a614fdd2195fe38f49ab1040", "score": "0.61763984", "text": "def _optimizer_update(self):\n raise NotImplementedError()", "title": "" }, { "docid": "ca293cb594d34ee53a13f147bf33c24d", "score": "0.61419415", "text": "def step(self):\n for optimizer in self.optimizers:\n optimizer.step()", "title": "" }, { "docid": "660934475da21f8e841bf11f805a5db4", "score": "0.61336076", "text": "def minimize(self, optimizer=None, losses=[]):\n optimizer.minimize(losses[0])", "title": "" }, { "docid": "bfc0d04a1ce765f441d979a557b4df18", "score": "0.6106998", "text": "def _optimization_backend(self, x0, minimizer_kwargs, niter):\n if niter is None:\n # even though this is convex, there might still be some optimization\n # issues, so we use niter > 1.\n niter = 2\n return self._optimize_shotgun(x0, minimizer_kwargs, niter=niter)", "title": "" }, { "docid": "63cd3f25fd34fc6bbb34bbf5c1644cc6", "score": "0.60931075", "text": "def step(self):\n self._get_new_parameters() # update the dradients and weights stored in the optimizer\n self._compute_direction() # update direction\n self._compute_step_size() # update step size\n\n # do the step\n for i,param in enumerate(self.model.params):\n param[0][0].add_(- self.step_size * self.directions[i])\n param[0][1].add_(- self.step_size * param[1][1])\n\n # update optimizer parameters and do quasi-update\n self._update_parameters()\n self._quasi_update()", "title": "" }, { "docid": "a9c3aa515a4e706c4586a81a10f001ce", "score": "0.6055706", "text": "def optimize_params_s(self):\n # Forward-pass\n self.forward_e()\n self.forward_s()\n\n # Backward-pass\n # nets\n self.optimizer_s.zero_grad()\n self.backward_s()\n self.optimizer_s.step()", "title": "" }, { "docid": "a21ebc72026d2c815a08686701941bd9", "score": "0.6053677", "text": "def add_optimization(self):\n raise NotImplementedError", "title": "" }, { "docid": "634da9ba5229b596d003e3b590861ff4", "score": "0.60357124", "text": "def step_par_0():\n logging.info(\"par 0\")\n optimizer = create_optimizer()\n optimizer.apply_parallelism()\n return optimizer", "title": "" }, { "docid": "76c08a1624584dfa23c02d0d728fb0d4", "score": "0.60174406", "text": "def _reduce(self, x, y):\n\n n_samples, n_features = x.shape\n r = y.shape[1]\n N = int((r ** 2 + 3 * r) / 2.0)\n\n # If PL and PQ are passed, make sure dimensions/symmetries are correct\n self._check_P_matrix(r, n_features, N)\n\n # Set initial coefficients\n if self.use_constraints and self.constraint_order.lower() == \"target\":\n self.constraint_lhs = reorder_constraints(\n self.constraint_lhs, n_features, output_order=\"target\"\n )\n print(self.coef_)\n coef_sparse = self.coef_.T\n\n # Print initial values for each term in the optimization\n row = [\"Iteration\", \"Data Error\", \"Stability Error\", \"L1 Error\"]\n print(\"{: >10} | {: >10} | {: >10} | {: >10}\".format(*row))\n\n # initial A\n if self.A0 is not None:\n A = self.A0\n elif np.any(self.PQ != 0.0):\n A = np.diag(self.gamma * np.ones(r))\n else:\n A = np.diag(np.zeros(r))\n self.A_history_.append(A)\n\n # initial guess for m\n if self.m0 is not None:\n m = self.m0\n else:\n np.random.seed(1)\n m = (np.random.rand(r) - np.ones(r)) * 2\n self.m_history_.append(m)\n\n # Precompute some objects for optimization\n x_expanded = np.zeros((n_samples, r, n_features, r))\n for i in range(r):\n x_expanded[:, i, :, i] = x\n x_expanded = np.reshape(x_expanded, (n_samples * r, r * n_features))\n xTx = np.dot(x_expanded.T, x_expanded)\n xTy = np.dot(x_expanded.T, y.flatten())\n\n # if using acceleration\n tk_prev = 1\n 
m_prev = m\n\n # Begin optimization loop\n objective_history = []\n for k in range(self.max_iter):\n\n # update P tensor from the newest m\n mPQ = np.tensordot(m, self.PQ, axes=([0], [0]))\n p = self.PL - mPQ\n Pmatrix = p.reshape(r * r, r * n_features)\n\n # update w\n coef_prev = coef_sparse\n if self.evolve_w:\n if self.relax_optim:\n if self.threshold > 0.0:\n coef_sparse = self._solve_sparse_relax_and_split(\n r, n_features, x_expanded, y, Pmatrix, A, coef_prev\n )\n print(coef_sparse)\n else:\n pTp = np.dot(Pmatrix.T, Pmatrix)\n H = xTx + pTp / self.eta\n P_transpose_A = np.dot(Pmatrix.T, A.flatten())\n coef_sparse = self._solve_nonsparse_relax_and_split(\n H, xTy, P_transpose_A, coef_prev\n )\n else:\n m, coef_sparse = self._solve_direct_cvxpy(\n r, n_features, x_expanded, y, Pmatrix, coef_prev\n )\n\n # If problem over xi becomes infeasible, break out of the loop\n if coef_sparse is None:\n coef_sparse = coef_prev\n break\n\n if self.relax_optim:\n m_prev, m, A, tk_prev = self._solve_m_relax_and_split(\n r, n_features, m_prev, m, A, coef_sparse, tk_prev\n )\n\n # If problem over m becomes infeasible, break out of the loop\n if m is None:\n m = m_prev\n break\n self.history_.append(coef_sparse.T)\n PW = np.tensordot(p, coef_sparse, axes=([3, 2], [0, 1]))\n\n # (m,A) update finished, append the result\n self.m_history_.append(m)\n self.A_history_.append(A)\n eigvals, eigvecs = np.linalg.eig(PW)\n self.PW_history_.append(PW)\n self.PWeigs_history_.append(np.sort(eigvals))\n\n # update objective\n objective_history.append(self._objective(x, y, coef_sparse, A, PW, k))\n\n if (\n self._m_convergence_criterion() < self.tol_m\n and self._convergence_criterion() < self.tol\n ):\n # Could not (further) select important features\n break\n if k == self.max_iter - 1:\n warnings.warn(\n \"TrappingSR3._reduce did not converge after {} iters.\".format(\n self.max_iter\n ),\n ConvergenceWarning,\n )\n\n self.coef_ = coef_sparse.T\n self.objective_history = objective_history", "title": "" }, { "docid": "e68b50c6ade8c8fd96fa1b860d2a7990", "score": "0.60038316", "text": "def step(self, optimizer) -> None: # type: ignore\n # using layerwise gradient scaling\n if self._apply_layerwise_scaling:\n self._check_for_inf_or_nan()\n inf_nan_found = any(elt.found_inf_or_nan for elt in self.layer_info)\n\n if not inf_nan_found:\n optimizer.step()\n self._update_scale()\n # not using layerwise gradient scaling\n else:\n optimizer.step()", "title": "" }, { "docid": "9ac7c3d850971faa0d278efefe25a567", "score": "0.59815884", "text": "def optimize_parameters(self):", "title": "" }, { "docid": "565649155496ecd31d5e244c54f2d3bf", "score": "0.5979083", "text": "def optimize(self, model: onnx.ModelProto) -> onnx.ModelProto:", "title": "" }, { "docid": "8026c09b8fa82ef3a9ccfe6fc43c6aa5", "score": "0.5968146", "text": "def optimize(self, optimizer=None, start=None, **kwargs):\n self.inference_method.on_optimization_start()\n try:\n super(GP, self).optimize(optimizer, start, **kwargs)\n except KeyboardInterrupt:\n print(\"KeyboardInterrupt caught, calling on_optimization_end() to round things up\")\n self.inference_method.on_optimization_end()\n raise", "title": "" }, { "docid": "f410b9c0df08f777897137feb15878c1", "score": "0.59429806", "text": "def optimize(node, environment):\r\n optimizer = Optimizer(environment)\r\n return optimizer.visit(node)", "title": "" }, { "docid": "444a44ec7848233ab95b9329919020a4", "score": "0.59344435", "text": "def optimize(self):\n if self.get_memory_length() >= self.batch_size:\n self.loss 
= self.DQN.optimize()", "title": "" }, { "docid": "182126896f122c658964ada9062278c9", "score": "0.59219223", "text": "def runOptimizationRoutine(self):\n start_time = u.Helpers.getStartTime()\n acor = a.AcorContinuousDomain(n_pop=self.__n_pop, \n n_vars=self.__n_vars, \n cost_func=self.__cost_func, \n domain_bounds=self.__domain_bounds)\n print(f\"The main loop computation is now running..\")\n print(f\"{c.HelperConstants.CARRIAGE_RETURN}\")\n print(f\"{c.HelperConstants.CARRIAGE_RETURN}\")\n acor.runMainLoop()\n self.__solutions = acor.best_solutions\n print(f\"Now Plotting the optimization performance..\")\n print(f\"{c.HelperConstants.CARRIAGE_RETURN}\")\n print(f\"{c.HelperConstants.CARRIAGE_RETURN}\")\n print(f\"{c.HelperConstants.CARRIAGE_RETURN}\")\n self.__plotAlgorithmperformance()\n run_time = u.Helpers.computeTotalRunTime(start_time)\n result = u.Helpers.printAcoResults(acor.final_best_solution)\n print(f\"Best soltion:{c.HelperConstants.CARRIAGE_RETURN}{c.HelperConstants.CARRIAGE_RETURN}{result}\")\n print(f\"ACO compute time is: {run_time}\")\n print(f\"{c.HelperConstants.CARRIAGE_RETURN}\")\n print(f\"{c.HelperConstants.CARRIAGE_RETURN}\")", "title": "" }, { "docid": "cf79a2919d864c9fe1bc45713ade3a79", "score": "0.591431", "text": "def optimize(self):\n # compute fake images and reconstruction images.\n self.forward()\n set_requires_grad(self.netG_S2O, False)\n # set gradients to zero\n self.optimizer.zero_grad()\n # calculate gradients\n self.backward()\n # update only segnet weights weights\n self.optimizer.step()", "title": "" }, { "docid": "aeb50f8a74788b25b76bb7b4f5d6436c", "score": "0.5897733", "text": "def optimise(self):\n if self.kernel is None:\n raise RuntimeError('please build the kernel first')\n self.kernel[:] = self.param_kernel", "title": "" }, { "docid": "b303d744788a9b701e6a7a37512d904c", "score": "0.5894271", "text": "def _step(self):\n # Make a minibatch of training data\n\n self.batch_data.mk_minibatch(verbose=False) # verbose to False if you dont want to see the img_ids from the minibatch\n\n # Compute loss and gradient\n loss, grads = self.model.loss(self.batch_data, eval_mode=False)\n self.loss_history.append(loss)\n\n # Perform a parameter update\n for p, w in self.model.params.iteritems():\n dw = grads[p]\n if dw is None: # when usefinetune is False, some gradients will be None #todo: check if finetuning is done properly\n continue\n config = self.optim_configs[p]\n next_w, next_config = self.update_rule(w, dw, config)\n self.model.params[p] = next_w\n self.optim_configs[p] = next_config", "title": "" }, { "docid": "4d25e51fa39ad876df22ee7f02f6befd", "score": "0.58913666", "text": "def _optimize(self, loss):\n if self.before_backward_fn is not None:\n loss = self.before_backward_fn(loss)\n loss.backward()\n if self.after_backward_fn is not None:\n self.after_backward_fn()\n self.optimizer.minimize(loss,\n grad_clip=self.grad_clip,\n parameter_list=self.parameters())\n self.clear_gradients()\n return", "title": "" }, { "docid": "c285534b348e332a175d80199d75c9a0", "score": "0.5884347", "text": "def do_a_step(self):\n self.model.train()\n self.optimizer.zero_grad()\n prediction = self.model(self.feature_indices, self.feature_values)\n loss = torch.nn.functional.nll_loss(prediction[self.train_nodes], self.target[self.train_nodes])\n loss = loss + (self.args.lambd/2)*(torch.sum(self.model.layer_2.weight_matrix**2))\n loss.backward()\n self.optimizer.step()", "title": "" }, { "docid": "6b550420c8b5ef6c70628007aa41f9d6", "score": "0.5862719", "text": "def 
run_optimizer(cls):\n debug('-------------- OPTIMIZATIONS ------------------')\n cls.optimizer = LatteOptimizer(cls.codes)\n cls.optimizer.run_all(max_passes=Flags.optimizer_passes)", "title": "" }, { "docid": "eeb445191a4b0ddc581726bb3966a60a", "score": "0.5782003", "text": "def optimizer_step(self, loss: torch.Tensor, model: Model, optimizer: Optimizer) -> None:\n self.scaler.step(optimizer)\n self.scaler.update()", "title": "" }, { "docid": "eeb445191a4b0ddc581726bb3966a60a", "score": "0.5782003", "text": "def optimizer_step(self, loss: torch.Tensor, model: Model, optimizer: Optimizer) -> None:\n self.scaler.step(optimizer)\n self.scaler.update()", "title": "" }, { "docid": "6e4d49b14ae4da4428a95059bf1c0fe4", "score": "0.57687896", "text": "def optimize_parameters(self):\r\n self.forward() # first call forward to calculate intermediate results\r\n\r\n self.set_requires_grad([self.netD_B, self.netD_Ot, self.netD_Os], False)\r\n self.optimizer_G.zero_grad() # clear network G's existing gradients\r\n self.backward_G() # calculate gradients for network G\r\n self.optimizer_G.step() # update gradients for network G\r\n\r\n self.set_requires_grad([self.netD_B, self.netD_Ot, self.netD_Os], True)\r\n self.optimizer_B.zero_grad()\r\n self.backward_D_B()\r\n self.optimizer_B.step()\r\n\r\n self.optimizer_D_Ot.zero_grad()\r\n self.backward_D_Ot()\r\n self.optimizer_D_Ot.step()\r\n\r\n self.optimizer_D_Os.zero_grad()\r\n self.backward_D_Os()\r\n self.optimizer_D_Os.step()", "title": "" }, { "docid": "0bb529a797437a55d486aa6df6918119", "score": "0.5749301", "text": "def optimize(self):\n state_best, value_best = self.anneal()\n if self.info is not None:\n self.info['total_time'] = time.time() - self.start \n self.info['states_considered'] = self.steps\n self.info['performance'] = self.steps / self.info['total_time']\n \n if self.direction == 'max':\n return -value_best, state_best\n else:\n return value_best, state_best", "title": "" }, { "docid": "88a2e31cbcf6fdf5f6222523fdc29930", "score": "0.5747551", "text": "def post_processing(multiModalModel, linksNode, PT, BK, origin, destination, parameters, optimization_time,\n setOfNonWalkingModes, modesConsideredWaking, set_of_Edges,\n preprocessing_time, heuristic_objvalue, y):\n\n dictOfSolution = dict()\n if multiModalModel.status == gb.GRB.Status.OPTIMAL:\n dictOfSolution['y'] = list()\n dictOfSolution['w'] = list()\n for i, j, m in linksNode:\n if y[i, j, m].x >= 0.8:\n dictOfSolution['y'].append((i, j, m))\n if m == 'connectionLayer':\n dictOfSolution['w'].append((i.split('+')[0], i.split('+')[1], j.split('+')[1]))\n dictOfSolution['pt'] = PT.x\n dictOfSolution['bk'] = BK.x\n\n dictOfSolution['orderedSolutionTour'] = orderTour(dictOfSolution['y'], destination=destination,\n origin=origin)\n\n printNewChaptToConsole('end solution procedure')\n\n # get all objective function criteria values by recalculating their objective function contribution\n inVehicleTime = 0\n inVehicleTimePT = 0\n inVehicleTimeDrive = 0\n inVehicleTimeBK = 0\n for i, j, m in linksNode.select('*', '*', setOfNonWalkingModes):\n if m == 'drive':\n inVehicleTimeDrive += set_of_Edges[(i, j, m)][0] * y[i, j, m].x\n inVehicleTime += set_of_Edges[(i, j, m)][0] * y[i, j, m].x\n elif m == 'bike':\n inVehicleTimeBK += set_of_Edges[(i, j, m)][0] * y[i, j, m].x\n inVehicleTime += set_of_Edges[(i, j, m)][0] * y[i, j, m].x\n else:\n inVehicleTimePT += set_of_Edges[(i, j, m)][0] * y[i, j, m].x\n inVehicleTime += set_of_Edges[(i, j, m)][0] * y[i, j, m].x\n dictOfSolution['Disutility'] = 
multiModalModel.objVal\n dictOfSolution['inVehicleTime'] = inVehicleTime\n dictOfSolution['inVehicleTimePT'] = inVehicleTimePT\n dictOfSolution['inVehicleTimeDrive'] = inVehicleTimeDrive\n dictOfSolution['inVehicleTimeBK'] = inVehicleTimeBK\n walkingTime = 0\n for (i, j, m) in linksNode.select('*', '*', modesConsideredWaking):\n walkingTime += set_of_Edges[(i, j, m)][0] * y[i, j, m].x\n dictOfSolution['walkingTime'] = walkingTime\n waitingTime = 0\n for (i, j, m) in linksNode.select('*', '*', 'connectionLayer'):\n waitingTime += 0.5 * set_of_Edges[(i, j, m)][1] * y[i, j, m].x\n dictOfSolution['waitingTime'] = waitingTime\n cost = parameters['fixCost_PublicTransport'] * PT.x + parameters['fixCost_Bike'] * BK.x\n for (i, j, m) in linksNode.select('*', '*', 'drive'):\n cost += parameters['varCost_Taxi'] * set_of_Edges[i, j, m][2] * y[i, j, m].x\n for (i, j, m) in linksNode.select('*', '*', 'connectionLayer'):\n cost += y[i, j, m].x * set_of_Edges[(i, j, m)][3]\n dictOfSolution['cost'] = cost\n\n dictOfSolution['optimizationTime'] = optimization_time\n dictOfSolution['preprocessingTime'] = preprocessing_time\n dictOfSolution['Gap'] = round(((heuristic_objvalue - multiModalModel.objval)/multiModalModel.objval),3)\n dictOfSolution['maxNumber_of_Changes'] = parameters['maxNumber_of_Changes']\n\n return dictOfSolution", "title": "" }, { "docid": "6b9e693cdc5a936c6f028b316e20374c", "score": "0.57335037", "text": "def _optimization_shgo(self, x0, minimizer_kwargs, niter):\n if niter is None:\n niter = self._default_hops\n\n result = shgo(func=self.objective,\n bounds=minimizer_kwargs['bounds'],\n constraints=minimizer_kwargs['constraints'],\n iters=niter,\n )\n\n if result.success: # pragma: no cover\n return result", "title": "" }, { "docid": "c22a9acf79964c962f742c4664f433c4", "score": "0.57204306", "text": "def optimize(self, step, writer, current_lr):\n train_start = time.time()\n mb_infos_vals = []\n for grad_step in range(self.gradient_steps):\n if step < self.batch_size or step < self.learning_starts:\n break\n if len(self.replay_buffer) < self.batch_size:\n break\n\n self.n_updates += 1\n # Update policy and critics (q functions)\n mb_infos_vals.append(self._train_step(step, writer, current_lr))\n\n if (step + grad_step) % self.target_update_interval == 0:\n # Update target network\n self.sess.run(self.target_update_op)\n if self.n_updates > 0:\n # print(\"SAC training duration: {:.2f}s\".format(time.time() - train_start))\n pass\n return mb_infos_vals", "title": "" }, { "docid": "95b5d1876cfa57163a5ceba6ed4b3f34", "score": "0.5718974", "text": "def perform_optimizations( case, adapted_ct_name, adapted_plan, adapted_beam_set, full_ptv_name, presc_new, presc_delivered, remaining_fractions ):\r\n\r\n for ii in range(NUM_OPTIMIZATIONS):\r\n \r\n print \" Optimization \", ii+1\r\n adapted_plan.PlanOptimizations[0].RunOptimization()\r\n \r\n if RESCALE_BETWEEN_OPTIMIZATIONS:\r\n \r\n print \" Rescaling MU/fx for prescription to full (non _ed) PTV\\\" \"\r\n if full_ptv_name not in [ geom.OfRoi.Name for geom in case.PatientModel.StructureSets[adapted_ct_name].RoiGeometries ]:\r\n raise AdaptPlanException(\"{} does not exist in rescan\".format( full_ptv_name ))\r\n elif ( not case.PatientModel.StructureSets[adapted_ct_name].RoiGeometries[full_ptv_name].HasContours() ):\r\n raise AdaptPlanException(\"{} has no volume \".format(full_ptv_name)) \r\n\r\n ### Do this manually so I can check validity and undo if scaling not possible \r\n #beamset_prescription = presc_new - presc_delivered\r\n 
#adapted_beam_set.NormalizeToPrescription(DspName=None, RoiName=full_ptv_name, DoseValue=beamset_prescription,\r\n # DoseVolume=50, PrescriptionType=\"MedianDose\", LockedBeamNames=None, EvaluateAfterScaling=True)\r\n \r\n D50_fx = adapted_beam_set.FractionDose.GetDoseAtRelativeVolumes(RoiName=full_ptv_name, RelativeVolumes=[0.5])[0]\r\n scale_MU = 1.0*( presc_new - presc_delivered ) / ( D50_fx * remaining_fractions )\r\n \r\n print \" scale MU by: \", scale_MU\r\n for bm in adapted_beam_set.Beams:\r\n bm.BeamMU = bm.BeamMU * scale_MU\r\n \r\n # Check rescaled MUs per segment are ok, revert if not. # xx does it actually allow me to change it if not feasible?\r\n if (adapted_beam_set.MachineFeasibilityTest(TreatmentMachineName = MACHINE_FOR_OPT) != ''):\r\n print(\" !!! Beam is infeasible\")\r\n if( \" Minimum MU per arc segment\" in adapted_beam_set.MachineFeasibilityTest(TreatmentMachineName = MACHINE_FOR_OPT) ):\r\n print(\" !!! Rescaled MUs were not valid; reverting\")\r\n for bm in adapted_beam_set.Beams:\r\n bm.BeamMU = bm.BeamMU / scale_MU\r\n else:\r\n raise AdaptPlanException(\"Beam infeasible for reason other than MU per segment\")", "title": "" }, { "docid": "b83c1e90b24b33e473c30a4159ed902f", "score": "0.5709218", "text": "def optimize_params_er(self):\n # Forward-pass\n self.forward_er()\n\n # Backward-pass\n # nete & netr\n self.optimizer_e.zero_grad()\n self.optimizer_r.zero_grad()\n self.backward_er()\n self.optimizer_e.step()\n self.optimizer_r.step()", "title": "" }, { "docid": "56f673bc5e3ab5785074b031e8a45230", "score": "0.5706975", "text": "def step(self, closure, debug = False):\n assert len(self.param_groups) == 1\n\n # Make sure the closure is always called with grad enabled\n closure = torch.enable_grad()(closure)\n\n group = self.param_groups[0]\n lr = group['lr']\n pre_iter = group['pre_iter']\n post_iter = group['post_iter']\n opt=group['opt']\n m=group['m']\n l=group['l']\n line_search_fn = group['line_search_fn']\n\n # preoptimization\n pre_loss, pre_flat_grad, pre_weights = self._opt_iter(closure, opt, pre_iter)\n # NOTE: LBFGS has only global state, but we register it as state for\n # the first param, because this helps with casting in load_state_dict\n state = self.state[self._params[0]]\n state.setdefault('n_cycles', 0)\n state.setdefault('func_eval', 0)\n\n\n # tensors cached in state (for tracing)\n if len(pre_flat_grad) > 1:\n prev_flat_grad = pre_flat_grad[-1]\n prev_loss = pre_loss[-1]\n else:\n prev_flat_grad = state.get('prev_flat_grad')\n prev_loss = state.get('prev_loss')\n \n loss = closure()\n flat_grad = self._gather_flat_grad()\n loss = float(loss)\n pre_loss.append(loss)\n pre_flat_grad.append(flat_grad)\n \n \n\n t = lr\n ls_func_evals = state['func_eval']\n state['n_cycles'] += 1\n\n ############################################################\n # compute GCLC direction\n ############################################################\n error_code = 0\n # compute the Hessian\n with torch.enable_grad():\n H = self._compute_Hessian(closure).clone()\n # compute the restriction and prolongation operators\n if m == 0: # use aggregation\n P = torch.zeros((self._numel(),(self._numel()+1)//2)).index_put_((torch.arange(self._numel()),torch.arange(self._numel())//2),torch.ones(self._numel()))\n R = P.transpose(-2,-1)\n else: # default: use_smallest\n try:\n eig_vals, V = torch.symeig(H, eigenvectors = True) \n except RuntimeError as error:\n print(error)\n error_code = 2 # failed SVD\n opt.step(closure)\n #flat_grad = self._gather_flat_grad() # to save 
the direction\n opt.zero_grad()\n \n if error_code == 0: # default setting: use_smallest!\n P = V[:,:-m]\n R = P.transpose(-2,-1)\n \n # update the learning rate TODO: error_code\n if l > 0:\n if m == 0:\n try:\n eig_vals,_ = torch.symeig(H, eigenvectors = False) \n except RuntimeError as error:\n print(error)\n error_code = 1 # no new LR\n new_lr = lr\n if l == 1:\n new_lr = 1/eig_vals[-1]\n else:\n new_lr = 2/(eig_vals[-1] + eig_vals[-l])\n #print(new_lr)\n group['lr'] = new_lr.item()\n \n # construct the CLC direction\n try:\n d = torch.matmul(P, torch.solve(torch.matmul(R, flat_grad).unsqueeze(1),\n torch.matmul(R,torch.matmul(H, P)))[0]).squeeze()\n except RuntimeError as error:\n print(error)\n error_code = 3 # system of equations can not be solved\n opt.step(closure)\n #flat_grad = self._gather_flat_grad() # to save the direction\n opt.zero_grad()\n \n\n # if prev_flat_grad is None:\n # prev_flat_grad = flat_grad.clone(memory_format=torch.contiguous_format)\n # else:\n # prev_flat_grad.copy_(flat_grad)\n # prev_loss = loss\n\n ############################################################\n # compute step length\n ############################################################\n # reset initial guess for step size\n if error_code < 2: \n # directional derivative\n gtd = flat_grad.dot(-d) # g * d\n \n # optional line search: user function\n ls_func_evals = 0\n if line_search_fn is not None:\n # perform line search, using user function\n if line_search_fn == \"strong_wolfe\":\n x_init = self._clone_param()\n \n def obj_func(x, t, d):\n return self._directional_evaluate(closure, x, t, d)\n \n loss, flat_grad, t, ls_func_evals = _strong_wolfe(\n obj_func, x_init, t, -d, loss, flat_grad, gtd)\n print(t)\n self._add_grad(-t, d)\n elif line_search_fn == \"wolfe12\":\n x_init = self._clone_param_to_vector()\n def f(x):\n return self._net_loss(closure, x)\n def fprime(x):\n return self._d_net_loss(closure, x)\n \n if gtd < 0:\n lsearch = _line_search_wolfe12(f, fprime, x_init, -d, flat_grad, loss, prev_loss)\n if lsearch[0] is None:\n lsearch = _line_search_wolfe12(f, fprime, x_init, d, flat_grad, loss, prev_loss)\n \n else:\n lsearch = _line_search_wolfe12(f, fprime, x_init, d, flat_grad, loss, prev_loss) \n if lsearch[0] is None:\n lsearch = _line_search_wolfe12(f, fprime, x_init, -d, flat_grad, loss, prev_loss)\n \n t, func_evals, grad_evals, loss, prev_loss, flat_grad, ls_func_evals, tot_ge = lsearch\n \n else: \n raise RuntimeError(\"only 'strong_wolfe' or 'wolfe12' is supported\")\n if t is None:\n error_code = 4 # line-search failed\n opt.step(closure)\n #flat_grad = self._gather_flat_grad() # to save the direction \n opt.zero_grad() \n loss = closure().item()\n flat_grad = self._gather_flat_grad()\n \n else:\n print('fixed{}'.format(t))\n # no line search, simply move with fixed-step\n self._add_grad(t, -d)\n loss = float(closure())\n flat_grad = self._gather_flat_grad()\n else:\n loss = closure()\n flat_grad = self._gather_flat_grad()\n \n post_loss, post_flat_grad, post_weights = self._opt_iter(closure, opt, post_iter)\n if len(post_loss) > 0:\n loss = post_loss[-1]\n flat_grad = post_flat_grad[-1]\n post_loss.append(closure().item())\n post_flat_grad.append(self._gather_flat_grad())\n\n \n state['prev_flat_grad'] = flat_grad\n state['prev_loss'] = loss\n state['all_loss'] = pre_loss + post_loss \n state['error_code'] = error_code\n state['func_eval'] = ls_func_evals\n if debug:\n state['all_grad'] = pre_flat_grad + post_flat_grad\n state['H'] = H.clone()\n state['all_weights'] = 
pre_weights + post_weights\n state['d'] = d\n state['t'] = t\n state['x_init'] = x_init\n \n return loss, error_code", "title": "" }, { "docid": "02f9a6fb861b8a7825d9b8fa5675b877", "score": "0.57034737", "text": "def optimize(self, target_device):\n pass", "title": "" }, { "docid": "4630996ab79ebd6c42d7514fdb55ed53", "score": "0.57033235", "text": "def reload():\n optimizer.evolve()", "title": "" }, { "docid": "52ef330dd77e519925e76080d28e3395", "score": "0.56963843", "text": "def optimizer():\n\n res = opt.minimize(cost_function, np.array([0, 0, 0]),\n args=((plane_ext, plane_f_ext, 0)), method='Powell',\n options={'disp':True, 'maxiter':None, 'xtol':1})\n print(res)\n fval = res.fun\n\n #\n # print final result\n #\n print('Percentage of fitted points: ' + str((1 - fval/2 / illumpoints_avg) * 100))\n\n cost_function(res.x, plane_ext, plane_f_ext, plot=True)\n return res", "title": "" }, { "docid": "51cd2ffbb5ea47364879cc578cb0970e", "score": "0.56781936", "text": "def optimize(self):\n s1, a1, r1, s2 = self.ram.sample(TRAIN_BATCH_SIZE)\n\n s1 = Variable(torch.from_numpy(s1))\n a1 = Variable(torch.from_numpy(a1))\n r1 = Variable(torch.from_numpy(r1))\n s2 = Variable(torch.from_numpy(s2))\n\n s1 = s1.to(device)\n a1 = a1.to(device)\n r1 = r1.to(device)\n s2 = s2.to(device)\n\n # ---------------------- optimize critic ----------------------\n # Use target actor exploitation policy here for loss evaluation\n a2 = self.target_actor.forward(s2).detach()\n next_val = torch.squeeze(self.target_critic.forward(s2, a2).detach())\n y_expected = r1 + GAMMA * next_val\n y_predicted = torch.squeeze(self.critic.forward(s1, a1))\n # compute critic loss, and update the critic\n loss_critic = F.smooth_l1_loss(y_predicted, y_expected)\n loss_critic_value = loss_critic.item()\n self.critic_optimizer.zero_grad()\n loss_critic.backward()\n self.critic_optimizer.step()\n\n # ---------------------- optimize actor ----------------------\n pred_a1 = self.actor.forward(s1)\n loss_actor = -1 * torch.sum(self.critic.forward(s1, pred_a1))\n loss_actor_value = loss_actor.item()\n self.actor_optimizer.zero_grad()\n loss_actor.backward()\n self.actor_optimizer.step()\n\n soft_update(self.target_actor, self.actor, TAU)\n soft_update(self.target_critic, self.critic, TAU)\n\n return loss_critic_value, loss_actor_value", "title": "" }, { "docid": "a952f2fcc8135296243990eb3738a864", "score": "0.56753254", "text": "def agent_optimize(self, experiences):\n pass", "title": "" }, { "docid": "5cd5893e3fa6981754c33898e7d0881d", "score": "0.56694514", "text": "def minimize(self,expr,var_list=[]):\n assert False,'abstract method called'", "title": "" }, { "docid": "3f3268d1bad3a23364b60365b03a0e07", "score": "0.56574297", "text": "def _step_computation(self):\n self._activation += self.get_change(self._activation)\n self._output_buffer = self.compute_thresholded_activation(self._activation)\n self.write_activation_log()", "title": "" }, { "docid": "ca3a2d41251358d63b5a4f717b1fcac2", "score": "0.5654621", "text": "def step(model, x, y):\n # value_and_grad will return cost(model, x, y) and dcost(...)/dmodel.\n # The 'model' argument can be omitted: by default the derivative wrt\n # the first argument is returned.\n _cost, dmodel = value_and_grad(cost, 'model')(model, x, y)\n #return _cost, model - dmodel * lr\n return _cost, model - dmodel", "title": "" }, { "docid": "2990ce13fe88176973a88bb235b1efa3", "score": "0.56423175", "text": "def _evaluate(self,\n x, #\n out,\n *args,\n **kwargs):\n\n objective_values = []\n for k, individual_ 
in enumerate(x):\n # Stage 0: Git restore\n logger.debug(\"Executing git restore.\")\n git_restore(config.PROJECT_PATH)\n logger.debug(\"Updating understand database after git restore.\")\n update_understand_database(config.UDB_PATH)\n\n # Stage 1: Execute all refactoring operations in the sequence x\n logger.debug(f\"Reached Individual with Size {len(individual_[0])}\")\n for refactoring_operation in individual_[0]:\n refactoring_operation.do_refactoring()\n # Update Understand DB\n logger.debug(f\"Updating understand database after {refactoring_operation.name}.\")\n update_understand_database(config.UDB_PATH)\n\n # Stage 2:\n if self.mode == 'single':\n # Stage 2 (Single objective mode): Considering only one quality attribute, e.g., testability\n score = testability_main(config.UDB_PATH, initial_value=config.CURRENT_METRICS.get(\"TEST\", 1.0))\n else:\n # Stage 2 (Multi-objective mode): Considering one objective based on average of 8 objective\n arr = Array('d', range(self.n_obj_virtual))\n if self.evaluate_in_parallel:\n # Stage 2 (Multi-objective mode, parallel): Computing quality attributes\n p1 = Process(target=calc_qmood_objectives, args=(arr,))\n if self.n_obj_virtual == 8:\n p2 = Process(target=calc_testability_objective, args=(config.UDB_PATH, arr,))\n p3 = Process(target=calc_modularity_objective, args=(config.UDB_PATH, arr,))\n p1.start(), p2.start(), p3.start()\n p1.join(), p2.join(), p3.join()\n else:\n p1.start()\n p1.join()\n score = sum([i for i in arr]) / self.n_obj_virtual\n else:\n # Stage 2 (Multi-objective mode, sequential): Computing quality attributes\n qmood_quality_attributes = DesignQualityAttributes(udb_path=config.UDB_PATH)\n o1 = qmood_quality_attributes.average_sum\n if self.n_obj_virtual == 8:\n o2 = testability_main(config.UDB_PATH, initial_value=config.CURRENT_METRICS.get(\"TEST\", 1.0))\n o3 = modularity_main(config.UDB_PATH, initial_value=config.CURRENT_METRICS.get(\"MODULE\", 1.0))\n else:\n o2 = 0\n o3 = 0\n del qmood_quality_attributes\n score = (o1 * 6. 
+ o2 + o3) / self.n_obj_virtual\n\n # Stage 3: Marshal objectives into vector\n objective_values.append([-1 * score])\n logger.info(f\"Objective values for individual {k} in mode {self.mode}: {[-1 * score]}\")\n\n # Stage 4: Marshal all objectives into out dictionary\n out['F'] = np.array(objective_values, dtype=float)", "title": "" }, { "docid": "6ce5129a116cbc475647a73f87dbbff7", "score": "0.56392694", "text": "def step(self, x,y):\n result = self.forward(x)\n optimizer = torch.optim.SGD(self.parameters(), lr=self.lrate)\n optimizer.zero_grad()\n loss = self.loss_fn(result.squeeze(), y)\n loss.backward()\n optimizer.step()\n # return loss.detach().cpu().numpy()\n return loss.item()", "title": "" }, { "docid": "925d8b8254f669a3862285393de76d81", "score": "0.56339663", "text": "def step(self, closure=None):\n\n\t\tdist = self.kwargs['dist']\n\t\tdistgroup = self.kwargs['distgroup']\n\t\tneighbors = self.kwargs['neighbors']\n\n\t\tcross_grad = self.kwargs['cross_grad']\n\n\n\t\t# Find this worker's index in the \"simplified\" pi and cross_grad \n\t\tcross_grad_len = [len(i) for i in cross_grad]\n\t\tworker_index = np.where(np.asarray(cross_grad_len) == 0)[0][0]\n\n\t\tloss = None\n\t\tif closure is not None:\n\t\t\tloss = closure()\n\t\tif not isinstance(self.state, defaultdict):\n\t\t\tself.state = defaultdict(dict)\n\n\t\tfor i, group in enumerate(self.param_groups): # always 1\n\t\t\tmomentum = group['momentum']\n\t\t\tdampening = group['dampening']\n\n\t\t\tfor j, p in enumerate(group['params']):\n\t\t\t\tif p.grad is None:\n\t\t\t\t\tcontinue\n\t\t\t\td_p = p.grad.data\n\t\t\t\tassert (torch.isfinite(p.data).any()), \"p.data before update Norm2 is %s\"%(p.data.norm(2))\n\t\t\t\tassert (torch.isfinite(d_p).any()), \"d_p Norm2 is %s\"%(d_p.norm(2))\n\n\t\t\t\tgradsize = d_p.size()\n\t\t\t\tg_tilda = torch.zeros((len(self.local_neigh), *gradsize)).to(self.device)\n\n\t\t\t\tempty_counter = 0\n\t\t\t\tfor i in range(len(g_tilda)):\n\t\t\t\t\tif len(cross_grad[i]) == 0: # empty for own agent's grad. -- grab it from d_p directly\n\t\t\t\t\t\tg_tilda[i] = d_p\n\t\t\t\t\t\tempty_counter += 1\n\t\t\t\t\telse:\n\t\t\t\t\t\tg_tilda[i] = cross_grad[i][j] # i-th worker's cross grad. for j-th layer\n\n\t\t\t\tassert empty_counter==1, \"No empty list in cross_grad! 
cross_test is probably buggy!\"\n\n\n\t\t\t\t################################### SGA implementation #################################\n\t\t\t\tg_tilda = self.ProjSGA(g_tilda, gradsize, j, worker_index) #old_v_batch updated\n\n\n\t\t\t\t############################ CDMSGD (but with nesterov=False) ##############################\n\t\t\t\tcon_buf = [torch.zeros(p.data.size(), dtype=p.data.dtype).to(self.device) for _ in range(len(neighbors))] # Parameters placeholder\n\n\t\t\t\tdist.all_gather(con_buf, p.data, group=distgroup) # Gather parameters from workers to con_buf\n\t\t\t\tbuf = torch.zeros(p.data.size()).to(self.device)\n\n\t\t\t\t# Extract connected agents data only\n\t\t\t\tcon_buf = [con_buf[i] for i in self.local_neigh]\n\n\t\t\t\tfor pival, con_buf_agent in zip(self.pi, con_buf):\n\t\t\t\t\tbuf.add_(other=con_buf_agent, alpha=pival)\n\n\t\t\t\tparam_state = self.state[p]\n\t\t\t\tif 'momentum_buffer' not in param_state:\n\t\t\t\t\tm_buf = param_state['momentum_buffer'] = torch.zeros(p.data.size()).to(self.device)\n\t\t\t\t\tm_buf.mul_(momentum).add_(g_tilda)\n\t\t\t\telse:\n\t\t\t\t\tm_buf = param_state['momentum_buffer']\n\t\t\t\t\tm_buf.mul_(momentum).add_(other=g_tilda, alpha=1-dampening)\n\n\t\t\t\tg_tilda.add_(other=m_buf, alpha=momentum)\n\t\t\t\tp.data = buf.add_(other=g_tilda, alpha=-group['lr'])\n\n\t\t\t\tassert (torch.isfinite(p.data).any()),\"p.data Norm2 after update is %s\"%(p.data.norm(2))\n\n\t\treturn loss", "title": "" }, { "docid": "7c76e6c1a0db2e30f89cccfefd22ee86", "score": "0.56218594", "text": "def step(self, closure=None):\n\n\t\t# # Extract var. added thru Collab()\n\t\tdist = self.kwargs['dist']\n\t\tdistgroup = self.kwargs['distgroup']\n\t\tneighbors = self.kwargs['neighbors']\n\n\t\tloss = None\n\t\tif closure is not None:\n\t\t\tloss = closure()\n\t\tif not isinstance(self.state, defaultdict):\n\t\t\tself.state = defaultdict(dict)\n\n\t\tfor i, group in enumerate(self.param_groups):## Update rule\n\t\t\tweight_decay = group['weight_decay']\n\t\t\tmomentum = group['momentum']\n\t\t\tdampening = group['dampening']\n\n\t\t\tfor j, p in enumerate(group['params']):\n\t\t\t\tif p.grad is None:\n\t\t\t\t\tcontinue\n\t\t\t\td_p = p.grad.data\n\n\t\t\t\tcon_buf = [torch.zeros(p.data.size()).to(self.device) for _ in range(len(neighbors))] # Parameters placeholder\n\t\t\t\tdist.all_gather(con_buf, p.data, group=distgroup) # Gather parameters from workers to con_buf\n\t\t\t\tbuf = torch.zeros(p.data.size()).to(self.device)\n\n\t\t\t\t# Extract connected agents data only\n\t\t\t\tcon_buf = [con_buf[i] for i in self.local_neigh]\n\n\t\t\t\tfor pival, con_buf_agent in zip(self.pi, con_buf):\n\t\t\t\t\tbuf.add_(other=con_buf_agent, alpha=pival)\n\n\t\t\t\tparam_state = self.state[p]\n\t\t\t\tif 'momentum_buffer' not in param_state:\n\t\t\t\t\tm_buf = param_state['momentum_buffer'] = torch.zeros(p.data.size()).to(self.device)\n\t\t\t\t\tm_buf.mul_(momentum).add_(d_p)\n\t\t\t\telse:\n\t\t\t\t\tm_buf = param_state['momentum_buffer']\n\t\t\t\t\tm_buf.mul_(momentum).add_(other=d_p, alpha=1-dampening)\n\n\t\t\t\td_p.add_(other=m_buf, alpha=momentum)\n\t\t\t\tp.data = buf.add_(other=d_p, alpha=-group['lr'])\n\n\n\t\treturn loss", "title": "" }, { "docid": "a93b1d42ae68eafc1a54e70fc9d551fa", "score": "0.5620344", "text": "def _run_optimization_step(optimizer, gradients, global_step):\n # Update operation for Batch Norm\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n\n with tf.variable_scope('train_op'):\n with tf.control_dependencies(update_ops):\n return 
optimizer.apply_gradients(grads_and_vars=gradients, global_step=global_step)", "title": "" }, { "docid": "cb02e0197a23ede6b4b72e73b8c103e7", "score": "0.56175566", "text": "def opt_step(\n    fp_optimizer, tracker, full_batch_size, update_freq, math_mode, world_size\n):\n    if math_mode == \"fp32\":\n        updated = fp_optimizer.step(tracker=tracker, denom=full_batch_size)\n    elif math_mode == \"fp16\":\n        # This results in reducing tensor and dividing by `loss_scale * full_batch_size`\n        # but we divide by world size before reduction to avoid overflow, and\n        # re-multiply after reduction to rescale\n        multiplier = full_batch_size / (world_size * update_freq)\n        denom = world_size * update_freq\n        updated = fp_optimizer.step(tracker=tracker, denom=denom, multiplier=multiplier)\n    else:\n        raise NotImplementedError(\"Unknown math mode {}\".format(math_mode))\n    return updated", "title": "" }, { "docid": "2c35a78342913b397ba916abcd8a2f72", "score": "0.55975497", "text": "def minimize_loss(loss, optimizer: optim.Adam):\n    optimizer.zero_grad()\n    loss.backward()\n    optimizer.step()", "title": "" }, { "docid": "e11891a32eb69c1f7764659a79117ae1", "score": "0.5594334", "text": "def LocalOptimizeMTF(self, target):\n        lopt = self.TheSystem.Tools.OpenLocalOptimization()\n        lopt.Algorithm = constants.OptimizationAlgorithm_DampedLeastSquares\n        lopt.Cycles = constants.OptimizationCycles_Infinite\n        lopt.NumberOfCores = 8\n        print(\"Starting local optimization\") \n        CastTo(lopt, \"ISystemTool\").Run()\n        mf = lopt.InitialMeritFunction\n        counter = 0\n        dcount = 0\n        print(\"Starting loop, mf = \" + str(mf))\n        while mf > target:\n            time.sleep(60)\n            if (lopt.CurrentMeritFunction < mf):\n                dcount = 0\n\n            if dcount > 0: print(\"dcount is \" + str(dcount)) \n            mf = lopt.CurrentMeritFunction\n            print(\"mf = \" + str(mf))\n            counter = counter + 1\n            dcount = dcount + 1\n            if( counter > 500): break\n            if( dcount > 5): break\n        CastTo(lopt, \"ISystemTool\").Cancel()\n        CastTo(lopt, \"ISystemTool\").Close()\n        return(mf)", "title": "" }, { "docid": "578f0a00946cc4eba52857a275f9128d", "score": "0.55928963", "text": "def minimize(self, optimizer=None, losses=[]):\n        for loss in losses:\n            optimizer.minimize(loss)", "title": "" }, { "docid": "d23044ad10be2c462937a63913840eae", "score": "0.559249", "text": "def _optimize_workingset(self, max_iterations=1000):\n        iterations = 0\n        again = True\n        while again:\n            if verbosity >= 5:\n                log.writeln(\"SMO iteration %d\" % iterations)\n            again = False\n            # shuffle instances?\n            for instance in self.instances:\n                if len(instance.hyps) < 2:\n                    continue\n\n                if verbosity >= 5:\n                    log.writeln(\"try to improve instance %s:\" % (instance.instance_id))\n\n                # cache violation inside hyps, needed by both _select_pair and _optimize_pair\n                for hyp in instance.hyps:\n                    hyp.violation = instance.violation(hyp)\n\n                hyps = instance._select_pair()\n                if hyps is None:\n                    if verbosity >= 5:\n                        log.writeln(\"all KKT conditions (almost) satisfied, do nothing\")\n                    continue\n\n                if instance._optimize_pair(*hyps):\n                    again = True\n\n            iterations += 1\n            if iterations >= max_iterations:\n                if verbosity >= 1:\n                    log.writeln(\"SMO: giving up\")\n                
break\n        if verbosity >= 1:\n            log.writeln(\"SMO: finished in %d iterations\" % iterations)\n        if verbosity >= 4:\n            if len(watch_features) > 0:\n                log.writeln(\"new weights: %s\" % (self.mweights * watch_features))\n            log.writeln(\"objective function: %s\" % self.objective())", "title": "" }, { "docid": "a21bf52ee924c5139688e4babef8c5a2", "score": "0.5576887", "text": "def optimize(self, evaluator):\n        raise NotImplementedError(\"'optimize' is not implemented by derived \"\n                                  \"class of 'Algorithm' ,'{}'\"\n                                  .format(self.__class__.__name__))", "title": "" }, { "docid": "1eb63838b8c52d7c8deeb51bed475c67", "score": "0.557548", "text": "def optimize(self, args=None):\n        return self.request('_optimize', pylastica.request.Request.POST, args)", "title": "" }, { "docid": "875dd304aca5fb11874481f8944d10de", "score": "0.55590564", "text": "def optimize_params_d(self):\n        # Forward-pass\n        self.forward_e()\n        self.forward_g()\n        self.forward_sg()\n        self.forward_dg()\n\n        # Backward-pass\n        # nets\n        self.optimizer_d.zero_grad()\n        self.backward_d()\n        self.optimizer_d.step()", "title": "" }, { "docid": "c2c495e0eb2d5302a66e065ad82c7dce", "score": "0.55583864", "text": "def _evaluate(self, x, out, *args, **kwargs):\n\n        objective_values = []\n        for k, individual_ in enumerate(x):\n            # Stage 0: Git restore\n            logger.debug(\"Executing git restore.\")\n            git_restore(config.PROJECT_PATH)\n            logger.debug(\"Updating understand database after git restore.\")\n            update_understand_database(config.UDB_PATH)\n\n            # Stage 1: Execute all refactoring operations in the sequence x\n            logger.debug(f\"Reached an Individual with size {len(individual_[0])}\")\n            for refactoring_operation in individual_[0]:\n                res = refactoring_operation.do_refactoring()\n                # Update Understand DB\n                logger.debug(f\"Updating understand database after {refactoring_operation.name}.\")\n                update_understand_database(config.UDB_PATH)\n\n            # Stage 2:\n            arr = Array('d', range(self.n_obj))\n            if self.evaluate_in_parallel:\n                # Stage 2 (parallel mode): Computing quality attributes\n                p1 = Process(target=calc_qmood_objectives, args=(arr,))\n                if self.n_obj == 8:\n                    p2 = Process(target=calc_testability_objective, args=(config.UDB_PATH, arr,))\n                    p3 = Process(target=calc_modularity_objective, args=(config.UDB_PATH, arr,))\n                    p1.start(), p2.start(), p3.start()\n                    p1.join(), p2.join(), p3.join()\n                else:\n                    p1.start()\n                    p1.join()\n            else:\n                # Stage 2 (sequential mode): Computing quality attributes\n                qmood_quality_attributes = DesignQualityAttributes(udb_path=config.UDB_PATH)\n                arr[0] = qmood_quality_attributes.reusability\n                arr[1] = qmood_quality_attributes.understandability\n                arr[2] = qmood_quality_attributes.flexibility\n                arr[3] = qmood_quality_attributes.functionality\n                arr[4] = qmood_quality_attributes.effectiveness\n                arr[5] = qmood_quality_attributes.extendability\n                if self.n_obj == 8:\n                    arr[6] = testability_main(config.UDB_PATH, initial_value=config.CURRENT_METRICS.get(\"TEST\", 1.0))\n                    arr[7] = modularity_main(config.UDB_PATH, initial_value=config.CURRENT_METRICS.get(\"MODULE\", 1.0))\n\n            if self.verbose_design_metrics:\n                design_metrics = {\n                    \"DSC\": [qmood_quality_attributes.DSC],\n                    \"NOH\": [qmood_quality_attributes.NOH],\n                    \"ANA\": [qmood_quality_attributes.ANA],\n                    \"MOA\": [qmood_quality_attributes.MOA],\n                    \"DAM\": [qmood_quality_attributes.DAM],\n                    \"CAMC\": [qmood_quality_attributes.CAMC],\n                    \"CIS\": [qmood_quality_attributes.CIS],\n                    \"NOM\": [qmood_quality_attributes.NOM],\n                    \"DCC\": [qmood_quality_attributes.DCC],\n                    \"MFA\": [qmood_quality_attributes.MFA],\n                    \"NOP\": 
[qmood_quality_attributes.NOP]\n }\n self.log_design_metrics(design_metrics)\n\n del qmood_quality_attributes\n\n # Stage 3: Marshal objectives into vector\n objective_values.append([-1 * i for i in arr])\n logger.info(f\"Objective values for individual {k}: {[i for i in arr]}\")\n\n # Stage 4: Marshal all objectives into out dictionary\n out['F'] = np.array(objective_values, dtype=float)\n # print('OUT', out['F'])", "title": "" }, { "docid": "a32ef2ee2742c1b2aa1dedaa1805b147", "score": "0.5558151", "text": "def consolidate_optimizer(self):\r\n return False", "title": "" }, { "docid": "7d51bb1166252637c7a22ff8df497f05", "score": "0.5555823", "text": "def optimize(self):\n if self.data_type == int:\n return\n opt_map = self._get_optimization_map()\n self.choices = self._get_optimized_choices(opt_map)\n self.answers = self._get_optimized_answers(opt_map)", "title": "" }, { "docid": "eb25e549c9e3a15448266d613fc633e8", "score": "0.5555753", "text": "def step(self, closure=None):\n # Update the gradient every `update_interval` steps.\n if self.batch_counter % self.update_interval != self.update_interval - 1:\n self.batch_counter += 1\n return None\n\n log_timing = self.verbose_freq > 0 and self.batch_counter % self.verbose_freq == 0\n if log_timing:\n start_time = time.time()\n if self.model_parameters is not None:\n import apex.fp16_utils as fp16_utils\n fp16_utils.model_grads_to_master_grads(self.model_parameters,\n self.master_parameters)\n # TODO: This division might not be in the right place, given that\n # scaling happens right after. Look into this if problems arise.\n if self.loss_scale != 1.0:\n for parameter in self.master_parameters:\n parameter.grad.data = parameter.grad.data / self.loss_scale\n\n for p in self.param_groups[0]['params']:\n if p.grad is not None:\n p.grad.div_(self.update_interval)\n\n loss = self.base_optimizer.step()\n if self.model_parameters is not None:\n import apex.fp16_utils as fp16_utils\n fp16_utils.master_params_to_model_params(self.model_parameters,\n self.master_parameters)\n self.latest_version = self.latest_version.incr()\n if self.num_versions > 1:\n self.buffered_state_dicts = self.queue[0][0]\n self.queue.append(self.get_params(clone=False))\n\n if log_timing:\n print(\"Optimizer step took: %.3f\" % (time.time() - start_time))\n self.batch_counter += 1\n return loss", "title": "" }, { "docid": "863099c4643863bd0b7f7a035abc6b01", "score": "0.55540836", "text": "def run(self, shots: int = 1024) -> None:\n self._optimizer.minimize(shots, self._compute_loss, self._compute_gradient)\n self._minimum_eigenvalue = self._optimizer._loss_history[-1]", "title": "" }, { "docid": "d21f65f8ae9207343709288e491b5adf", "score": "0.5553206", "text": "def _evaluate(self,\n x, #\n out,\n *args,\n **kwargs):\n objective_values = []\n for k, individual_ in enumerate(x):\n # Stage 0: Git restore\n logger.debug(\"Executing git restore.\")\n git_restore(config.PROJECT_PATH)\n logger.debug(\"Updating understand database after git restore.\")\n update_understand_database(config.UDB_PATH)\n\n # Stage 1: Execute all refactoring operations in the sequence x\n logger.debug(f\"Reached Individual with Size {len(individual_[0])}\")\n for refactoring_operation in individual_[0]:\n refactoring_operation.do_refactoring()\n # Update Understand DB\n logger.debug(f\"Updating understand database after {refactoring_operation.name}.\")\n update_understand_database(config.UDB_PATH)\n\n # Stage 2:\n arr = Array('d', range(self.n_obj_virtual))\n if self.evaluate_in_parallel:\n # Stage 2 
(parallel mode): Computing quality attributes\n                p1 = Process(target=calc_qmood_objectives, args=(arr,))\n                p2 = Process(target=calc_testability_objective, args=(config.UDB_PATH, arr,))\n                p3 = Process(target=calc_modularity_objective, args=(config.UDB_PATH, arr,))\n                p1.start(), p2.start(), p3.start()\n                p1.join(), p2.join(), p3.join()\n                o1 = sum([i for i in arr[:6]]) / 6.\n                o2 = arr[6]\n                o3 = arr[7]\n            else:\n                # Stage 2 (sequential mode): Computing quality attributes\n                qmood_quality_attributes = DesignQualityAttributes(udb_path=config.UDB_PATH)\n                o1 = qmood_quality_attributes.average_sum\n                o2 = testability_main(config.UDB_PATH, initial_value=config.CURRENT_METRICS.get(\"TEST\", 1.0))\n                o3 = modularity_main(config.UDB_PATH, initial_value=config.CURRENT_METRICS.get(\"MODULE\", 1.0))\n                del qmood_quality_attributes\n\n            # Stage 3: Marshal objectives into vector\n            objective_values.append([-1 * o1, -1 * o2, -1 * o3])\n            logger.info(f\"Objective values for individual {k}: {[-1 * o1, -1 * o2, -1 * o3]}\")\n\n        # Stage 4: Marshal all objectives into out dictionary\n        out['F'] = np.array(objective_values, dtype=float)", "title": "" }, { "docid": "ec2a231cb65570acc0dd779b6a468a14", "score": "0.55515546", "text": "def _objective(self):\n        pass", "title": "" }, { "docid": "bf2c731b507666039ec7d5c76250cf4e", "score": "0.554253", "text": "def __do_smoothing(self):\n        self.postporcessed_bi_predictions = self.bi_predictions", "title": "" }, { "docid": "53d52b6727980407bd3d84ee5244b474", "score": "0.5540213", "text": "def optimize_parameters(self):\n        pass", "title": "" }, { "docid": "0c7502e874a281e0f89e2a4b4f1a9db0", "score": "0.55313045", "text": "def optimize_problem_loop(self, alpha=1):\r\n        # remove any data that we will not use for analysis\r\n        for service in self.services.values():\r\n            service.estimate_year_data(self.opt_years, self.frequency)\r\n        for service in self.predispatch_services.values():\r\n            service.estimate_year_data(self.opt_years, self.frequency)\r\n        if 'PV' in self.technologies.keys():\r\n            self.technologies['PV'].estimate_year_data(self.opt_years, self.frequency)\r\n\r\n        if 'Deferral' in self.predispatch_services.keys() and not len(self.services.keys()):\r\n            # if Deferral is on, and there is no energy market specified for energy settlement (or other market services)\r\n            # then do not optimize (skip the optimization loop)\r\n            return\r\n        e_logger.info(\"Preparing Optimization Problem...\")\r\n        u_logger.info(\"Preparing Optimization Problem...\")\r\n\r\n        # list of all optimization windows\r\n        periods = pd.Series(copy.deepcopy(self.power_kw.opt_agg.unique()))\r\n        periods.sort_values()\r\n\r\n        # for mpc\r\n        window_shift = 0\r\n\r\n        for ind, opt_period in enumerate(periods):\r\n\r\n            # used to select rows from time_series relevant to this optimization window\r\n            if not self.mpc:\r\n                mask = self.power_kw.loc[:, 'opt_agg'] == opt_period\r\n                # mask_index =\r\n            else:\r\n                mask = self.power_kw['opt_agg'].between(1 + int(self.n_control) * window_shift, int(self.n) + int(self.n_control) * window_shift)\r\n                window_shift += 1\r\n\r\n            # apply past degradation\r\n            storage = self.technologies['Storage']\r\n            storage.apply_past_degredation(ind, self.power_kw, mask, opt_period, self.n)\r\n\r\n            print(time.strftime('%H:%M:%S') + \": Running Optimization Problem 
for \" + str(self.power_kw.loc[mask].index[0]) + \"...\") if self.verbose else None\r\n\r\n # run optimization and return optimal variable and objective costs\r\n results, objective_values = self.optimization_problem(mask, alpha)\r\n\r\n # Add past degrade rate with degrade from calculated period\r\n storage = self.technologies['Storage']\r\n storage.calc_degradation(opt_period, results.index[0], results.index[-1], results['ene'])\r\n\r\n # add optimization variable results to power_kw\r\n if not results.empty and not self.mpc:\r\n self.power_kw = Lib.update_df(self.power_kw, results)\r\n elif not results.empty and self.mpc:\r\n results = results[:int(self.n_control)]\r\n self.power_kw = Lib.update_df(self.power_kw, results)\r\n\r\n # add objective expressions to financial obj_val\r\n if not objective_values.empty:\r\n objective_values.index = [opt_period]\r\n self.objective_values = Lib.update_df(self.objective_values, objective_values)", "title": "" }, { "docid": "0260b5775a2dfe90e7d407c9e4623a42", "score": "0.5526003", "text": "def update_model(self, pruned_model, optimizer_state=None):\n\n self.optimizer = None\n self.pruned_model = utils.data_parallel(pruned_model, self.settings.n_gpus)\n self.optimizer = torch.optim.SGD(\n # params=self.pruned_model.parameters(),\n [{'params': self.pruned_model.parameters()},\n {'params': self.aux_fc.parameters()}],\n lr=self.settings.lr,\n momentum=self.settings.momentum,\n weight_decay=self.settings.weight_decay,\n nesterov=True)\n if optimizer_state:\n self.optimizer.load_state_dict(optimizer_state)\n self.scheduler = optim.lr_scheduler.CosineAnnealingLR(self.optimizer, self.settings.n_epochs)", "title": "" }, { "docid": "0c7906c4222c7503f0765ca8afa91f33", "score": "0.5497696", "text": "def optimize_default():\n optimizer = create_optimizer()\n logging.info(optimizer.roofline())\n optimizer.apply_optimizations(benchmark_time_s=22, inner_benchmarking=False,\n num_optimization_passes=3,\n rebench=True)\n dataset = optimizer.instantiate_pipeline()\n _benchmark_dataset(dataset, time_limit_s=62)", "title": "" }, { "docid": "09687191a57cf8770f3a9dbbf3da655c", "score": "0.54972696", "text": "def optimize_flow(self, **kwargs) -> 'ResultProcessor':\n with ImportExtensions(required=True):\n import optuna\n if self._sampler == 'GridSampler':\n sampler = getattr(optuna.samplers, self._sampler)(**kwargs)\n else:\n sampler = getattr(optuna.samplers, self._sampler)(seed=self._seed, **kwargs)\n study = optuna.create_study(direction=self._direction, sampler=sampler)\n study.optimize(self._objective, n_trials=self._n_trials)\n result_processor = ResultProcessor(study)\n return result_processor", "title": "" }, { "docid": "fd52b80a429351d32a5330538cf43605", "score": "0.54962695", "text": "def step(self, closure=None):\n # self.param_groups updated on the GPU, stepped, then moved back to its own thread\n localg = U.get_flattened_grads(self.param_groups)\n if self.t % 100 == 0:\n self.check_synced()\n if localg.device.type == \"cpu\":\n localg = localg.detach().numpy()\n else:\n localg = localg.cpu().detach().numpy()\n if self.comm is not None:\n globalg = np.zeros_like(localg)\n self.comm.Allreduce(localg, globalg, op=MPI.SUM)\n if self.scale_grad_by_procs:\n globalg /= self.comm.Get_size()\n if localg.shape[0] > 1 and self.comm.Get_size() > 1:\n assert not (localg == globalg).all()\n globalg = ptu.from_numpy(globalg, device=torch.device(ptu.get_device()))\n else:\n globalg = ptu.from_numpy(localg, device=torch.device(ptu.get_device()))\n\n self.t += 1\n a = self.lr 
* math.sqrt(1 - self.beta2**self.t)/(1 - self.beta1**self.t)\n self.m = self.beta1 * self.m + (1 - self.beta1) * globalg\n self.v = self.beta2 * self.v + (1 - self.beta2) * (globalg * globalg)\n step_update = (- a) * self.m / (torch.sqrt(self.v) + self.epsilon)\n # print(\"before: \")\n # print(self.get_params_as_flat())\n self.set_params_from_flat((self.get_flat_params() + step_update).to(device=torch.device(\"cpu\")))\n # print(\"after, in mpi adam: \")\n # print(self.get_params_as_flat())", "title": "" }, { "docid": "571ccbf0bf8fa7023fb157bc6d33a958", "score": "0.5491971", "text": "def optimizationStep(params, X, Y, learning_rate):\n\n # layers count\n N = params[\"N\"]\n\n # making forward pass to calculate Y_hat predictions of\n # logistic regression\n Y_hat = forwardPropagationFullyConnected(params, X)\n\n # calculating the gradients of the cost function at current\n # coordinates (W,B), provided by current predictions\n backwardPropagationFullyConnected(params, X, Y, Y_hat)\n\n # gradient descent single step with given learning rate\n # for all layers\n for l in range(1, N + 1):\n W = params[\"W\" + str(l)]\n b = params[\"b\" + str(l)]\n dW = params[\"dW\" + str(l)]\n db = params[\"db\" + str(l)]\n\n # parameters update\n W = W - learning_rate * dW\n b = b - learning_rate * db\n\n params[\"W\" + str(l)] = W\n params[\"b\" + str(l)] = b", "title": "" }, { "docid": "19af8d9179d1811217fdd0e081a23475", "score": "0.5490529", "text": "def objective(trial):\n\tparams_hpo = suggest_params(trial)\n\n\tsave_path = f'../optuna/{name}/trial_{trial.number}'\n\tif not os.path.exists(save_path):\n\t\tos.makedirs(save_path)\n\ttorch.save(params_hpo, os.path.join(save_path, 'params.pkl'))\n\n\t# Init Comet-ML\n\texperiment = utils_train.initialize_comet(params_fixed, None)\n\n\tadata = utils_train.load_data('covid')\n\tadata = adata[adata.obs['set'] != 'test'] # This needs to be inside the function, ray can't deal with it outside\n\n\tinit_model = utils_train.select_model_by_name(params_fixed['model'])\n\tmodel = utils_train.init_model(params_hpo, init_model, adata, 'covid')\n\tutils_train.train_call(model, params_hpo, params_fixed, experiment)\n\n\tif os.path.exists(os.path.join(save_path, f'{name}_best_rec_model.pt')):\n\t\tmodes = []\n\n\t\tprint('UMAP for best reconstruction loss model on val')\n\t\tmodel.load(os.path.join(save_path, f'{name}_best_rec_model.pt'))\n\t\tval_latent = model.get_latent([adata[adata.obs['set'] == 'val']], batch_size=512, metadata=modes)\n\t\tfigs = tcr.utils.plot_umap_list(val_latent, title=name + '_val_best_recon', color_groups=modes)\n\t\tfor mode, fig in zip(modes, figs):\n\t\t\texperiment.log_figure(figure_name=name + '_val_best_recon_'+mode, figure=fig, step=model.epoch)\n\n\t\tprint('UMAP for best reconstruction loss model on train')\n\t\tmodel.load(os.path.join(save_path, f'{name}_best_rec_model.pt'))\n\t\tval_latent = model.get_latent([adata[adata.obs['set'] == 'train']], batch_size=512, metadata=['binding_name', 'clonotype', 'donor'])\n\t\tfigs = tcr.utils.plot_umap_list(val_latent, title=name + '_val_best_recon', color_groups=modes)\n\t\tfor mode, fig in zip(modes, figs):\n\t\t\texperiment.log_figure(figure_name=name + '_val_best_recon_' + mode, figure=fig, step=model.epoch)\n\n\texperiment.end()\n\treturn model.best_loss", "title": "" }, { "docid": "a6f997f8d701526f10ea3729b011040c", "score": "0.5485401", "text": "def compute_state_with_opt_mu(self):\r\n # Initialization\r\n # Step 1\r\n obj = 0\r\n prior = 0\r\n #self.vehicle.arrived()\r\n for t in 
range(self.num_pts+1):\r\n\r\n # minimize\r\n A = self.vehicle.system_matrix()\r\n b = self.vehicle.control_matrix()\r\n phit = self.vehicle.state\r\n yt = self.y[t]\r\n print(yt)\r\n Q = self.CCov\r\n rt = np.linalg.pinv(Q)\r\n utk = []\r\n try:\r\n self.Kstar = LA.solve_continuous_are(A, b, Q, self.R)\r\n except ValueError or np.linalg.LinAlgError:\r\n print('Using previous P')\r\n for idx in range(self.horizon):\r\n\r\n utk.append(self.propagate(phit.T[0], A, b, Q, self.Kstar))\r\n phit = np.matmul(A*self.dT + np.eye(3), phit) \\\r\n + utk[idx]\r\n yt = np.matmul(self.C_est, phit)\r\n rt += np.matmul(phit, phit.T)\r\n Q = np.linalg.pinv(rt)\r\n\r\n #utk = self.opt(t, A, b)\r\n\r\n print('OPTIMAL')\r\n control = np.array(utk[0], dtype=float)\r\n print(control)\r\n\r\n\r\n # Step 3 of RLdMPC\r\n\r\n self.vehicle.update(control)\r\n new_phi = self.vehicle.state\r\n\r\n new_output = np.matmul(self.C, new_phi) + self.get_noise(t)\r\n\r\n new_deviation = np.subtract(new_output,\r\n np.matmul(self.C_est, new_phi))\r\n\r\n G = np.matmul(np.matmul(self.CCov, new_phi),\r\n np.linalg.pinv(\r\n np.matmul(self.C_est, self.C_est.T) +\r\n self.quadratic_cost(new_phi, self.CCov)))\r\n\r\n self.CCov -= np.matmul(np.eye(self.num_states) - np.matmul(G, new_phi.T), self.CCov)\r\n\r\n self.C_est += np.matmul(G, new_deviation.T).T\r\n # Store new values\r\n self.controls.append(control)\r\n self.phi.append(new_phi)\r\n self.y.append(new_output)\r\n self.P.append(self.CCov)\r\n self.params.append(self.C_est)\r\n print(\"----------------------------------\")\r\n print(\"Next initial state: \\n\")\r\n print(self.vehicle.state[self.xidx], \"\\n\", self.vehicle.state[self.yidx])\r\n print(\"----------------------------------\")\r\n obj += new_output*new_output + self.R*control*control + np.matmul(np.matmul(new_phi.T, self.CCov), new_phi)\r\n if np.linalg.norm(new_phi) <= 1e-3:\r\n break\r\n\r\n self.update_time()\r\n return obj", "title": "" }, { "docid": "6304daa2e112c933fd1b17b8317c9d94", "score": "0.5474993", "text": "def nonsmooth_objective(self, arg, check_feasibility=False):\n arg = np.asarray(arg)\n x_offset = self.apply_offset(arg)\n if check_feasibility:\n value = self.constraint(x_offset)\n else:\n value = 0\n value += self.quadratic.objective(arg, 'func')\n return value", "title": "" }, { "docid": "e594b984cc28b0c54059ec4d0fcf03cc", "score": "0.5458794", "text": "def optimize_solo(self, *args):\n return _ida_hexrays.minsn_t_optimize_solo(self, *args)", "title": "" }, { "docid": "675c4c9f546b00ad6ad40ec8ec74362d", "score": "0.5458062", "text": "def optimize_params_g(self):\n # Forward-pass\n self.forward_g()\n self.forward_sg()\n self.forward_rg()\n self.forward_dg()\n\n # Backward-pass\n # nets\n self.optimizer_g.zero_grad()\n self.backward_g()\n self.optimizer_g.step()", "title": "" }, { "docid": "ef5ffeda7e54615261d6ce5cb15656ea", "score": "0.545281", "text": "def step(self, closure=None):\n loss = self.G_optimizer.step(closure)\n self._la_step += 1\n\n if self._la_step >= self._total_la_steps:\n self._la_step = 0\n\n # Lookahead and cache the current generator optimizer parameters\n for group in self.G_optimizer.param_groups:\n for p in group[\"params\"]:\n param_state = self.state[p]\n p.data.mul_(self.la_alpha).add_(1.0 - self.la_alpha, param_state[\"cached_G_params\"])\n param_state[\"cached_G_params\"].copy_(p.data)\n\n if self.pullback_momentum == \"pullback\":\n internal_momentum = self.G_optimizer.state[p][\"momentum_buffer\"]\n self.G_optimizer.state[p][\"momentum_buffer\"] = 
internal_momentum.mul_(self.la_alpha).add_(\n                        1.0 - self.la_alpha, param_state[\"cached_G_mom\"]\n                    )\n                    param_state[\"cached_G_mom\"] = self.G_optimizer.state[p][\"momentum_buffer\"]\n                elif self.pullback_momentum == \"reset\":\n                    self.G_optimizer.state[p][\"momentum_buffer\"] = torch.zeros_like(p.data)\n\n        # Lookahead and cache the current discriminator optimizer parameters\n        for group in self.D_optimizer.param_groups:\n            for p in group[\"params\"]:\n                param_state = self.state[p]\n                p.data.mul_(self.la_alpha).add_(1.0 - self.la_alpha, param_state[\"cached_D_params\"])\n                param_state[\"cached_D_params\"].copy_(p.data)\n\n                if self.pullback_momentum == \"pullback\":\n                    internal_momentum = self.D_optimizer.state[p][\"momentum_buffer\"]\n                    self.D_optimizer.state[p][\"momentum_buffer\"] = internal_momentum.mul_(self.la_alpha).add_(\n                        1.0 - self.la_alpha, param_state[\"cached_D_mom\"]\n                    )\n                    param_state[\"cached_D_mom\"] = self.D_optimizer.state[p][\"momentum_buffer\"]\n                elif self.pullback_momentum == \"reset\":\n                    self.D_optimizer.state[p][\"momentum_buffer\"] = torch.zeros_like(p.data)\n\n        return loss", "title": "" }, { "docid": "3baed4d6476d7ffb5c8043e324b8f8b3", "score": "0.54500365", "text": "def optimise(self, iters=1000):\n        return _ndlml.gp_optimise(self, iters)", "title": "" }, { "docid": "b5c79321d3bc25219859a2e21d1d0741", "score": "0.5444835", "text": "def step(self):\n        for i,param in enumerate(self.model.params):\n            for j in range(len(param)): # iterate for biases and weights\n                self.m[2*i+j] = self.b1*self.m[2*i+j] + (1-self.b1)*param[1][j]\n                self.v[2*i+j] = self.b2*self.v[2*i+j] + (1-self.b2)*param[1][j]**2\n\n                mhat = (1/(1-self.b1))*self.m[2*i+j]\n                vhat = (1/(1-self.b2))*self.v[2*i+j]\n\n                div = torch.sqrt(vhat)+torch.Tensor(vhat.size()).normal_(mean = 0, std = 1e-6)\n\n                param[0][j].sub_(self.lr * mhat/div)", "title": "" }, { "docid": "3172566438c225a60f65a9cda58e99f3", "score": "0.54420125", "text": "def nonsmooth_objective(self, arg, check_feasibility=False):\n\n        arg = np.asarray(arg)\n        x_offset = self.apply_offset(arg)\n        value = x_offset.max()\n        value += self.quadratic.objective(arg, 'func')\n        return value", "title": "" }, { "docid": "11e713af81c1ef5b74a2b514da949c8e", "score": "0.5441373", "text": "def optimize(self, opt_params: dict = None,\n                 verbose: Union[int, bool] = False) -> OptimizeResult:\n        # Avoids UnboundLocalError by defining 'res' in case the\n        # optimisation procedure doesn't have an obvious return object\n        # e.g., 
slice sampling\n res = None\n if opt_params is None:\n opt_params = self.opt_params\n elif opt_params == 'default':\n opt_params = self.default_opt_params\n\n # Hacky check of verbosity: checking opt_params and function input.\n # Casting to bool in case one of the values is None\n if 'verbose' in opt_params.keys():\n verbose = bool(verbose or opt_params['verbose'])\n\n if opt_params['method'] == 'grad':\n # Normal gradient descent is done in log(hp) space\n if 'options' in opt_params.keys():\n options = opt_params['options']\n else:\n options = None\n\n res = sp.optimize.minimize(self.objective_log_theta,\n np.log(self.param_array),\n jac=self.objective_grad_log_theta,\n options=options)\n\n new_param_array = np.exp(res.x)\n self.param_array = new_param_array\n if verbose:\n print(\"Finished grad descent optimization of hps\")\n print(\"Result:\")\n pprint(res)\n print(\"New model\")\n print(self)\n return res\n\n elif opt_params['method'] == 'direct':\n if verbose:\n print(\"Starting DIRECT optimization of hps\")\n print(\"DIRECT options:\")\n pprint(opt_params)\n\n assert 'hp_bounds' in opt_params.keys()\n assert 'n_direct_evals' in opt_params.keys()\n\n # hp_bounds are in hp-space, so transform them to log space\n hp_bounds = np.log(opt_params['hp_bounds'])\n\n res = scipydirect.minimize(self.objective,\n hp_bounds,\n maxf=opt_params['n_direct_evals'])\n self.param_array = np.exp(res.x)\n if verbose:\n print(\"Finished DIRECT optimization of hps\")\n print(\"Result:\")\n pprint(res)\n print(\"New model\")\n print(self)\n\n return res\n\n elif opt_params['method'] == 'multigrad':\n if 'options' in opt_params.keys():\n options = opt_params['options']\n else:\n options = None\n\n assert 'restart_bounds' in opt_params.keys()\n # bounds are in hp-space, so transform them to log space\n restart_bounds = np.log(opt_params['restart_bounds'])\n\n if 'hp_bounds' in opt_params.keys():\n hp_bounds = np.log(opt_params['hp_bounds'])\n else:\n hp_bounds = None\n\n current_param_array = self.param_array.copy()\n num_restarts = opt_params['num_restarts']\n res = minimize_with_restarts(self.objective_log_theta,\n restart_bounds,\n num_restarts=num_restarts,\n jac=self.objective_grad_log_theta,\n hard_bounds=hp_bounds,\n minimize_options=options,\n verbose=verbose)\n\n # if multi-started gradient descent failed, then rollback\n if res is None:\n self.param_array = current_param_array\n else:\n new_param_array = np.exp(res.x)\n self.param_array = new_param_array\n if verbose:\n print(\"Finished multigrad descent optimization of hps\")\n print(\"Result:\")\n pprint(res)\n print(\"New model\")\n print(self)\n return res\n\n elif opt_params['method'] == 'samplegrad':\n if 'minimize_options' in opt_params.keys():\n minimize_options = opt_params['minimize_options']\n else:\n minimize_options = None\n\n if 'num_samples' in opt_params.keys():\n num_samples = opt_params['num_samples']\n else:\n num_samples = 1000\n if 'num_local' in opt_params.keys():\n num_local = opt_params['num_local']\n else:\n num_local = 5\n\n hp_bounds = np.log(opt_params['hp_bounds'])\n\n res = sample_then_minimize(\n self.objective_log_theta,\n hp_bounds,\n num_samples=num_samples,\n num_local=num_local,\n jac=self.objective_grad_log_theta,\n minimize_options=minimize_options,\n evaluate_sequentially=True,\n verbose=False)\n\n self.param_array = np.exp(res.x)\n\n\n elif opt_params['method'] == 'slice':\n print(\"Running optimize with slice sampling inside a model class!\",\n \"Do this in a model_collection instead!\")\n raise 
NotImplementedError\n\n else:\n print(\"Bad optimiser choice\")\n raise NotImplementedError\n\n # return res", "title": "" }, { "docid": "99054e278704ec2714f3e0ff81a94953", "score": "0.54397815", "text": "def optimizer_step(self, epoch_nb, batch_nb, optimizer, optimizer_i, second_order_closure=None):\n if isinstance(optimizer, torch.optim.LBFGS):\n optimizer.step(second_order_closure)\n else:\n optimizer.step()\n\n # clear gradients\n optimizer.zero_grad()", "title": "" }, { "docid": "478bf68d5b4fb17369b6fb74e627248c", "score": "0.54391193", "text": "def run_gp_minimize(self, initialx):\n \n self.caseindex=initialx[-1]\n initialx=initialx[:-1]\n \n return gp_minimize(func=self.evalX, \n dimensions=self.dimensions,\n acq_func='EI',\n n_calls=self.n_calls,\n x0=initialx,\n random_state=self.caseindex)", "title": "" }, { "docid": "cea369d7efbd6b78686e1d4b742fd30a", "score": "0.543464", "text": "def anneal_step(self):\n # minimize A using the chosen method\n if self.method in ['L-BFGS-B', 'NCG', 'TNC', 'LM','IPOPT']:\n if self.betaidx == 0:\n if self.NPest == 0:\n XP0 = np.copy(self.minpaths[0][:self.N_model*self.D])\n elif self.NPest == self.NP:\n XP0 = np.copy(self.minpaths[0])\n else:\n X0 = self.minpaths[0][:self.N_model*self.D]\n P0 = self.minpaths[0][self.N_model*self.D:][self.Pidx]\n XP0 = np.append(X0, P0)\n else:\n if self.NPest == 0:\n XP0 = np.copy(self.minpaths[self.betaidx-1][:self.N_model*self.D])\n elif self.NPest == self.NP:\n XP0 = np.copy(self.minpaths[self.betaidx-1])\n else:\n X0 = self.minpaths[self.betaidx-1][:self.N_model*self.D]\n P0 = self.minpaths[self.betaidx-1][self.N_model*self.D:][self.Pidx]\n XP0 = np.append(X0, P0)\n\n if self.method == 'L-BFGS-B':\n XPmin, Amin, exitflag = self.min_lbfgs_scipy(XP0, self.gen_xtrace())\n elif self.method == 'NCG':\n XPmin, Amin, exitflag = self.min_cg_scipy(XP0, self.gen_xtrace())\n elif self.method == 'TNC':\n XPmin, Amin, exitflag = self.min_tnc_scipy(XP0, self.gen_xtrace())\n #Added min_ipopt\n elif self.method == 'IPOPT':\n XPmin, Amin, exitstatus = self.min_ipopt(XP0, self.gen_xtrace())\n #elif self.method == 'LM':\n # XPmin, Amin, exitflag = self.min_lm_scipy(XP0)\n else:\n print(\"You really shouldn't be here. 
Exiting.\")\n sys.exit(1)\n else:\n print(\"ERROR: Optimization routine not implemented or recognized.\")\n sys.exit(1)\n\n # update optimal parameter values\n if self.NPest > 0:\n if self.P.ndim == 1:\n if isinstance(XPmin[0], adolc._adolc.adouble):\n self.P[self.Pidx] = np.array([XPmin[-self.NPest + i].val \\\n for i in xrange(self.NPest)])\n else:\n self.P[self.Pidx] = np.copy(XPmin[-self.NPest:])\n else:\n if self.disc.im_func.__name__ in [\"disc_euler\", \"disc_forwardmap\"]:\n nmax = self.N_model - 1\n else:\n nmax = self.N_model\n for n in xrange(nmax):\n if isinstance(XPmin[0], adolc._adolc.adouble):\n nidx = nmax - n - 1\n self.P[n, self.Pidx] = np.array([XPmin[-nidx*self.NPest + i].val \\\n for i in xrange(self.NPest)])\n else:\n pi1 = nmax*self.D + n*self.NPest\n pi2 = nmax*self.D + (n+1)*self.NPest\n self.P[n, self.Pidx] = np.copy(XPmin[pi1:pi2])\n\n # store A_min and the minimizing path\n self.A_array[self.betaidx] = Amin\n self.me_array[self.betaidx] = self.me_gaussian(np.array(XPmin[:self.N_model*self.D]))\n self.fe_array[self.betaidx] = self.fe_gaussian(np.array(XPmin))\n self.minpaths[self.betaidx] = np.array(np.append(XPmin[:self.N_model*self.D], self.P))\n\n # increase RF\n if self.betaidx < len(self.beta_array) - 1:\n self.betaidx += 1\n self.beta = self.beta_array[self.betaidx]\n self.RF = self.RF0 * self.alpha**self.beta\n\n # set flags indicating that A needs to be retaped, and that we're no\n # longer at the beginning of the annealing procedure\n self.taped = False\n if self.annealing_initialized:\n # Indicate no longer at beta_0\n self.initialized = False", "title": "" } ]
e3e97acd6844f1e4b3ff20643225326b
Tool used in _format().
[ { "docid": "78a61781e601f5fb984ed2af66dd16db", "score": "0.0", "text": "def _format_data(self, data, name):\n try:\n val = data[name]\n except KeyError:\n val = None\n return val", "title": "" } ]
[ { "docid": "1fd09b4bd37ddf1ac1d83bef2f3f7740", "score": "0.78909135", "text": "def format_(self) -> Any:", "title": "" }, { "docid": "f0ef23e1f312434467c94c45c8094c12", "score": "0.7415282", "text": "def __format__(self, format_spec): # real signature unknown; restored from __doc__\n return \"\"", "title": "" }, { "docid": "33fb67e9dfe22c96f580613c14d33ce5", "score": "0.71275705", "text": "def format(self, *args, **kwargs): # real signature unknown; restored from __doc__\n return \"\"", "title": "" }, { "docid": "516542b9aeea8ad9875c49b34e48d832", "score": "0.7026448", "text": "def getFormatter(self):", "title": "" }, { "docid": "fc8224d831e1708a64d06f7be46e6942", "score": "0.6982802", "text": "def output_formatting():\n pass", "title": "" }, { "docid": "16d56af0d05facb91eb79f1f48f8c5a6", "score": "0.6675896", "text": "def format(self):\n return", "title": "" }, { "docid": "be09f4b5785bca8b45230cd0546f0b42", "score": "0.6617626", "text": "def __format__(self, format):\n return f'{str(self):{format}}'", "title": "" }, { "docid": "be61fdda347a9e7edff2c8f671ed27be", "score": "0.6592204", "text": "def Format(self, args):\n del args # Unused in Format\n return 'default'", "title": "" }, { "docid": "7398efb8d8f7cd81a5814c253cb35c04", "score": "0.6565247", "text": "def format_usage(self, ):\n\t\tpass", "title": "" }, { "docid": "da28611a5d63b6cf36983c07e6225037", "score": "0.6554665", "text": "def format(self, value: T) -> str:", "title": "" }, { "docid": "f4150fe82d7c707e482a3f4884dc6c4c", "score": "0.6421986", "text": "def format(self, format):\n return self.__format__(format)", "title": "" }, { "docid": "867f1e95b971d4b29593d714a058ab6e", "score": "0.6305408", "text": "def _parse_format(self):\n pass", "title": "" }, { "docid": "d8f48ea59ba58f85f9f434c05aad63f5", "score": "0.6289832", "text": "def test_formatter_stdlib(self):\n self.assert_formatter_equal('', '')\n self.assert_formatter_equal('a', 'a')\n self.assert_formatter_equal('ab', 'ab')\n self.assert_formatter_equal('a{{', 'a{')\n self.assert_formatter_equal('a}}', 'a}')\n self.assert_formatter_equal('{{b', '{b')\n self.assert_formatter_equal('}}b', '}b')\n self.assert_formatter_equal('a{{b', 'a{b')\n\n # examples from the PEP:\n import datetime\n self.assert_formatter_equal(\"My name is {0}\", \"My name is Fred\", 'Fred')\n self.assert_formatter_equal(\"My name is {0[name]}\", \"My name is Fred\", dict(name='Fred'))\n self.assert_formatter_equal(\"My name is {0} :-{{}}\", \"My name is Fred :-{}\", 'Fred')\n\n d = datetime.date(2007, 8, 18)\n self.assert_formatter_equal(\"The year is {0.year}\", \"The year is 2007\", d)\n\n # classes we'll use for testing\n class C(object):\n def __init__(self, x=100):\n self._x = x\n\n def __format__(self, spec):\n return spec\n\n class D(object):\n def __init__(self, x):\n self.x = x\n\n def __format__(self, spec):\n return str(self.x)\n\n # class with __str__, but no __format__\n class E(object):\n def __init__(self, x):\n self.x = x\n\n def __str__(self):\n return 'E(' + self.x + ')'\n\n # class with __repr__, but no __format__ or __str__\n class F(object):\n def __init__(self, x):\n self.x = x\n\n def __repr__(self):\n return 'F(' + self.x + ')'\n\n # class with __format__ that forwards to string, for some format_spec's\n class G(object):\n def __init__(self, x):\n self.x = x\n\n def __str__(self):\n return \"string is \" + self.x\n\n def __format__(self, format_spec):\n if format_spec == 'd':\n return 'G(' + self.x + ')'\n return format(str(self), format_spec)\n\n # class that returns a bad type 
from __format__\n class H(object):\n def __format__(self, format_spec):\n return 1.0\n\n class I(datetime.date):\n def __format__(self, format_spec):\n return self.strftime(format_spec)\n\n class J(int):\n def __format__(self, format_spec):\n return int.__format__(self * 2, format_spec)\n\n self.assert_formatter_equal('', '')\n self.assert_formatter_equal('abc', 'abc')\n self.assert_formatter_equal('{0}', 'abc', 'abc')\n self.assert_formatter_equal('{0:}', 'abc', 'abc')\n self.assert_formatter_equal('X{0}', 'Xabc', 'abc')\n self.assert_formatter_equal('{0}X', 'abcX', 'abc')\n self.assert_formatter_equal('X{0}Y', 'XabcY', 'abc')\n self.assert_formatter_equal('{1}', 'abc', 1, 'abc')\n self.assert_formatter_equal('X{1}', 'Xabc', 1, 'abc')\n self.assert_formatter_equal('{1}X', 'abcX', 1, 'abc')\n self.assert_formatter_equal('X{1}Y', 'XabcY', 1, 'abc')\n self.assert_formatter_equal('{0}', '-15', -15)\n self.assert_formatter_equal('{0}{1}', '-15abc', -15, 'abc')\n self.assert_formatter_equal('{0}X{1}', '-15Xabc', -15, 'abc')\n self.assert_formatter_equal('{{', '{')\n self.assert_formatter_equal('}}', '}')\n self.assert_formatter_equal('{{}}', '{}')\n self.assert_formatter_equal('{{x}}', '{x}')\n self.assert_formatter_equal('{{{0}}}', '{123}', 123)\n self.assert_formatter_equal('{{{{0}}}}', '{{0}}')\n self.assert_formatter_equal('}}{{', '}{')\n self.assert_formatter_equal('}}x{{', '}x{')\n\n # weird field names\n self.assert_formatter_equal(\"{0[foo-bar]}\", 'baz', {'foo-bar': 'baz'})\n self.assert_formatter_equal(\"{0[foo bar]}\", 'baz', {'foo bar': 'baz'})\n self.assert_formatter_equal(\"{0[ ]}\", '3', {' ': 3})\n\n self.assert_formatter_equal('{foo._x}', '20', foo=C(20))\n self.assert_formatter_equal('{1}{0}', '2010', D(10), D(20))\n self.assert_formatter_equal('{0._x.x}', 'abc', C(D('abc')))\n self.assert_formatter_equal('{0[0]}', 'abc', ['abc', 'def'])\n self.assert_formatter_equal('{0[1]}', 'def', ['abc', 'def'])\n self.assert_formatter_equal('{0[1][0]}', 'def', ['abc', ['def']])\n self.assert_formatter_equal('{0[1][0].x}', 'def', ['abc', [D('def')]])\n\n # strings\n self.assert_formatter_equal('{0:.3s}', 'abc', 'abc')\n self.assert_formatter_equal('{0:.3s}', 'ab', 'ab')\n self.assert_formatter_equal('{0:.3s}', 'abc', 'abcdef')\n self.assert_formatter_equal('{0:.0s}', '', 'abcdef')\n self.assert_formatter_equal('{0:3.3s}', 'abc', 'abc')\n self.assert_formatter_equal('{0:2.3s}', 'abc', 'abc')\n self.assert_formatter_equal('{0:2.2s}', 'ab', 'abc')\n self.assert_formatter_equal('{0:3.2s}', 'ab ', 'abc')\n self.assert_formatter_equal('{0:x<0s}', 'result', 'result')\n self.assert_formatter_equal('{0:x<5s}', 'result', 'result')\n self.assert_formatter_equal('{0:x<6s}', 'result', 'result')\n self.assert_formatter_equal('{0:x<7s}', 'resultx', 'result')\n self.assert_formatter_equal('{0:x<8s}', 'resultxx', 'result')\n self.assert_formatter_equal('{0: <7s}', 'result ', 'result')\n self.assert_formatter_equal('{0:<7s}', 'result ', 'result')\n self.assert_formatter_equal('{0:>7s}', ' result', 'result')\n self.assert_formatter_equal('{0:>8s}', ' result', 'result')\n self.assert_formatter_equal('{0:^8s}', ' result ', 'result')\n self.assert_formatter_equal('{0:^9s}', ' result ', 'result')\n self.assert_formatter_equal('{0:^10s}', ' result ', 'result')\n self.assert_formatter_equal('{0:10000}', 'a' + ' ' * 9999, 'a')\n self.assert_formatter_equal('{0:10000}', ' ' * 10000, '')\n self.assert_formatter_equal('{0:10000000}', ' ' * 10000000, '')\n\n # format specifiers for user defined type\n 
self.assert_formatter_equal('{0:abc}', 'abc', C())\n\n        # !r and !s coercions\n        self.assert_formatter_equal('{0!s}', 'Hello', 'Hello')\n        self.assert_formatter_equal('{0!s:}', 'Hello', 'Hello')\n        self.assert_formatter_equal('{0!s:15}', 'Hello ', 'Hello')\n        self.assert_formatter_equal('{0!s:15s}', 'Hello ', 'Hello')\n        self.assert_formatter_equal('{0!r}', \"'Hello'\", 'Hello')\n        self.assert_formatter_equal('{0!r:}', \"'Hello'\", 'Hello')\n        self.assert_formatter_equal('{0!r}', 'F(Hello)', F('Hello'))\n\n        self.assert_formatter_equal('{0:d}', 'G(data)', G('data'))\n        self.assert_formatter_equal('{0!s}', 'string is data', G('data'))\n\n        self.assert_formatter_equal(\"{0:date: %Y-%m-%d}\", \"date: 2007-08-27\",\n                                    I(year=2007, month=8, day=27))\n\n        # test deriving from a builtin type and overriding __format__\n        self.assert_formatter_equal(\"{0}\", \"20\", J(10))\n\n        # string format specifiers\n        self.assert_formatter_equal('{0:}', 'a', 'a')\n\n        # computed format specifiers\n        self.assert_formatter_equal(\"{0:.{1}}\", 'hello', 'hello world', 5)\n        self.assert_formatter_equal(\"{0:.{1}s}\", 'hello', 'hello world', 5)\n        self.assert_formatter_equal(\"{0:.{precision}s}\", 'hello', 'hello world',\n                                    precision=5)\n        self.assert_formatter_equal(\"{0:{width}.{precision}s}\", 'hello ',\n                                    'hello world', width=10, precision=5)\n        self.assert_formatter_equal(\"{0:{width}.{precision}s}\", 'hello ',\n                                    'hello world', width='10', precision='5')\n\n        # test various errors\n        self.assert_formatter_raises('{', ValueError)\n        self.assert_formatter_raises('}', ValueError)\n        self.assert_formatter_raises('a{', ValueError)\n        self.assert_formatter_raises('a}', ValueError)\n        self.assert_formatter_raises('{a', ValueError)\n        self.assert_formatter_raises('}a', ValueError)\n        self.assert_formatter_raises('{0}', IndexError)\n        self.assert_formatter_raises('{1}', IndexError, 'abc')\n        self.assert_formatter_raises('{x}', KeyError)\n        self.assert_formatter_raises(\"}{\", ValueError)\n        self.assert_formatter_raises(\"{\", ValueError)\n        self.assert_formatter_raises(\"}\", ValueError)\n        self.assert_formatter_raises(r\"abc{0:{}\", ValueError)\n        self.assert_formatter_raises(\"{0\", ValueError)\n        self.assert_formatter_raises(\"{0.}\", IndexError)\n        self.assert_formatter_raises(\"{0.}\", ValueError, 0)\n\n        if six.PY2:\n            self.assert_formatter_raises(\"{0[}\", IndexError)\n        else:\n            self.assert_formatter_raises(\"{0[}\", ValueError)\n\n        self.assert_formatter_raises(\"{0[}\", ValueError, [])\n        self.assert_formatter_raises(\"{0]}\", KeyError)\n        self.assert_formatter_raises(\"{0.[]}\", ValueError, 0)\n        self.assert_formatter_raises(\"{0..foo}\", ValueError, 0)\n        self.assert_formatter_raises(\"{0[0}\", ValueError, 0)\n        self.assert_formatter_raises(\"{0[0:foo}\", ValueError, 0)\n        self.assert_formatter_raises(\"{c]}\", KeyError)\n        self.assert_formatter_raises(\"{{ {{{0}}\", ValueError, 0)\n        self.assert_formatter_raises(\"{0}}\", ValueError, 0)\n        self.assert_formatter_raises(\"{foo}\", KeyError, bar=3)\n        self.assert_formatter_raises(\"{0!x}\", ValueError, 3)\n        self.assert_formatter_raises(\"{0!}\", ValueError, 0)\n        self.assert_formatter_raises(\"{0!rs}\", ValueError, 0)\n        self.assert_formatter_raises(\"{!}\", ValueError)\n\n        # in python 2.7 onwards, string.Formatter raises KeyError here, rather\n        # than ValueError. 
In rex we keep this as ValueError (the change is due\n # to implicit positional arguments, not applicable in rex).\n if six.PY2:\n self.assert_formatter_raises(\"{:}\", ValueError)\n self.assert_formatter_raises(\"{:s}\", ValueError)\n self.assert_formatter_raises(\"{}\", ValueError)\n else:\n self.assert_formatter_raises(\"{:}\", IndexError)\n self.assert_formatter_raises(\"{:s}\", IndexError)\n self.assert_formatter_raises(\"{}\", IndexError)\n\n # issue 6089\n self.assert_formatter_raises(\"{0[0]x}\", ValueError, [None])\n self.assert_formatter_raises(\"{0[0](10)}\", ValueError, [None])\n\n # can't have a replacement on the field name portion\n self.assert_formatter_raises('{0[{1}]}', TypeError, 'abcdefg', 4)\n\n # exceed maximum recursion depth\n self.assert_formatter_raises(\"{0:{1:{2}}}\", ValueError, 'abc', 's', '')\n self.assert_formatter_raises(\"{0:{1:{2:{3:{4:{5:{6}}}}}}}\",\n ValueError, 0, 1, 2, 3, 4, 5, 6, 7)\n\n # string format spec errors\n self.assert_formatter_raises(\"{0:-s}\", ValueError, '')\n self.assert_formatter_raises(\"{0:=s}\", ValueError, '')", "title": "" }, { "docid": "e19a90ba4d6807b1888d4e2cb60c0b46", "score": "0.62099", "text": "def format_(self) -> Optional[str]:\n return self.__format", "title": "" }, { "docid": "10d4c1102f36b1581247db942c3b8355", "score": "0.6183198", "text": "def format(cls, obj):\n raise NotImplementedError", "title": "" }, { "docid": "93d777f5a59ed0fc3cb706c0b04f7380", "score": "0.6174087", "text": "def test_format(self, store):\n\n # Test basic formatting and base unit expansion\n store.add_unit('se', 'second')\n y = store.add_base_unit('y')\n z = store.add_unit('z', 'y * meter / se ** 2')\n assert store.format(z) == 'z'\n assert store.format(z, True) == '1.0 meter * y / second ** 2'\n\n # Test if internal name manipulation can mess things up\n a = store.add_base_unit(str(y))\n b = store.add_unit(str(z), '2 * z /' + str(y))\n assert store.format(a) == str(y)\n assert store.format(b) == str(z)\n assert store.format(a, True) in ('1.0 ' + str(y), '1 ' + str(y))\n assert store.format(b, True) == '2.0 meter * y / second ** 2 / ' + str(y)", "title": "" }, { "docid": "cb2d45039a100d7ad115d8c0e6bd7fee", "score": "0.6158049", "text": "def __format__(self, formatstr):\n\n str0 = str(formatstr)\n if len(str0) == 0:\n str0 = \"%n(%s) Z=%z\"\n str0 = str0.replace(\"%s\", self.symbol)\n str0 = str0.replace(\"%n\", self.name)\n str0 = str0.replace(\"%z\", f\"{self.Z}\")\n return str0", "title": "" }, { "docid": "5b5e3fe16ff40d740e039eed97499593", "score": "0.61505413", "text": "def as_string(self, fmt=None):\n pass", "title": "" }, { "docid": "01ddbed24542536ebc4ef17f66f90419", "score": "0.61091846", "text": "def __format__(self, fmt):\n\n return f'Instance: {self.name} - {self.simple_description} - {self.descriptors}\\n'", "title": "" }, { "docid": "55ed0e9503f4d55ae944bf9444a7d534", "score": "0.6066933", "text": "def _format(self):\n success = False\n return success", "title": "" }, { "docid": "ee12e0d744169cd706b0e456a61ed60e", "score": "0.6040646", "text": "def __str__(self):\r\n return '<pre %s></pre>%s' % (self.strAttr(pyClassNames=self.pyStyle), self.helper)", "title": "" }, { "docid": "409a861cc1371c90589f270b5d5aa999", "score": "0.6010147", "text": "def using(self, *args):\n return self.value.format(*args)", "title": "" }, { "docid": "6e751429552fe72b5c8f41f0aee22400", "score": "0.60066074", "text": "def getFormat(self) -> int:\n ...", "title": "" }, { "docid": "b607cd7bfd223866effe25dfc13af668", "score": "0.6002134", "text": "def 
getFormat(self):\n pass", "title": "" }, { "docid": "f79bfea54139efb68984280f2e02f2bb", "score": "0.5997973", "text": "def _get_format_code(self):\n return self._format_code", "title": "" }, { "docid": "6778702ecf068793fe576364612356f5", "score": "0.59528404", "text": "def __str__(self):\n return self.format.format(**vars(self))", "title": "" }, { "docid": "0c6169c3acc8e31ac2257b738505826d", "score": "0.5931329", "text": "def str_format():\n print('\\nWe are the {} who say \"{}!\"'.format('knights' , 'Ni'))\n print('{0} and {1}'.format('spam' , 'eggs'))\n print('{1} and {0}'.format('spam', 'eggs'))\n #If keyword args are used in the str.format() method, their values are referred to by using the name of argument\n print('This {food} is {adjective}.'.format(food = 'spam' , adjective = 'absolutely horrible'))\n print('The story of {0}, {1}, and {other}.'.format('Bill' , 'Manfred' , other = 'Georg'))\n #You can also assign double quote shortcut on index !r\n contents = 'eels'\n print('My hovercraft is full of {!r}.'.format(contents))\n print(\"\"\"\n ---------------------------------------\n FROM :\n ---------------------------------------\n print('We are the {} who say \"{}!\"'.format('knights' , 'Ni'))\n print('{0} and {1}'.format('spam' , 'eggs'))\n print('{1} and {0}'.format('spam', 'eggs'))\n #If keyword args are used in the str.format() method, their values are referred to by using the name of argument\n print('This {food} is {adjective}.'.format(food = 'spam' , adjective = 'absolutely horrible'))\n print('The story of {0}, {1}, and {other}.'.format('Bill' , 'Manfred' , other = 'Georg'))\n #You can also assign double quote shortcut on index !r\n contents = 'eels'\n print('My hovercraft is full of {!r}.'.format(contents))\n \"\"\")\n return", "title": "" }, { "docid": "5177794bfb713575e1545e0ac9dc87ad", "score": "0.5918014", "text": "def format_help(self, ):\n\t\tpass", "title": "" }, { "docid": "c5d631b87dbe2fe325eaa5d4eaf70f49", "score": "0.59153014", "text": "def __str__(self):\n return self.format()", "title": "" }, { "docid": "b14ed07eaf8583369b951dd436e97bbc", "score": "0.5913176", "text": "def _format(self):\n if not isinstance(self._flags, (list,tuple)):\n self._flags = self._flags.split(',')\n if not isinstance(self._rectypes, (list,tuple)):\n self._rectypes = self._rectypes.split(',')", "title": "" }, { "docid": "b3cc5bbe41f035ddf81df69067bec234", "score": "0.58937347", "text": "def _formatMessage(self, msg, standardMsg):\n if not self.longMessage:\n return msg or standardMsg\n if msg is None:\n return standardMsg\n try:\n # don't switch to '{}' formatting in Python 2.X\n # it changes the way unicode input is handled\n return '%s : %s' % (standardMsg, msg)\n except UnicodeDecodeError:\n return '%s : %s' % (safe_repr(standardMsg), safe_repr(msg))", "title": "" }, { "docid": "25b704ca2adbf0949196f672949a33ae", "score": "0.5883206", "text": "def format(self):\n return self._format", "title": "" }, { "docid": "25b704ca2adbf0949196f672949a33ae", "score": "0.5883206", "text": "def format(self):\n return self._format", "title": "" }, { "docid": "973a074478675ea99ec3911797beb326", "score": "0.58754313", "text": "def __repr__(self):\n if self._format_func is None:\n return super().__repr__()\n else:\n return self._format_func(self)", "title": "" }, { "docid": "c0bc50722064d5d88f5eb63d37482337", "score": "0.5872582", "text": "def pretty_print(self):", "title": "" }, { "docid": "6e5ade74785a1c139bbffebe7ef874a4", "score": "0.58644253", "text": "def add_command_formatting(self, command):\n 
...", "title": "" }, { "docid": "7dd4442ec945e32c18fe68e0ff4e8e58", "score": "0.58452755", "text": "def __str__(self):\n return \"[%s]\" % (self._str_base())", "title": "" }, { "docid": "d8ddb2f79a2618f6057e5a4dffd339db", "score": "0.58440286", "text": "def cmd_template(self):\n return self._fmt", "title": "" }, { "docid": "a81d65142fc3ca1673e47e0a02f1f355", "score": "0.5832257", "text": "def _format_str(self, full=True):\n if full:\n format_dimers = self._format_dimers\n format_hairpins = self._format_hairpins\n else:\n format_dimers = self._format_dimers_short\n format_hairpins = self._format_hairpins_short\n string = '\\n'\n if self._seq2:\n #string += hr(' %s vs %s cross-dimers ' % (self._seq_rec1.id, self._seq_rec2.id), symbol='=')\n string += format_dimers(self._cross_dimers, self._seq1, self._seq2)\n string += ' '\n else:\n #string += hr(' %s: %s ' % (self._seq_rec1.id, str(self._seq1)), symbol='=')\n #string += hr(' self-dimers ')\n string += format_dimers(self._seq1_dimers, self._seq1, self._seq1)\n string += ' '\n #string += hr(' hairpins ')\n string += format_hairpins(self._seq1_hairpins, self._seq1)\n string += ' '\n #string += hr('',symbol='=')\n return string", "title": "" }, { "docid": "ab480b9447c69d75e76a0980a962677f", "score": "0.58222103", "text": "def safe_format(\n self,\n format_string,\n param_dict=None,\n force_composite=False,\n attr_getter=None,\n max_width=None,\n ):\n try:\n result = self._formatter.format(\n format_string,\n self._py3status_module,\n param_dict,\n force_composite=force_composite,\n attr_getter=attr_getter,\n )\n if max_width is not None and max_width > 0:\n if isinstance(result, str):\n result = result[:max_width]\n elif isinstance(result, Composite):\n chars_left = max_width\n for composite in result:\n if \"index\" in composite:\n continue\n composite[\"full_text\"] = composite[\"full_text\"][:chars_left]\n chars_left -= len(composite[\"full_text\"])\n chars_left = max(0, chars_left)\n return result\n except Exception as err:\n self._report_exception(f\"Invalid format `{format_string}` ({err})\")\n return f\"invalid format ({err})\"", "title": "" }, { "docid": "a57273bd9188e782a59c2343b24b73c4", "score": "0.5809264", "text": "def format(self, extension=None):\n raise NotImplementedError", "title": "" }, { "docid": "07f5ea63bbbd58e3400ccfa63bbe35ba", "score": "0.58062136", "text": "def _format_intermediary_56A(self, val):\n return \"\"", "title": "" }, { "docid": "5bd85f81f7ad4b9fd24d838e913be0d5", "score": "0.5800826", "text": "def GetFormat(self, p_int, p_int_1, bool):\n ...", "title": "" }, { "docid": "f6e29139c1027c41ab242549776fe4ed", "score": "0.57953054", "text": "def sformatf(cls, msg: str, *args) -> str:\n #formats = {\"%t\": \"%d\", \"%0t\": \"%0d\"}\n #for s in formats:\n # msg = msg.replace(s, formats[s])\n #return sformatf(msg, *args)\n # TODO substitute old types %s/%d etc with {}\n #new_msg = cls.STR_RE.sub(r'{:\\\1}', msg)\n #print(\"new_msg is \" + new_msg)\n for s in cls.formats:\n if s == \"%h\" or s == \"%0h\":\n msg = msg.replace(s, \"{:X}\")\n else:\n msg = msg.replace(s, \"{}\")\n return msg.format(*args)", "title": "" }, { "docid": "091b40b9e40f7467db18da9f2e9f28da", "score": "0.5794394", "text": "def lazy_format(format_this, *args, 
**kwargs):\n\n if args:\n for idx, val in enumerate(args):\n format_this = format_this.replace(\n \"{\" + str(idx) + \"}\", str(val))\n\n pattern = re.compile(\".*?(\\{.*?\\}).*?\")\n formatted = format_this\n to_replace = [item for item in re.findall(pattern, format_this)]\n for item in to_replace:\n if kwargs.get(item[1:-1]) is not None:\n formatted = formatted.replace(item, str(kwargs.get(item[1:-1])))\n return formatted", "title": "" }, { "docid": "e94c588f97c09519b95ef8786ec24957", "score": "0.57910514", "text": "def reformat():\n toolkit.reformat()", "title": "" }, { "docid": "b61fc12cb3cd833e8f42b1cc2df5f3d0", "score": "0.5783297", "text": "def get_format(self):\n return self.console_format.replace('{T}', self.now())", "title": "" }, { "docid": "f4df1eb0f65975c562bfd0634cd4e82b", "score": "0.5776552", "text": "def formatValue(self, value):\n value = self.parseValue(value)\n # format can be a string or a function\n if issubclass(type(self.format), str):\n return self.format%(value)\n else:\n return self.format(value)", "title": "" }, { "docid": "e7d16f4bcbce9ea7106eebc33eefaa58", "score": "0.57667994", "text": "def test_str_format(self):\n my_obj = Place()\n str_format = \"[Place] ({}) {}\".format(my_obj.id, my_obj.__dict__)\n self.assertEqual(str_format, str(my_obj))", "title": "" }, { "docid": "35481b3066d54c580379783a514b2fa0", "score": "0.5762757", "text": "def __str__(self):\n formatted = super().__str__()\n return formatted", "title": "" }, { "docid": "f6c2dfe0671f1b20ef6fe0d583385e59", "score": "0.57604885", "text": "def format(self, *args, **kwargs):\r\n if args:\r\n kwargs.update(dict((str(i), value)\r\n for (i, value) in enumerate(args)))\r\n # Encode arguments to ASCII, if format string is bytes\r\n want_bytes = isinstance(self._string, str)\r\n params = {}\r\n for name, items in self._kwords.items():\r\n value = kwargs[name]\r\n for item in items:\r\n parts, conv, spec = item\r\n params[str(id(item))] = _format_field(value, parts, conv, spec,\r\n want_bytes)\r\n for name, items in self._nested.items():\r\n value = kwargs[name]\r\n for item in items:\r\n parts, conv, spec = item\r\n spec = spec % params\r\n params[str(id(item))] = _format_field(value, parts, conv, spec,\r\n want_bytes)\r\n return self._string % params", "title": "" }, { "docid": "fec5e72c6bc6db55e0a987c7a45324f6", "score": "0.57514566", "text": "def _vformat(self, format_string, args, kwargs, used_args,\n auto_arg_index=0):\n tokens = self.parse(format_string)\n\n for literal_text, field_name, format_spec, conversion in tokens:\n # Output the literal text\n if literal_text:\n self.file.write(literal_text)\n self.file.flush()\n\n # If there's a field, output it\n if field_name is not None:\n # This is some markup, find the object and do the formatting.\n # Handle arg indexing when empty field_names are given\n if field_name == '':\n if auto_arg_index is False:\n raise ValueError(\n 'cannot switch from manual field specification to '\n 'automatic field numbering'\n )\n field_name = str(auto_arg_index)\n auto_arg_index += 1\n elif field_name.isdigit():\n if auto_arg_index:\n raise ValueError(\n 'cannot switch from manual field specification to '\n 'automatic field numbering'\n )\n # Disable auto arg incrementing, if it gets used later on,\n # then an exception will be raised\n auto_arg_index = False\n\n # Given the field_name, find the object it references and the\n # argument it came from\n obj, arg_used = self.get_field(field_name, args, kwargs)\n used_args.add(arg_used)\n\n # Do any conversion on the 
resulting object\n obj = self.convert_field(obj, conversion)\n\n # Format the object and append to the result\n self.file.write(self.format_field(obj, format_spec))", "title": "" }, { "docid": "58d3ef1f5cc66a5d1cdfe1bbcde5aee2", "score": "0.5746639", "text": "def format_data(self, data):\n pass", "title": "" }, { "docid": "e2107d8669c2cca6920ec9e14fe30bf3", "score": "0.57326734", "text": "def format(self):\n\t\treturn '%s.%s.%s/%s-%s' % (self.cnpj[0:2], self.cnpj[2:5],\n\t\t\tself.cnpj[5:8], self.cnpj[8:12], self.cnpj[12:14])", "title": "" }, { "docid": "a3455119e05573dbb4d9fb9d44fad37a", "score": "0.5723483", "text": "def formatted_string(self, silent_if_no_results=False, *args, **kwargs) -> str:\n pass", "title": "" }, { "docid": "d4c5642af527a4ef3334f5297afe07b5", "score": "0.57230264", "text": "def __str__( self, ):\n a_str = f\">>>>>>>>>>* parameters (some) *<<<<<<<<<<<<\"\n a_str = f\"{a_str}\\n mode {self.mode}\"\n\n a_str = f\"{a_str}\\n logger_id {self.logger_id}\"\n a_str = f\"{a_str}\\n logging_level {self.logging_level}\"\n a_str = f\"{a_str}\\n pylogging_fn {self.pylogging_fn}\"\n\n a_str = f\"{a_str}\\n snippets_fn {self.snippets_fn}\"\n a_str = f\"{a_str}\\n snippets_sort {self.snippets_sort}\"\n\n a_str = f\"{a_str}\\n snip_file_fn {self.snip_file_fn}\"\n a_str = f\"{a_str}\\n snip_file_sort {self.snip_file_sort}\"\n a_str = f\"{a_str}\\n snip_file_command {self.snip_file_command}\"\n\n\n a_str = f\"{a_str}\\n snip_editor {self.snip_editor}\"\n a_str = f\"{a_str}\\n scratch_bat {self.scratch_bat}\"\n a_str = f\"{a_str}\\n scratch_py {self.scratch_py}\"\n a_str = f\"{a_str}\\n run_py {self.run_py}\"\n a_str = f\"{a_str}\\n ex_editor {self.ex_editor}\"\n\n a_str = f\"{a_str}\\n scratch_bat {self.scratch_bat}\"\n a_str = f\"{a_str}\\n scratch_py {self.scratch_py}\"\n\n a_str = f\"{a_str}\\n win_geometry {self.win_geometry}\"\n a_str = f\"{a_str}\\n computername {self.computername}\"\n a_str = f\"{a_str}\\n our_os {self.our_os}\"\n a_str = f\"{a_str}\\n and so much more... 
\\n\\n\"\n return a_str", "title": "" }, { "docid": "51dfba54f86229f9b56ade2254ecbded", "score": "0.5715133", "text": "def GetInternalFormat(self, p_int, p_int_1, bool):\n ...", "title": "" }, { "docid": "4f329e1daa97db8de35c13ce21854ade", "score": "0.5707691", "text": "def get_format_string(self, width, precision=None):\n format_string = \"{:\" + str(width)\n if precision is not None:\n format_string += \".\" + str(precision) + \"f} | \"\n else:\n format_string += \"} | \"\n return format_string", "title": "" }, { "docid": "db60dfbd4819562ee1a5568baa15d4ba", "score": "0.5694981", "text": "def __str__(self):\n if self.is_valid:\n valid = ''\n else:\n valid = ' -> Invalid!'\n return '> %s | Mean: %d | Var: %f | Min: %d | Max: %d%s' % \\\n (self.Name, self.get_mean, self.get_var, self.get_min, self.get_max, valid)", "title": "" }, { "docid": "7b9c628feb9d4858a0282d05753fb44d", "score": "0.5667217", "text": "def _FormatAttribute(self, name, value):\r\n if name in ('json', 'first_exception', 'last_exception'):\r\n return '<pre>%s</pre>' % self._XEscape(value)\r\n elif name == 'backoff':\r\n if value < time.time():\r\n return '<i>Expired</i>'\r\n else:\r\n return self._FormatTimestamp(value)\r\n else:\r\n return FmtDefault._FormatAttribute(self, name, value)", "title": "" }, { "docid": "96020758207d6ae6979837d29c6408c0", "score": "0.56608564", "text": "def get_string(self):\n return \"%s %s %s\\n\" % (sanitize_name(self.name), self.time, self.value)", "title": "" }, { "docid": "e6793ad78bb8da97f6247f981ad14c28", "score": "0.56601846", "text": "def format_info (cls, args):\n def shorten (s, length=100):\n s = str(s)\n if len(s) > length: s = s[:length] + \"...\"\n return s\n return shorten(str(args))", "title": "" }, { "docid": "7fdfc5788b0024b8c97883c668520aca", "score": "0.5649179", "text": "def format(self):\n chrom, start, end, strand = self.id.split(\"|\")\n return \"{}\\t{}\\t{}\\t{}\\t{i.type}\\t{i.subtype}\\t{i.confidence}\" \\\n \"\\t{i.AT_AC_U12_d}\\t{i.GT_AG_U12_d}\\t{i.GT_AG_U2_d}\" \\\n \"\\t{i.GC_AG_U2_d}\\t{i.AT_AC_U12_b}\\t{i.GT_AG_U12_b}\\n\".format(chrom, start, end, strand, i=self)", "title": "" }, { "docid": "89b0bd5ee40f7c5ef7155462df1f056d", "score": "0.5648649", "text": "def format_usage(self, ctx, formatter):\n PrettyHelper.format_usage(self, ctx, formatter)", "title": "" }, { "docid": "81ae469e69d0479527760d3370318850", "score": "0.5646507", "text": "def listFormatters(self): #$NON-NLS-1$\r", "title": "" }, { "docid": "51a8b1f1945e586dca745a28e699be46", "score": "0.5640508", "text": "def test_with_format(self):\n to_print = [\n bitmath.Byte(101),\n bitmath.KiB(202),\n bitmath.MB(303),\n bitmath.GiB(404),\n bitmath.TB(505),\n bitmath.PiB(606),\n bitmath.EB(707)\n ]\n\n str_reps = [\n \"101.00-Byte\",\n \"202.00-KiB\",\n \"303.00-MB\",\n \"404.00-GiB\",\n \"505.00-TB\",\n \"606.00-PiB\",\n \"707.00-EB\"\n ]\n\n # Make sure formatting looks right BEFORE the context manager\n self.assertEqual(str(bitmath.KiB(1.337)), \"1.337 KiB\")\n\n with bitmath.format(\"{value:.2f}-{unit}\"):\n for (inst, inst_str) in zip(to_print, str_reps):\n self.assertEqual(str(inst), inst_str)\n\n # Make sure formatting looks right AFTER the context manager\n self.assertEqual(str(bitmath.KiB(1.337)), \"1.337 KiB\")", "title": "" }, { "docid": "aaf64b7ce90790c5bad894ed7748a32b", "score": "0.56395894", "text": "def _format_data(self, data):\n return data", "title": "" }, { "docid": "ba8017f07b9d23f41bac246bd61c1eac", "score": "0.56196696", "text": "def __str__(self):\n eer = self.get_eer()\n if 
self.percent:\n mult = 100\n symbol = '%'\n else:\n mult = 1\n symbol = ''\n fmtstr = f' | {self.name}: {round(eer*mult, self.round_digits)}{symbol} | Num. Elements: {len(self.y_true)}'\n return fmtstr", "title": "" }, { "docid": "b5ae68ae5231c6e7b66cbab722432e12", "score": "0.56172365", "text": "def format(self, tree, name):\n pass", "title": "" }, { "docid": "15de66ef1ad63bbbd0deeb3e82ea1897", "score": "0.5611081", "text": "def format_assigner():\n\tfile_extension = file_extension_reader()\n\t# If the file extension is .csv or .txt, keep a distinction between the extension and the format (tabular).\n\tif file_extension in tabular_exts_and_seps.viewkeys():\n\t\tformat = generic_tabular_format\n\telse:\n\t\tformat = file_extension\n\tprint fa_info_str.format(f_info[1], format)\n\treturn format", "title": "" }, { "docid": "41a5fd7bb57a4d63a301454ec5dfc3af", "score": "0.5608604", "text": "def _format(self):\n pre = 'set format {} \"{}\"\\n'\n if isinstance(self.exp_format, str):\n val = self.exp_format\n elif self.canvas:\n val = self.canvas._format(self.axis)\n else:\n val = rcParams[self.axis + \"format\"]\n return pre.format(self.axis, val)", "title": "" }, { "docid": "ae4c41f16df083218fb6147f7bf3485f", "score": "0.5603345", "text": "def __str__(self): \n return f'A {self.data_format} processor for Cartographic Boundary Files'", "title": "" }, { "docid": "bdfbb4a5acf64909d93ad84544c67bf2", "score": "0.55930483", "text": "def get_formatter(self, **kwargs):\n config = dict([\n (attr, getattr(self, attr))\n for attr in [\n \"include_sign\",\n \"group_with_commas\",\n \"num_decimal_places\",\n ]\n ])\n config.update(kwargs)\n return \"\".join([\n \"{\",\n config.get(\"field_name\", \"\"),\n \":\",\n \"+\" if config[\"include_sign\"] else \"\",\n \",\" if config[\"group_with_commas\"] else \"\",\n \".\", str(config[\"num_decimal_places\"]), \"f\",\n \"}\",\n ])", "title": "" }, { "docid": "2928aeb9e62fba2bdce1dcc593fd86e2", "score": "0.55842286", "text": "def alt_format(cls, data, formatstr):\n pieces = []\n for i, piece in enumerate(re_alt.split(str(formatstr))):\n if i % 2:\n pieces.append(str(getattr(cls, piece)(data)))\n elif piece:\n pieces.append(piece)\n return ''.join(pieces)", "title": "" }, { "docid": "f1dd18eae7b74d9a90eca8ac3b72d340", "score": "0.5579368", "text": "def get_info(self):\n\n info = \"\"\"Resource: %s\n\nDatatype properties: \n%s\n\nObject properties: \n%s\n\n\"\"\" % (self.name, self.get_info_dp(), self.get_info_op())\n\n # return\n return info", "title": "" }, { "docid": "19736dd1e110804e7a4fc7c1013d115d", "score": "0.5579052", "text": "def __str__(self):\n return \"{}\\nyear: {}\\n\".format(self.__class__, self.year) \\\n + \"area [km2]: {:.2f}\\n\".format(self.area_m2 / 1e6) \\\n + \"volume [km3]: {:.3f}\\n\".format(self.volume_m3 / 1e9) \\\n + \"length [km]: {:.2f}\\n\".format(self.length_m / 1e3) \\\n + \"min elev [m asl.]: {:.0f}\\n\".format(self.min_hgt) \\\n + \"spec mb [mm w.e. 
yr-1]: {:.2f}\".format(self.spec_mb)", "title": "" }, { "docid": "f8b7b480b196b58b609c3a10bf977c2e", "score": "0.55771255", "text": "def traceformat(self) :\n\t\ttry :\n\t\t\treturn self._traceformat\n\t\texcept Exception as e:\n\t\t\traise e", "title": "" }, { "docid": "1346fd430a2a300b49ee018973b25b55", "score": "0.5575903", "text": "def __str__(self):\n obj_str = \"Limit args for: {:s} \\n\".format(self.job_id)\n obj_str += 'signal: {:s}\\n'.format(self.kwargs['sig_name'])\n obj_str += 'signal region: {:s}\\n'.format(self.sig_reg_name)\n obj_str += 'mass cut: {:.1f}\\n'.format(self.kwargs['mass_cut'])\n obj_str += 'registered processes: \\n'\n for process in list(self.kwargs['process_configs'].keys()):\n obj_str += '\\t{:s}\\n'.format(process)\n for sig_reg, yld in list(self.kwargs['sig_yield'].items()):\n obj_str += 'SR {:s} yield: {:.2f} +- {:.2f}\\n'.format(sig_reg, *yld)\n\n if 'sr_syst' in self.kwargs:\n for sig_reg in list(self.kwargs['sr_syst'].keys()):\n obj_str += 'SR systematics for {:s}: \\n'.format(sig_reg)\n for process in list(self.kwargs['sr_syst'][sig_reg].keys()):\n obj_str += 'Process: {:s}\\n'.format(process)\n for name, unc in list(self.kwargs['sr_syst'][sig_reg][process].items()):\n obj_str += '\\t{:s}\\t\\t{:.2f}\\n'.format(name, unc)\n obj_str += '\\n'\n for sig_reg in list(self.kwargs['bkg_yields'].keys()):\n obj_str += 'Bkg yields in {:s}: \\n'.format(sig_reg)\n for process, ylds in list(self.kwargs['bkg_yields'][sig_reg].items()):\n obj_str += '\\t{:s}\\t\\t{:.2f} +- {:.2f}\\n'.format(process, *ylds)\n if self.kwargs['ctrl_syst'] is not None:\n obj_str += 'CR systematics: \\n'\n for region in list(self.kwargs['ctrl_syst'].keys()):\n obj_str += 'CR: {:s}\\n'.format(region)\n for process in list(self.kwargs['ctrl_syst'][region].keys()):\n obj_str += 'Process: {:s}\\n'.format(process)\n for name, unc in list(self.kwargs['ctrl_syst'][region][process].items()):\n obj_str += '\\t{:s}\\t\\t{:.2f}\\n'.format(name, unc)\n obj_str += '\\n'\n return obj_str", "title": "" }, { "docid": "b385c28538490527a3d75a11450ec0c7", "score": "0.557506", "text": "def _format_common(schema: Dict) -> None:\n if \"description\" in schema:\n formatted.append(\" \"*4 + \"Description: {}\".format(schema[\"description\"]))\n if \"type\" in schema:\n formatted.append(\" \"*4 + \"Type: {}\".format(str(schema[\"type\"])))\n if \"properties\" in schema:\n formatted.append(\" \"*4 + \"Properties:\")\n for property_name, property_value in schema[\"properties\"].items():\n formatted.append(\" \"*8 + \"- \" + property_name + \":\")\n for key, value in property_value.items():\n formatted.append(\" \"*12 + \"{}: {}\".format(sentence_case(key), str(value)))\n formatted.append(\" \"*12 + \"Required: \" +\n str(property_name in schema.get(\"required\", [])))", "title": "" }, { "docid": "65c5ddaff05db96be340211d36ac8a0f", "score": "0.5575002", "text": "def format(self, record):\n if record.levelname == 'DEBUG':\n # This function is quite a ways away from the calling function due to the custom logging module\n func = inspect.currentframe().f_back.f_back.f_back.f_back.f_back.f_back.f_back.f_back.f_back.f_back.f_code\n format = '[DEBUG] {message} ({name} in {file}:{line})'.format(message=record.getMessage(),\n name=func.co_name,\n file=func.co_filename,\n line=func.co_firstlineno)\n return format\n elif record.levelname == 'INFO':\n return '{message}'.format(message=record.getMessage())\n elif record.levelname == 'PLUS':\n prefix = '{color}[+]'.format(color=Fore.GREEN)\n elif record.levelname == 'ASTERISK':\n 
prefix = '[*]'\n elif record.levelname == 'MINUS':\n prefix = '{style}{color}[-]'.format(color=Fore.YELLOW,\n style=Style.DIM)\n elif record.levelname == 'BANG':\n prefix = '{color}[!]'.format(color=Fore.YELLOW)\n elif record.levelname == 'WARNING':\n prefix = '{color}{background}{style}[W]'.format(color=Fore.RED, \n background=Back.YELLOW,\n style=Style.NORMAL)\n elif record.levelname == 'ERROR':\n prefix = '{color}{background}{style}[E]'.format(color=Fore.YELLOW, \n background=Back.RED,\n style=Style.NORMAL)\n else:\n prefix = ''\n\n format = '{prefix} {message}{reset_color}'.format(prefix=prefix,\n message=record.getMessage(),\n reset_color=Style.RESET_ALL)\n\n # handle an exception\n if record.exc_info:\n record.exc_text = self.formatException(record.exc_info)\n format += '{color}{exc_text}{reset_color}'.format(color=Fore.RED + Back.YELLOW,\n exc_text=record.exc_text,\n reset_color=Style.RESET_ALL)\n\n return format", "title": "" }, { "docid": "e02127441ad80224d9ef12cd0549c3b5", "score": "0.5573769", "text": "def pretty(self):\n return f\"({self.area_code})-{self.exchange_code}-{self.subscriber}\"", "title": "" }, { "docid": "5fa323372f0e9e8984be4bfe48935a04", "score": "0.5565059", "text": "def format(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"format\")", "title": "" }, { "docid": "59c6b2d37bb9f8b05c1fe52f2e031910", "score": "0.5556288", "text": "def formatted(self):\n return self._formatted", "title": "" }, { "docid": "e253f9103aeb1d195b339e6c29c15328", "score": "0.5552304", "text": "def __str__(self):\n def render(string):\n if string is None:\n return \"\"\n else:\n return string\n #\n indent_str = self.getIndentStr()\n stg = \"%s%s: %s\\n\" % (indent_str, self.name, _prune(render(self.dtype)))\n if self.doc is not None:\n stg += \"%s %s\\n\" % (indent_str, render(self.doc))\n if self.default is not None: \n stg += \"%s default = %s\\n\" % (indent_str, str(self.default))\n return stg", "title": "" }, { "docid": "1ee71bc6da3499015fa7bd1185f26f03", "score": "0.5551162", "text": "def add_subcommand_formatting(self, command):\n ...", "title": "" }, { "docid": "dfca714b99252f1d55cefa6cf29de4bc", "score": "0.55497426", "text": "def return_label():\n label =\"{:18} | {:>12} | {:>9} | {:>12}\\n\".format('Donor Name','Total Given','Num Gifts','Average Gift')\n label += \"-\"*len(label)\n return label", "title": "" }, { "docid": "66d97deabc60f23ec5acfc68366dd566", "score": "0.5536083", "text": "def formatOutput(self,attr):\n signature = ''\n _vars = vars(attr)\n #print _vars\n for k in _vars:\n #print \"k: \", k\n #print \"k test: \", _vars[k] is not None\n if _vars[k] is not None:\n if isinstance(_vars[k],list):\n for data in _vars[k]:\n signature = '%s\\t%s %s\\n' % (signature,k.replace('_','-'),\\\n data)\n else:\n signature = '%s\\t%s %s\\n' % \\\n (signature,k.replace('_','-'),_vars[k])\n return signature", "title": "" }, { "docid": "9a75f4f892e1e0a09f79c1deb513762b", "score": "0.5532108", "text": "def __repr__(self):\n return self.format()", "title": "" }, { "docid": "b896acee8ac48f4feeef4b133a4d4a65", "score": "0.55301565", "text": "def _format(self, msg):\n body = obj2str(msg)\n return body", "title": "" }, { "docid": "40dacc21e6ab806f19288065038e3850", "score": 
"0.5525929", "text": "def pkg_fmt(status, distribution, package, version, extra=None, message=None):\n fmt = \"{status} ({distribution}): {package} {version}\".format(status=status,\n distribution=distribution,\n package=package,\n version=version)\n if extra:\n fmt += \" [{extra}]\".format(extra=extra)\n if message:\n fmt += \": {message}\".format(message=message)\n return fmt", "title": "" }, { "docid": "3325a7d233ec84d40dfd5c9539df490e", "score": "0.5519201", "text": "def test_format_specifiers(format_specifier):\n try:\n exec('\"{:' + format_specifier + '}\".format(0)')\n except ValueError as e:\n if 'Unknown format code' not in str(e):\n raise", "title": "" }, { "docid": "71b49d7c60dbf8cf55b69a97f22255b0", "score": "0.5512288", "text": "def __str__(self):\n return \"\"\"\\\nMatcher: {!r}\nGold: {} relations\nActual: {} relations\nRecall: {:.2f}%\nPrecision: {:.2f}%\nF1: {:.2f}%\"\"\".format(self.matcher, len(self._gold), len(self._actual), 100*self.recall, 100*self.precision, 100*self.F1)", "title": "" }, { "docid": "61dd1ca956733d3ba25b5b029d9f4003", "score": "0.551133", "text": "def _formatValue(self, value, format: str) -> str:\n if format is not None and format not in self.FORMATS_DEF:\n raise ValueError(\"invalid format\")\n if format == self.FORMAT_ISK:\n txt = self._format_number(value)\n elif format == self.FORMAT_NUMBER:\n txt = self._format_number(value)\n elif format == self.FORMAT_PERCENT:\n txt = \"{:.0f}%\".format(value)\n else:\n if isinstance(value, bool):\n txt = \"yes\" if value else \"no\"\n else:\n txt = str(value)\n return txt", "title": "" }, { "docid": "76a8bc6bbf68aeccc4387e5b05f753ab", "score": "0.5508881", "text": "def formatted_value(self):\n value = self.value\n from numpy import nan,isnan\n try: text = eval(self.format_code)\n except Exception,msg:\n warn(\"value: %s with value=%r: %s\\n%s\" % (self.format_code,value,msg,\n traceback.format_exc()))\n text = str(value)\n return text", "title": "" }, { "docid": "5e38f6dd8e154c5a59a99b533ac543b9", "score": "0.5508113", "text": "def _interpret_format(format_str: str, company_email: str):\n # format should be 'first_inital last' for example\n company = company_email.find(\"@\")\n company_email = company_email[company: len(company_email)]\n email_format = format_str + \" \" + company_email\n return email_format", "title": "" }, { "docid": "a0705be2ff97ab67fa8210319b3758e8", "score": "0.55030006", "text": "def format(self, record):\n extra = self.get_extra(record)\n if extra:\n extras = [f'{k}=\"{str(v)}\"' for k, v in extra.items()]\n record.msg = f\"{record.msg} [{', '.join(extras)}]\"\n return super().format(record)", "title": "" }, { "docid": "263a1898842c7329048ad30b7970d63b", "score": "0.54990196", "text": "def test_format_just(self):\n testformats = {\n '{:<10}': {\n 'name': 'Left justify',\n 'expected': '\\x1b[31mTest\\x1b[0m ',\n },\n '{:>10}': {\n 'name': 'Right justify',\n 'expected': ' \\x1b[31mTest\\x1b[0m',\n },\n '{:^10}': {\n 'name': 'Center justify',\n 'expected': ' \\x1b[31mTest\\x1b[0m ',\n },\n '{:X<10}': {\n 'name': 'Left custom char justify',\n 'expected': '\\x1b[31mTest\\x1b[0mXXXXXX',\n },\n '{:X>10}': {\n 'name': 'Right custom char justify',\n 'expected': 'XXXXXX\\x1b[31mTest\\x1b[0m',\n },\n '{:X^10}': {\n 'name': 'Center custom char justify',\n 'expected': 'XXX\\x1b[31mTest\\x1b[0mXXX',\n },\n # Colr nevers sees these formats, python takes care of it.\n # Still, I want to make sure there is never a regression.\n '{:<{w}}': {\n 'name': 'Left dynamic justify',\n 'kwargs': {'w': 10},\n 
'expected': '\\x1b[31mTest\\x1b[0m ',\n },\n '{:>{w}}': {\n 'name': 'Right dynamic justify',\n 'kwargs': {'w': 10},\n 'expected': ' \\x1b[31mTest\\x1b[0m',\n },\n '{:^{w}}': {\n 'name': 'Center dynamic justify',\n 'kwargs': {'w': 10},\n 'expected': ' \\x1b[31mTest\\x1b[0m ',\n },\n '{:{c}<{w}}': {\n 'name': 'Left dynamic custom char justify',\n 'kwargs': {'c': 'X', 'w': 10},\n 'expected': '\\x1b[31mTest\\x1b[0mXXXXXX',\n },\n '{:{c}>{w}}': {\n 'name': 'Right dynamic custom char justify',\n 'kwargs': {'c': 'X', 'w': 10},\n 'expected': 'XXXXXX\\x1b[31mTest\\x1b[0m',\n },\n '{:{c}^{w}}': {\n 'name': 'Center dynamic custom char justify',\n 'kwargs': {'c': 'X', 'w': 10},\n 'expected': 'XXX\\x1b[31mTest\\x1b[0mXXX',\n },\n # Regular formats handled by str(self.data).__format__\n '{!r}': {\n 'name': 'repr()',\n 'expected': '\\'\\\\x1b[31mTest\\\\x1b[0m\\'',\n },\n '{!s}': {\n 'name': 'str()',\n 'expected': '\\x1b[31mTest\\x1b[0m',\n }\n\n }\n\n for fmt, fmtinfo in testformats.items():\n val = fmt.format(\n Colr('Test', 'red'),\n **(fmtinfo.get('kwargs', {}))\n )\n self.assertCallEqual(\n val,\n fmtinfo['expected'],\n func=Colr.__format__,\n args=[fmt],\n msg='Colr.__format__ failed for valid format.',\n )\n\n # Colr.format should not break this.\n val = Colr('Test {:<10} Out', 'blue').format(Colr('This', 'red'))\n expected = (\n '\\x1b[34mTest \\x1b[31mThis\\x1b[0m Out\\x1b[0m'\n )\n self.assertEqual(\n str(val),\n expected,\n msg='Colr(\\'{}\\').format(Colr()) breaks formatting!',\n )", "title": "" } ]
a2cbb1881b82366cccefb4c3a24b009c
Set up calculators for tests.
[ { "docid": "fc5d7957383a02c9eaf1c51b51513ee9", "score": "0.8050646", "text": "def setUp(self) -> None:\n self.first_calculator = Calculator(60, 170, 17, 'F', 1.9)\n self.second_calculator = Calculator(80, 180, 25, 'M', 1.2)\n self.third_calculator = Calculator(75, 176, 30, 'M', 1.7)\n self.fourth_calculator = Calculator(55, 160, 24, 'F', 1.5)", "title": "" } ]
[ { "docid": "31f3e45912b6f74dc4d35bde00451dcf", "score": "0.6592788", "text": "def setUp(self):\n o, r = loadTestReactor(\n os.path.join(TEST_ROOT, \"detailedAxialExpansion\"),\n customSettings={\"inputHeightsConsideredHot\": True},\n )\n reduceTestReactorRings(r, o.cs, 5)\n\n self.stdAssems = [a for a in r.core.getAssemblies()]\n\n oCold, rCold = loadTestReactor(\n os.path.join(TEST_ROOT, \"detailedAxialExpansion\"),\n customSettings={\"inputHeightsConsideredHot\": False},\n )\n reduceTestReactorRings(rCold, oCold.cs, 5)\n\n self.testAssems = [a for a in rCold.core.getAssemblies()]", "title": "" }, { "docid": "df71de8836febe3a232ab1bab6241196", "score": "0.6305434", "text": "def setUp(self):\n self.decider = Decider(100, 0.5)\n self.actions = {\n 'PUMP_IN': 1,\n 'PUMP_OFF': 0,\n 'PUMP_OUT': -1,\n }", "title": "" }, { "docid": "13766d2d294aba0b7a5c6ad88e73ef49", "score": "0.6295678", "text": "def _make_calculators(self):\n # Create two microsimulation calculators\n gd_base = tc.GrowDiff()\n gf_base = tc.GrowFactors()\n # apply user specified growdiff\n if self.params[\"growdiff_baseline\"]:\n gd_base.update_growdiff(self.params[\"growdiff_baseline\"])\n gd_base.apply_to(gf_base)\n # Baseline calculator\n if self.use_cps:\n records = tc.Records.cps_constructor(data=self.microdata,\n gfactors=gf_base)\n else:\n records = tc.Records(self.microdata, gfactors=gf_base)\n policy = tc.Policy(gf_base)\n if self.params[\"base_policy\"]:\n update_policy(policy, self.params[\"base_policy\"])\n base_calc = tc.Calculator(policy=policy,\n records=records,\n verbose=self.verbose)\n\n # Reform calculator\n # Initialize a policy object\n gd_reform = tc.GrowDiff()\n gf_reform = tc.GrowFactors()\n if self.params[\"growdiff_response\"]:\n gd_reform.update_growdiff(self.params[\"growdiff_response\"])\n gd_reform.apply_to(gf_reform)\n if self.use_cps:\n records = tc.Records.cps_constructor(data=self.microdata,\n gfactors=gf_reform)\n else:\n records = tc.Records(self.microdata, gfactors=gf_reform)\n policy = tc.Policy(gf_reform)\n if self.params[\"base_policy\"]:\n update_policy(policy, self.params[\"base_policy\"])\n update_policy(policy, self.params[\"policy\"])\n\n # Initialize Calculator\n reform_calc = tc.Calculator(policy=policy, records=records,\n verbose=self.verbose)\n # delete all unneeded variables\n del gd_base, gd_reform, records, gf_base, gf_reform, policy\n return base_calc, reform_calc", "title": "" }, { "docid": "bb8891655db0cc9a7f06f3430526f9d6", "score": "0.6221207", "text": "def setUp(self):\n rmgpy_path = os.path.normpath(os.path.join(get_path(), '..'))\n\n qm = QMCalculator(software='gaussian',\n method='pm6',\n fileStore=os.path.join(rmgpy_path, 'testing', 'qm', 'QMfiles'),\n scratchDirectory=os.path.join(rmgpy_path, 'testing', 'qm', 'QMscratch'),\n )\n\n if not os.path.exists(qm.settings.fileStore):\n os.makedirs(qm.settings.fileStore)\n\n self.qmmol1 = GaussianMolPM6(mol1, qm.settings)", "title": "" }, { "docid": "36ce6fab041e30cae773deb3a9cac815", "score": "0.62157387", "text": "def setup(self):\n self.ctx.calc_name = self._calculation_class.__name__\n self.ctx.unexpected_failure = False\n self.ctx.restart_calc = None\n self.ctx.is_finished = False\n self.ctx.iteration = 0", "title": "" }, { "docid": "b92751fc876fc50e341951cd95c4d11d", "score": "0.6119264", "text": "def configure(self):\r\n self.add('gp_fun', GoldsteinPrice())\r\n\r\n conmin = self.add('conmin', CONMINdriver())\r\n conmin.workflow.add('gp_fun')\r\n conmin.add_parameter('gp_fun.x1')\r\n 
conmin.add_parameter('gp_fun.x2')\r\n conmin.add_objective('gp_fun.f')\r\n\r\n doe = self.add('driver', DOEdriver())\r\n doe.workflow.add('conmin')\r\n doe.add_parameter('gp_fun.x1', low=-1.5, high=1.5, start=1)\r\n doe.add_parameter('gp_fun.x2', low=-1.5, high=1.5, start=1)\r\n doe.DOEgenerator = FullFactorial(5)\r\n doe.add_responses(['gp_fun.f', 'gp_fun.x1', 'gp_fun.x2',\r\n 'gp_fun.exec_count'])\r\n self.recorders = [CSVCaseRecorder(), DumpCaseRecorder()]", "title": "" }, { "docid": "22d77ee7c808acab305a7c31ce332c39", "score": "0.61161965", "text": "def setUp(self):\n self._p_m = ProcesserManager() \n self._p_m.init_class(\"preprocesser\", \"preprocess_time.PreprocessTime\")\n self._p_m.init_class(\"preprocesser\", \"preprocess_trim.PreprocessTrim\")\n self._p_m.init_class(\"join_checker\", \"join_checker_equal.EqualJoinChecker\")", "title": "" }, { "docid": "88ebe912645e25d052eba2687c00f677", "score": "0.6082583", "text": "def setUp(self):\n rmgpy_path = os.path.normpath(os.path.join(get_path(), '..'))\n\n qm = QMCalculator(software='gaussian',\n method='pm3',\n fileStore=os.path.join(rmgpy_path, 'testing', 'qm', 'QMfiles'),\n scratchDirectory=os.path.join(rmgpy_path, 'testing', 'qm', 'QMscratch'),\n )\n\n if not os.path.exists(qm.settings.fileStore):\n os.makedirs(qm.settings.fileStore)\n\n self.qmmol1 = GaussianMolPM3(mol1, qm.settings)", "title": "" }, { "docid": "a6e077260a239948ea6915aa377a703", "score": "0.6066763", "text": "def setUp(self):\n AxialExpansionTestBase.setUp(self)\n self.common = (\"test\", \"FakeMat\", 25.0, 25.0) # name, material, Tinput, Thot", "title": "" }, { "docid": "8547b082f44a5cf775e3c04d1cbe6d2a", "score": "0.602677", "text": "def setup(self):\n info = {'index': 'mlt'}\n self.testInst = pysat.Instrument('pysat', 'testing',\n clean_level='clean',\n orbit_info=info, update_files=True)", "title": "" }, { "docid": "6f80408fd2e61df67778f0437d5d7ce4", "score": "0.60180974", "text": "def setup(self):\n\n # Instrument object and disable mutability\n self.testInst = pysat.Instrument('pysat', 'testing',\n clean_level='clean')\n self.stime = pysat.instruments.pysat_testing._test_dates['']['']\n self.meta = self.testInst.meta\n self.meta.mutable = False\n self.meta_labels = {'units': ('Units', str),\n 'name': ('Long_Name', str)}\n\n # Assign remaining values\n self.dval = None\n self.out = None\n self.default_name = ['long_name']\n self.default_nan = ['fill', 'value_min', 'value_max']\n self.default_val = {'notes': '', 'units': '', 'desc': ''}\n self.frame_list = ['dummy_frame1', 'dummy_frame2']", "title": "" }, { "docid": "4622cdd6ad993aee7c4b20af0deb9da7", "score": "0.60165966", "text": "def setUp(self):\n OrekitPropagator.init_jvm()", "title": "" }, { "docid": "1beb818a54a9f3f27a81283328b1b082", "score": "0.6015987", "text": "def setUp(self):\n complication_element1 = mommy.make('atelier.ComplicationElement', base_price=10, complexity=2, name='Element1')\n complication_element2 = mommy.make('atelier.ComplicationElement', base_price=5, complexity=1, name='Element2')\n client = mommy.make('atelier.Client')\n product = mommy.make('atelier.Product', base_price=100)\n fabric = mommy.make('atelier.Fabric', complexity_factor=2)\n allowance_discount_1 = mommy.make('atelier.AllowanceDiscount', coefficient=1)\n 
allowance_discount_2 = mommy.make('atelier.AllowanceDiscount', coefficient=2)\n atelier = mommy.make('atelier.Atelier')\n user = mommy.make('User')\n self.order = mommy.make('atelier.Order', complication_elements=[complication_element1, complication_element2],\n atelier=atelier, client=client, product=product, fabric=fabric,\n allowance_discount=[allowance_discount_1, allowance_discount_2], performer=user)", "title": "" }, { "docid": "972f0291389a44a79284738ce6a4dc16", "score": "0.59565043", "text": "def setUp(self):\n super().setUp()\n self.provider_uuid = self.ocp_provider_uuid\n\n self.rates, self.expected_value_rate_mapping = build_rates()\n self.markup = {\"value\": 10, \"unit\": \"percent\"}\n with schema_context(self.schema):\n self.cost_model = CostModel.objects.first()\n self.expected = parse_expected()", "title": "" }, { "docid": "811e6135f19cc78445a1ee243ae09ed3", "score": "0.5953776", "text": "def setUp(self):\n self.atelier = mommy.make('atelier.Atelier')", "title": "" }, { "docid": "c713d0bd565cda957a8a6e515015a910", "score": "0.59462285", "text": "def setUp(self):\n self.minimal_style = mommy.make('atelier.MinimalStyle')\n self.atelier = mommy.make(('atelier.Atelier'))\n self.product = mommy.make(Product, minimal_style=self.minimal_style, atelier=self.atelier)", "title": "" }, { "docid": "178bebebf8532a8cbe57932f012eeaa1", "score": "0.59428376", "text": "def setUp(self):\n self.engine = engine.Engine()\n self.engine.load_empty()\n\n # Set up call tracing to count calls (formula evaluations) for each column for each table.\n self.call_counts = {}\n def trace_call(col_obj, _rec):\n # Ignore formulas in metadata tables for simplicity. Such formulas are mostly private, and\n # it would be annoying to fix tests every time we change them.\n if not col_obj.table_id.startswith(\"_grist_\"):\n tmap = self.call_counts.setdefault(col_obj.table_id, {})\n tmap[col_obj.col_id] = tmap.get(col_obj.col_id, 0) + 1\n self.engine.formula_tracer = trace_call\n\n # This is set when a test case is wrapped by `test_engine.test_undo`.\n self._undo_state_tracker = None", "title": "" }, { "docid": "8892f50662078c3024f0f5390a9d1935", "score": "0.5932759", "text": "def setUp(self):\n pass\n # setup the test as needed\n # e.g. 
pandas to open Kabam qaqc csv\n # Read qaqc csv and create pandas DataFrames for inputs and expected outputs", "title": "" }, { "docid": "ee5287e6208318cfd74eb7ec59341332", "score": "0.59259707", "text": "def setup(self):\n info = {'index': 'mlt'}\n self.testInst = pysat.Instrument('pysat', 'testing',\n clean_level='clean',\n orbit_info=info, update_files=True)\n self.testInst.custom.add(filter_data, 'modify')", "title": "" }, { "docid": "772ce0c639428a572268506db3e8b639", "score": "0.5924251", "text": "def _initialize_tests(self):\n # Access the sentries for inspecting service units\n self.compute_sentry = self.d.sentry['nova-compute'][0]\n self.rabbitmq_sentry = self.d.sentry['rabbitmq-server'][0]\n self.neutron_api_sentry = self.d.sentry['neutron-api'][0]\n self.n_ovs_sentry = self.d.sentry['neutron-openvswitch'][0]", "title": "" }, { "docid": "2b5e8a93bf14af4fdbb99cbb9da53f2f", "score": "0.59093136", "text": "def setUp(self):\n\n self.h = 1\n self.a = 2\n self.c0 = 3\n self.b = 4\n self.s = 5", "title": "" }, { "docid": "6ecc327268e02629f388ff4fd5a92f89", "score": "0.5908844", "text": "def setUp(self):\n # call parent setUp()\n super(TestStockQuant, self).setUp()\n # create referred models used in test cases\n # self.stock_quant = self.env['stock.quant']\n self.product_auto = FlspProductAutomation(self)\n self.purchase_auto = FlspPurchaseAutomation(self) \n self.stock_auto = FlspStockAutomation(self)", "title": "" }, { "docid": "373e985b6df10435b237df53ef720413", "score": "0.5907595", "text": "def setUp(self):\n print('In setUp()')\n\n # dataframes used in plotting\n df_noaa = self.df_noaa\n df_berkeley = self.db_berkeley\n df_worldbank = self.df_wb # using the cached dataframe\n df_co2 = self.df_co2\n # Define the testing unit\n\n # plot_each_absolute_temperature function data\n (self.ydata, self.xdata) = plot_each_absolute_temperature(df_noaa,\n df_berkeley, df_worldbank, do_plot=False,\n fig_num=None)\n\n # plot_co2_against_temperature function data\n (self.ydata_wco2, self.xdata_wco2) = plot_co2_against_temperature(\n df_co2, df_noaa,\n df_berkeley, df_worldbank, do_plot=False,\n fig_num=0)\n\n # plot_each_temperature function data\n (self.ydata_multplt, self.xdata_multplt) = plot_each_temperature(\n df_noaa, df_berkeley,\n df_worldbank, do_plot=False)", "title": "" }, { "docid": "5e6580d91162fea4fe78c662312dacf8", "score": "0.590008", "text": "def setup(self):\n info = {'index': 'mlt'}\n self.testInst = pysat.Instrument('pysat', 'testing_xarray',\n clean_level='clean',\n orbit_info=info, update_files=True)", "title": "" }, { "docid": "b6f249da33ccde61f193eb41a71313ff", "score": "0.5898343", "text": "def setUp(self):\n super().setUp()\n self.slo = baker.make_recipe(\"makeReports.sloInReport\", report=self.rpt)\n self.assess = baker.make_recipe(\"makeReports.assessmentVersion\", report=self.rpt, slo=self.slo)\n self.assess2 = baker.make_recipe(\"makeReports.assessmentVersion\",report=self.rpt, slo=self.slo)", "title": "" }, { "docid": "5559999d23b8bee3824b48b518199380", "score": "0.58920586", "text": "def setUp(cls):\n cls.user = User.objects.create_user(\n username='Frodon',\n email='[email protected]',\n password='sam'\n )\n\n cls.address = Address.objects.create(\n street='2 rue Isildur',\n city='Minas Tirith',\n postal_code='80000',\n country='Terre du Milieu'\n )\n\n cls.group = Group.objects.create(\n name=\"La communauté de l'anneau\",\n address=cls.address,\n )\n\n cls.group_member = GroupMember.objects.create(\n user=cls.user, group=cls.group\n )\n\n cls.product1 
= Product.objects.create(\n name='Epée',\n user_provider=cls.group_member,\n tenant=cls.group_member,\n group=cls.group,\n )\n\n cls.product2 = Product.objects.create(\n name='Dague',\n group_provider=cls.group,\n tenant=cls.group_member,\n group=cls.group,\n )\n\n cls.estimation1 = Estimation(\n cost=20,\n group_member=cls.group_member,\n product=cls.product1\n )\n\n cls.estimation2 = Estimation(\n cost=20,\n group_member=cls.group_member,\n product=cls.product2\n )", "title": "" }, { "docid": "2618156062632f464250172356a30902", "score": "0.588978", "text": "def setUp(self):\n self.decider = Decider(100, 0.05)\n self.pump = Pump('127.0.0.1', 8000)\n self.sensor = Sensor('127.0.0.1', 8000)\n self.controller = Controller(self.sensor, self.pump, self.decider)", "title": "" }, { "docid": "9c3414fc89a5289bb246d417158c8605", "score": "0.5878835", "text": "def _initCalculation(self):\n pass", "title": "" }, { "docid": "2c7f0a57492a8cbaaa81de3ad6399e80", "score": "0.5867738", "text": "def setUp(self):\n # skip tests if scipy not installed\n if not HAS_SCIPY:\n raise SkipTest(\"scipy needed to test matlab conversion\")", "title": "" }, { "docid": "aa4c7249f63cd7007a4dc4ac60993807", "score": "0.5852029", "text": "def setUp(self):\n self.options = Options()\n fitting_problem = FittingProblem(self.options)\n fitting_problem.function = lambda x, p1: x + p1\n self.x_val = np.array([1.0, 8.0, 11.0])\n self.y_val = np.array([6.0, 10.0, 20.0])\n self.e_val = np.array([2.0, 4.0, 1.0])\n fitting_problem.data_x = self.x_val\n fitting_problem.data_y = self.y_val\n fitting_problem.data_e = self.e_val\n self.cost_function = WeightedNLLSCostFunc(fitting_problem)", "title": "" }, { "docid": "04ce2d6789efaf2142fc6825f5d819ac", "score": "0.58440536", "text": "def setup(self):\n self.testInst = pysat.Instrument('pysat', 'testing')\n self.stime = pysat.instruments.pysat_testing._test_dates['']['']\n self.meta = self.testInst.meta\n\n self.meta_labels = {'units': ('Units', str),\n 'name': ('Long_Name', str)}\n self.dval = None\n self.out = None\n self.default_name = ['long_name']\n self.default_nan = ['fill', 'value_min', 'value_max']\n self.default_val = {'notes': '', 'units': '', 'desc': ''}\n self.frame_list = ['dummy_frame1', 'dummy_frame2']", "title": "" }, { "docid": "b2918f92341e6ada0f63094799d50934", "score": "0.58280444", "text": "def setUp(self):\n self.minimizers = [\"deriv_free_algorithm\", \"general\"]\n cost_func = make_cost_function(minimizers=self.minimizers)\n problem = cost_func.problem\n self.controller = DummyController(cost_func=cost_func)\n self.options = problem.options\n self.grabbed_output = output_grabber.OutputGrabber(self.options)\n self.controller.parameter_set = 0\n self.count = 0\n self.result = fitbm_result.FittingResult(\n controller=self.controller,\n accuracy=1,\n runtimes=[1])\n self.cp = Checkpoint(self.options)", "title": "" }, { "docid": "88d0f1a9418b673ea697e543426c1a42", "score": "0.5825659", "text": "def setUp(self):\r\n\r\n self.DUT = HFGaAsFET()", "title": "" }, { "docid": "67382b34a82683da3e871757491cbd66", "score": "0.58221847", "text": "def _initialize_tests(self):\n # Access the sentries for inspecting service units\n self.pxc_sentry = self.d.sentry['percona-cluster'][0]\n self.glance_sentry = self.d.sentry['glance'][0]\n self.keystone_sentry = self.d.sentry['keystone'][0]\n self.rabbitmq_sentry = self.d.sentry['rabbitmq-server'][0]\n u.log.debug('openstack release val: {}'.format(\n self._get_openstack_release()))\n u.log.debug('openstack release str: {}'.format(\n 
self._get_openstack_release_string()))\n\n # Authenticate admin with keystone\n self.keystone = u.authenticate_keystone_admin(self.keystone_sentry,\n user='admin',\n password='openstack',\n tenant='admin')\n # Authenticate admin with glance endpoint\n self.glance = u.authenticate_glance_admin(self.keystone)", "title": "" }, { "docid": "6e108111c66366c147c8340e1a13c8d9", "score": "0.5822046", "text": "def setUp(self):\n if not self.__class__._data_ws:\n self.__class__._data_ws = sapi.LoadNexus(\"ENGINX00228061.nxs\", OutputWorkspace='ENGIN-X_test_ws')\n\n if not self.__class__._van_curves_ws:\n # Note the pre-calculated file instead of the too big vanadium run\n # self.__class__._van_ws = LoadNexus(\"ENGINX00236516.nxs\", OutputWorkspace='ENGIN-X_test_vanadium_ws')\n self.__class__._van_curves_ws = sapi.LoadNexus(Filename=\n 'ENGINX_precalculated_vanadium_run000236516_bank_curves.nxs',\n OutputWorkspace='ENGIN-X_vanadium_curves_test_ws')\n self.__class__._van_integ_tbl = sapi.LoadNexus(Filename=\n 'ENGINX_precalculated_vanadium_run000236516_integration.nxs',\n OutputWorkspace='ENGIN-X_vanadium_integ_test_ws')", "title": "" }, { "docid": "66a0beab74f4699db95a200160486dfc", "score": "0.58204985", "text": "def setUp(self):\n\n self.places = 7 # precision\n\n return", "title": "" }, { "docid": "f4b8e6d63d030dae5228dc54a442714a", "score": "0.58189374", "text": "def setup(self):\n # Load a test instrument\n self.testInst = pysat.Instrument('pysat', 'testing', tag='12',\n clean_level='clean')\n self.testInst.load(2009, 1)", "title": "" }, { "docid": "38839b4787601a72faa7e19fd3f84263", "score": "0.5806655", "text": "def setUp(self):\n super().setUp()\n self.slo = baker.make(\"SLOInReport\", report=self.rpt)\n self.assess = baker.make(\"AssessmentVersion\", report=self.rpt, slo=self.slo)\n self.assess2 = baker.make(\"AssessmentVersion\",report=self.rpt, slo=self.slo)", "title": "" }, { "docid": "59d0a5cda378139dbc56ef172efa6511", "score": "0.5802624", "text": "def setup(self):\n self.measurer = distance.HaversineDistance()", "title": "" }, { "docid": "99e414b16e995bbcb07ed5ec4adb1c3e", "score": "0.58011484", "text": "def setup(self):\n info = {'index': 'mlt'}\n self.testInst = pysat.Instrument('pysat', 'testing_xarray',\n clean_level='clean',\n orbit_info=info, update_files=True)\n self.testInst.custom.add(filter_data, 'modify')", "title": "" }, { "docid": "9dea8b413c77d8912d07561e13d935bd", "score": "0.57921815", "text": "def setUp(self):\n\t\t\n\t\tself.test_order = [\"ate\", \"eat\", \"tae\", \"tea\", \"eta\"]\n\t\tself.result = [\"ate\", \"eat\", \"eta\", \"tae\", \"tea\"]", "title": "" }, { "docid": "7819336f2fdf5f64c677c4bae81eb917", "score": "0.5791984", "text": "def setUp(self):\n self.weight_init = pc.weight_init_pca()", "title": "" }, { "docid": "01899751b842a0dd97d22c937c32e327", "score": "0.5785554", "text": "def setUp(self) -> None:\n\n super().setUp()\n\n self.finance_inputs = {\n \"discount_rate\": 0.1,\n \"general_o&m\": 500,\n \"misc\": {\"cost\": 0},\n \"bos\": {\"cost\": 200, \"cost_decrease\": 2},\n \"diesel_fuel\": {\"cost\": 0.9, \"cost_decrease\": -1},\n \"grid\": {\"cost\": 0.01, \"extension cost\": 5000, \"infrastructure_cost\": 
2000},\n \"households\": {\"connection_cost\": 100},\n \"inverter\": {\n \"cost\": 200,\n \"cost_decrease\": 2,\n \"lifetime\": 4,\n \"size_increment\": 1,\n },\n \"kerosene\": {\"cost\": 0.008},\n \"pv\": {\n \"cost\": 500,\n \"cost_decrease\": 5,\n \"installation_cost\": 100,\n \"installation_cost_decrease\": 0,\n \"o&m\": 5,\n },\n \"diesel_generator\": {\n \"cost\": 200,\n \"installation_cost\": 50,\n \"installation_cost_decrease\": 0,\n \"o&m\": 20,\n \"cost_decrease\": 0,\n },\n \"storage\": {\"cost\": 400, \"cost_decrease\": 5, \"o&m\": 10},\n }\n self.location = mock.Mock(max_years=20)\n self.logger = mock.MagicMock()\n self.yearly_load_statistics = pd.DataFrame(\n {\n \"Maximum\": {\n 0: 4644.0,\n 1: 4577.0,\n 2: 4513.0,\n 3: 5366.0,\n 4: 5968.0,\n 5: 6288.0,\n 6: 7583.0,\n 7: 7806.0,\n 8: 9182.0,\n 9: 9798.0,\n 10: 10184.0,\n 11: 11106.0,\n 12: 11653.0,\n 13: 11907.0,\n 14: 12887.0,\n 15: 13638.0,\n 16: 14121.0,\n 17: 14564.0,\n 18: 15613.0,\n 19: 15102.0,\n },\n \"Mean\": {\n 0: 1231.0,\n 1: 1372.0,\n 2: 1524.0,\n 3: 1791.0,\n 4: 2184.0,\n 5: 2472.0,\n 6: 2946.0,\n 7: 3295.0,\n 8: 3766.0,\n 9: 4147.0,\n 10: 4540.0,\n 11: 4968.0,\n 12: 5319.0,\n 13: 5653.0,\n 14: 6028.0,\n 15: 6274.0,\n 16: 6612.0,\n 17: 6757.0,\n 18: 7129.0,\n 19: 7263.0,\n },\n \"Median\": {\n 0: 1129.0,\n 1: 1245.0,\n 2: 1395.0,\n 3: 1605.0,\n 4: 2022.5,\n 5: 2270.0,\n 6: 2749.5,\n 7: 3065.0,\n 8: 3520.0,\n 9: 3875.0,\n 10: 4258.0,\n 11: 4680.0,\n 12: 5027.5,\n 13: 5335.0,\n 14: 5710.0,\n 15: 5940.5,\n 16: 6289.0,\n 17: 6450.0,\n 18: 6874.5,\n 19: 6945.0,\n },\n }\n )", "title": "" }, { "docid": "41bba6b3ed290e932b29c1f0447e600e", "score": "0.57797956", "text": "def unittest_setup():\n clear_cfg_files(True)\n add_spinnaker_cfg()\n FecDataWriter.mock()", "title": "" }, { "docid": "671dad05709bf2f717b6b4bab6dd406f", "score": "0.577749", "text": "def setup(self):\n super(TestFmfBasics, self).__initialize__()", "title": "" }, { "docid": "4b39729027eab79f471e1cb66df251d0", "score": "0.57749325", "text": "def main():\n run_test_sum_cosines()\n run_test_sum_square_roots()", "title": "" }, { "docid": "931c42fa6e4cc214f9c7a95a6a8b3669", "score": "0.5767593", "text": "def setUp(self) -> None:\n self.base_tf = generate_triples_factory(\n num_entities=50,\n num_relations=9,\n num_triples=500,\n )\n self.training, self.testing, self.validation = self.base_tf.split([0.8, 0.1, 0.1])", "title": "" }, { "docid": "1274b5e885898a7bd9600328f8d7a391", "score": "0.5758203", "text": "def setUp(self):\n self.confusion = Confusion()", "title": "" }, { "docid": "bb12c2ef1abfa866d315af77fb1b55b7", "score": "0.57489103", "text": "def setUp(self):\n config = TestConfiguration()\n self.db_mgr = DatabaseManager(config)\n self.mgr = AccountManager(config)", "title": "" }, { "docid": "c2d7bb138fa52505feb7a5a96cd49a85", "score": "0.5747378", "text": "def setUp(self):\n self.pump = Pump('127.0.0.1', 8000)\n\n self.actions = {'PUMP_IN': self.pump.PUMP_IN,\n 'PUMP_OUT': self.pump.PUMP_OUT,\n 'PUMP_OFF': self.pump.PUMP_OFF,\n }\n\n self.decider = Decider(100, 0.05)", "title": "" }, { "docid": "93d634925ca4cd42ab67364dab3dcb40", "score": "0.57472056", "text": "def tests():", "title": "" }, { "docid": "93d634925ca4cd42ab67364dab3dcb40", "score": "0.57472056", "text": "def tests():", "title": "" }, { "docid": "5538736b1a7de8f272f1abc7a8b85656", "score": "0.57427514", "text": "def setUp(self):\n self.options = Options()\n fitting_problem = FittingProblem(self.options)\n fitting_problem.function = lambda x, p1: x + p1\n self.x_val = np.array([1.0, 8.0, 
11.0])\n self.y_val = np.array([6.0, 10.0, 20.0])\n fitting_problem.data_x = self.x_val\n fitting_problem.data_y = self.y_val\n self.cost_function = PoissonCostFunc(fitting_problem)", "title": "" }, { "docid": "a10f6e4736778758994ec0032d1997ae", "score": "0.5740281", "text": "def setUp(self) -> None:\n self.evaluator = EvalEnrichment()\n self.evaluator.load_annotations(GENES2GO_PATH)\n self.davinci_evaluator = EvalEnrichment(model=\"text-davinci-003\")\n self.hgnc = get_adapter(\"sqlite:obo:hgnc\")", "title": "" }, { "docid": "f1913bc63ce0ef21c9c6e56e06c71b80", "score": "0.5737976", "text": "def testBasics(self):\n for addend in (1.1, -3.5):\n for multiplicand in (0.9, -45.0):\n config = AddMultTask.ConfigClass()\n config.add.addend = addend\n config.mult[\"stdMult\"].multiplicand = multiplicand\n # make sure both ways of accessing the registry work and give\n # the same result\n self.assertEqual(config.mult.active.multiplicand, multiplicand)\n addMultTask = AddMultTask(config=config)\n for val in (-1.0, 0.0, 17.5):\n ret = addMultTask.run(val=val)\n self.assertAlmostEqual(ret.val, (val + addend) * multiplicand)", "title": "" }, { "docid": "c82418a210cc72305ed4773726ff21e2", "score": "0.5727556", "text": "def populate(self):\n # Create the default sim\n populate_standard_test()", "title": "" }, { "docid": "4c56bec0367bb7c70c0c1f93e949292f", "score": "0.57265246", "text": "def setUp(self):\r\n\r\n self.DUT = Thermistor()", "title": "" }, { "docid": "aca7fdfb6abc9fbfdb51aa2f5b6ba667", "score": "0.5720477", "text": "def setUp(self):\n self.options = Options()\n fitting_problem = FittingProblem(self.options)\n fitting_problem.function = lambda x, p1: x + p1\n self.x_val = np.array([1.0, 8.0, 11.0])\n self.y_val = np.array([6.0, 10.0, 20.0])\n fitting_problem.data_x = self.x_val\n fitting_problem.data_y = self.y_val\n self.cost_function = HellingerNLLSCostFunc(fitting_problem)", "title": "" }, { "docid": "714f868f28a79d6beca6af2c63c44d52", "score": "0.5717344", "text": "def setUp(self):\n\n self.fl1 = FreqLearner()", "title": "" }, { "docid": "5bf483f12f62fecfbf9e905278edcd40", "score": "0.57153475", "text": "def setUp(self):\n self.options = Options()\n fitting_problem = FittingProblem(self.options)\n fitting_problem.function = lambda x, p1: x + p1\n self.x_val = np.array([1.0, 8.0, 11.0])\n self.y_val = np.array([6.0, 10.0, 20.0])\n fitting_problem.data_x = self.x_val\n fitting_problem.data_y = self.y_val\n self.cost_function = NLLSCostFunc(fitting_problem)", "title": "" }, { "docid": "36206cfe0cf07120fb84d288a1c79d35", "score": "0.57145554", "text": "def setup(self):\n self.meta = pysat.Meta()\n self.testInst = pysat.Instrument('pysat', 'testing',\n clean_level='clean')", "title": "" }, { "docid": "448379a752bb4283292e39d4a8828b24", "score": "0.57134247", "text": "def runTest(self):\n self.setUp()\n self.test_ValveQuantificationMitral()", "title": "" }, { "docid": "7e224b827beed1daad6c40af2f4f9eb3", "score": "0.57114726", "text": "def setUp(self):\n\n self.DUT = Testing()", "title": "" }, { "docid": "500354c2286c889ea2b1c392c7355893", "score": "0.5709872", "text": "def setup(self):\n info = {'index': 'mlt'}\n self.testInst = pysat.Instrument('pysat', 'testing',\n clean_level='clean',\n orbit_info=info)\n times = [[pysat.datetime(2008, 12, 31, 4),\n pysat.datetime(2008, 12, 31, 5, 37)],\n [pysat.datetime(2009, 1, 1),\n pysat.datetime(2009, 1, 1, 1, 37)]\n ]\n for seconds in np.arange(38):\n day = pysat.datetime(2009, 1, 2) + \\\n pds.DateOffset(days=int(seconds))\n times.append([day, day +\n 
pds.DateOffset(hours=1, minutes=37,\n seconds=int(seconds)) -\n pds.DateOffset(seconds=20)])\n\n self.testInst.custom.add(filter_data2, 'modify', times=times)", "title": "" }, { "docid": "4efc8bb7ebf80be8f786c411de8f73ce", "score": "0.57010704", "text": "def setUp(self):\n self.allowance_discount = mommy.make(AllowanceDiscount)", "title": "" }, { "docid": "ef36637650e5980508bba7f0b3134cb2", "score": "0.5698172", "text": "def setup(self):\n info = {'index': 'orbit_num', 'kind': 'orbit'}\n self.testInst = pysat.Instrument('pysat', 'testing',\n clean_level='clean',\n orbit_info=info)\n self.testInst.custom.add(filter_data, 'modify')", "title": "" }, { "docid": "4adb6aa39be7b8daa022475403e5ae12", "score": "0.5694293", "text": "def setUp(self):\n super().setUp()\n self.start_radius()\n self.start_chewie()", "title": "" }, { "docid": "ae88fd8d2f40896585179df805292a1e", "score": "0.5690836", "text": "def setUp(self):\n\n self.analysis = DBInteractions()", "title": "" }, { "docid": "00c216a4949e0ce568671ef2984903f3", "score": "0.567314", "text": "def setup_method(self):\n op_maker = serving.OpMaker()\n op_seq_maker = serving.OpSeqMaker()\n read_op = op_maker.create(\"general_reader\")\n op_seq_maker.add_op(read_op)\n infer_op_name = \"general_infer\"\n general_infer_op = op_maker.create(infer_op_name)\n op_seq_maker.add_op(general_infer_op)\n general_response_op = op_maker.create(\"general_response\")\n op_seq_maker.add_op(general_response_op)\n\n self.test_server = Server()\n self.test_server.set_op_sequence(op_seq_maker.get_op_sequence())\n self.test_server.load_model_config(self.model_dir)", "title": "" }, { "docid": "6ba5a4a3d163b37d8ccd3436762de314", "score": "0.56702095", "text": "def _initialize_tests(self):\n # Access the sentries for inspecting service units\n self.{{ metadata.package }}_sentry = self.d.sentry['{{ metadata.package }}'][0]\n self.mysql_sentry = self.d.sentry['mysql'][0]\n self.keystone_sentry = self.d.sentry['keystone'][0]\n self.rabbitmq_sentry = self.d.sentry['rabbitmq-server'][0]\n self.{{ metadata.package }}_svcs = {{ all_services }}\n\n # Authenticate admin with keystone endpoint\n self.keystone = u.authenticate_keystone_admin(self.keystone_sentry,\n user='admin',\n password='openstack',\n tenant='admin')", "title": "" }, { "docid": "ca120fc9fe1e6b0866110c4b63af6cde", "score": "0.566399", "text": "def test_suite():\r\n test(hypotenuse(3, 4) == 5.0)\r\n test(hypotenuse(12, 5) == 13.0)\r\n test(hypotenuse(24, 7) == 25.0)\r\n test(hypotenuse(9, 12) == 15.0)", "title": "" }, { "docid": "4dd0ef80a358037838f19fc52807767d", "score": "0.5660243", "text": "def setup(self):\n info = {'index': 'orbit_num', 'kind': 'orbit'}\n self.testInst = pysat.Instrument('pysat', 'testing',\n clean_level='clean',\n orbit_info=info, update_files=True)", "title": "" }, { "docid": "207980625312391f04485a645fcdda9b", "score": "0.5658546", "text": "def setUp(self):\n StatCounter.reset_counts()\n # sub classes can change the following default settings\n self.use_fast_heapify = False # the default setting\n\n # set max number of childre that will be tested\n self.max_num_children = DEFAULT_MAX_NUM_CHILDREN_TO_TEST\n\n # Most tests will change num_children before running\n # set to 2 as a simple default\n self.num_children = 2", "title": "" }, { "docid": "c84d162ff83a41076e8d8e6ad5b87d3b", "score": "0.5652333", "text": "def setup(self):\n info = {'index': 'latitude', 'kind': 'polar'}\n self.testInst = pysat.Instrument('pysat', 'testing',\n clean_level='clean',\n orbit_info=info)\n 
self.testInst.custom.add(filter_data, 'modify')", "title": "" }, { "docid": "c3a4ca4e32481990ff2d5eaf9c6fa3d1", "score": "0.56503654", "text": "def setUp(self):\n # Standard config file with two test suites.\n self.config = [\"# a comment\", \" \",\n \"QIIME\\tsource /bin/setup.sh; cd /bin; ./tests.py\",\n \"PyCogent\\t/bin/cogent_tests\"]", "title": "" }, { "docid": "42da6659acb9e9272eab9f72b777aade", "score": "0.56488705", "text": "def setUp(self):\n self.hp_intra = gd.HP_intra(-0.32, -0.58, -0.34, -3.80, 4.48, -0.73)\n self.hp_inter = gd.HP_inter(-0.87, -0.61, 3.07, 2.35, 0.78, 32.52)", "title": "" }, { "docid": "dde1de7c918d040f7cba200373f43997", "score": "0.5639672", "text": "def setUp(cls):\n cls.user1 = User.objects.create_user(\n username='Frodon',\n email='[email protected]',\n password='sam'\n )\n cls.user2 = User.objects.create_user(\n username='Sam',\n email='[email protected]',\n password='frodon'\n )\n cls.group1 = Group.objects.create(name=\"La communauté de l'anneau\")\n cls.group2 = Group.objects.create(name=\"Mordor\")\n\n cls.group1_member1 = GroupMember.objects.create(\n user=cls.user1,\n group=cls.group1\n )\n cls.group2_member = GroupMember.objects.create(\n user=cls.user1,\n group=cls.group2\n )\n\n cls.group1_member2 = GroupMember.objects.create(\n user=cls.user2,\n group=cls.group1\n )\n\n cls.product1 = Product.objects.create(\n name='Epée',\n user_provider=cls.group1_member1,\n group=cls.group1\n )\n\n cls.group1_member1_estimate_product1 = (\n Estimation.objects.create(\n cost=10,\n group_member=cls.group1_member1,\n product=cls.product1)\n )\n cls.group1_member2_estimate_product1 = (\n Estimation.objects.create(\n cost=30,\n group_member=cls.group1_member2,\n product=cls.product1)\n )", "title": "" }, { "docid": "c45f5f8ba2339a814473bf09422198cb", "score": "0.56382793", "text": "def setup(self):\n info = {'index': 'latitude', 'kind': 'polar'}\n self.testInst = pysat.Instrument('pysat', 'testing',\n clean_level='clean',\n orbit_info=info, update_files=True)", "title": "" }, { "docid": "3c1645fc98068fc3db23b83d18ffa76c", "score": "0.56351286", "text": "def setUp(self):\n data = np.ones((1, 2, 4), dtype=np.float32)\n self.latlon_cube = set_up_variable_cube(\n data, \"precipitation_amount\", \"kg m^-2\",\n )\n self.expected_lons = np.array([-15, -5, 5, 15, -15, -5, 5, 15]).reshape(2, 4)\n self.expected_lats = np.array([-5, -5, -5, -5, 5, 5, 5, 5]).reshape(2, 4)\n self.equalarea_cube = set_up_variable_cube(\n data,\n \"precipitation_amount\",\n \"kg m^-2\",\n \"equalarea\",\n grid_spacing=2000,\n domain_corner=(-1000, -1000),\n )\n self.expected_proj_x = np.array(\n [-1000, 1000, 3000, 5000, -1000, 1000, 3000, 5000]\n ).reshape(2, 4)\n self.expected_proj_y = np.array(\n [-1000, -1000, -1000, -1000, 1000, 1000, 1000, 1000]\n ).reshape(2, 4)", "title": "" }, { "docid": "ce7e15412576d332807e6fac77311362", "score": "0.56176084", "text": "def setUp(self):\n self.art = pc.datasets.decaying_multi_normal(2, 1)", "title": "" }, { "docid": "0714b55a928f10390dd112e4a71d64e7", "score": "0.5608039", "text": "def test_init(self):\n pass", "title": "" }, { "docid": "71d95b206aeeb2203b304c1aeaa50968", "score": "0.5605523", "text": "def setup_class(cls):\n cls.checkpoint_interval = 2\n # Make sure we collect the same number of samples for all tests to avoid instabilities in MBAR.\n base_steps = 50\n cls.n_steps = int(np.ceil(base_steps / cls.N_SAMPLERS))\n\n # Test case with host guest in vacuum at 3 different positions and alchemical parameters.\n # 
-----------------------------------------------------------------------------------------\n hostguest_test = mmtools.testsystems.HostGuestVacuum()\n factory = mmtools.alchemy.AbsoluteAlchemicalFactory()\n alchemical_region = mmtools.alchemy.AlchemicalRegion(alchemical_atoms=range(126, 156))\n hostguest_alchemical = factory.create_alchemical_system(hostguest_test.system, alchemical_region)\n\n # Add restraint force between host and guest.\n restraint_force = mmtools.forces.HarmonicRestraintBondForce(\n spring_constant=2.0 * unit.kilojoule_per_mole / unit.angstrom ** 2,\n restrained_atom_index1=10, restrained_atom_index2=16,\n )\n hostguest_alchemical.addForce(copy.deepcopy(restraint_force))\n\n # Translate the sampler states to be different one from each other.\n positions = hostguest_test.positions\n box_vectors = hostguest_test.system.getDefaultPeriodicBoxVectors()\n hostguest_sampler_states = [mmtools.states.SamplerState(positions=positions + 10*i*unit.nanometers,\n box_vectors=box_vectors)\n for i in range(cls.N_SAMPLERS)]\n\n # Create the basic thermodynamic states.\n hostguest_thermodynamic_states = [mmtools.states.ThermodynamicState(hostguest_alchemical, 300*unit.kelvin)\n for _ in range(cls.N_STATES)]\n\n # Create alchemical states at different parameter values.\n alchemical_states = [mmtools.alchemy.AlchemicalState.from_system(hostguest_alchemical)\n for _ in range(cls.N_STATES)]\n for i, alchemical_state in enumerate(alchemical_states):\n alchemical_state.set_alchemical_parameters(float(i) / (cls.N_STATES - 1))\n\n # Create compound states.\n hostguest_compound_states = list()\n for i in range(cls.N_STATES):\n hostguest_compound_states.append(\n mmtools.states.CompoundThermodynamicState(thermodynamic_state=hostguest_thermodynamic_states[i],\n composable_states=[alchemical_states[i]])\n )\n\n # Unsampled states.\n cls.n_unsampled_states = 2\n nonalchemical_system = copy.deepcopy(hostguest_test.system)\n nonalchemical_system.addForce(copy.deepcopy(restraint_force))\n nonalchemical_state = mmtools.states.ThermodynamicState(nonalchemical_system, 300*unit.kelvin)\n nonalchemical_compound_state = mmtools.states.CompoundThermodynamicState(\n thermodynamic_state=nonalchemical_state,\n composable_states=[RestraintState(lambda_restraints=1.0)]\n )\n hostguest_unsampled_states = [copy.deepcopy(nonalchemical_compound_state) for _ in\n range(cls.n_unsampled_states)]\n\n cls.hostguest_test = (hostguest_compound_states, hostguest_sampler_states, hostguest_unsampled_states)\n\n # Run a quick simulation\n thermodynamic_states, sampler_states, unsampled_states = copy.deepcopy(cls.hostguest_test)\n n_states = len(thermodynamic_states)\n\n # Prepare metadata for analysis.\n reference_state = mmtools.states.ThermodynamicState(hostguest_test.system, 300*unit.kelvin)\n topography = Topography(hostguest_test.topology, ligand_atoms=range(126, 156))\n metadata = {\n 'standard_state_correction': 4.0,\n 'reference_state': mmtools.utils.serialize(reference_state),\n 'topography': mmtools.utils.serialize(topography)\n }\n analysis_atoms = topography.receptor_atoms\n\n # Create simulation and storage file.\n cls.tmp_dir = tempfile.mkdtemp()\n storage_path = os.path.join(cls.tmp_dir, 'test_analyze.nc')\n move = mmtools.mcmc.LangevinDynamicsMove(n_steps=1)\n if cls.ONLINE_ANALYSIS:\n online_analysis_interval = cls.n_steps - 1\n else:\n online_analysis_interval = None\n cls.sampler = cls.SAMPLER(mcmc_moves=move, number_of_iterations=cls.n_steps,\n online_analysis_interval=online_analysis_interval,\n 
online_analysis_minimum_iterations=0)\n        cls.reporter = MultiStateReporter(storage_path, checkpoint_interval=cls.checkpoint_interval,\n                                          analysis_particle_indices=analysis_atoms)\n        cls.call_sampler_create(cls.sampler, cls.reporter, thermodynamic_states, sampler_states, unsampled_states,\n                                metadata=metadata)\n\n        # Run some iterations.\n        cls.n_replicas = cls.N_SAMPLERS\n        cls.n_states = n_states\n        cls.analysis_atoms = analysis_atoms\n        cls.sampler.run(cls.n_steps-1)  # Initial config\n        cls.repex_name = \"RepexAnalyzer\"  # kind of an unused test\n\n        # Debugging Messages to send to Nose with --nocapture enabled\n        online_flag = \" \"\n        if cls.ONLINE_ANALYSIS:\n            online_flag += \"Online \"\n        output_descr = \"Testing{}Analyzer: {} -- States: {} -- Samplers: {}\".format(\n            online_flag, cls.SAMPLER.__name__, cls.N_STATES, cls.N_SAMPLERS)\n        len_output = len(output_descr)\n        print(\"#\" * len_output)\n        print(output_descr)\n        print(\"#\" * len_output)", "title": "" }, { "docid": "c19e3f713e3047ccee1dd38eff25b4d2", "score": "0.56053054", "text": "def setUp(self):\n        context = {}    # context in which token was used\n        # moved the metrics list into context dict\n        context['include_metrics'] = [('mccabe', 'McCabeMetric'),\n                                      ('sloc', 'SLOCMetric')]\n        context['quiet'] = True\n        context['verbose'] = False\n        context['base'] = ''\n        self.in_file = os.path.abspath('tests/code_samples/js1.js')\n        context['in_file_names'] = [self.in_file]\n        context['output_format'] = None\n\n        self.metrics = process(context)", "title": "" }, { "docid": "e234fe635943a11df6f3fca5e3d984e9", "score": "0.56042075", "text": "def setUp(self):\n        \n        self.testfilesdir = os.path.join(os.path.dirname(__file__), '..', 'testfiles', 'Tree', 'Metric')\n        self.log = logging.getLogger()", "title": "" }, { "docid": "fee771bd29f6ac26a1e4fd23c864dcf0", "score": "0.5598643", "text": "def setup(self):\n        info = {'index': 'orbit_num', 'kind': 'orbit'}\n        self.testInst = pysat.Instrument('pysat', 'testing_xarray',\n                                         clean_level='clean',\n                                         orbit_info=info)\n        self.testInst.custom.add(filter_data, 'modify')", "title": "" }, { "docid": "a931f0146d41ee30d0097b9bf95eced3", "score": "0.5597028", "text": "def setup(self):\n        info = {'index': 'longitude', 'kind': 'longitude'}\n        self.testInst = pysat.Instrument('pysat', 'testing',\n                                         clean_level='clean',\n                                         orbit_info=info)\n        self.testInst.custom.add(filter_data, 'modify')", "title": "" }, { "docid": "479b4ec7b691a20282b38893d98b9cb3", "score": "0.5594915", "text": "def setUp(self):\n        self.ev_path = os.path.join(this_directory, \".tmp_evaluations\")\n        if not os.path.exists(self.ev_path):\n            os.mkdir(self.ev_path)\n        dummy_model_files = [os.path.join(self.ev_path, str(n)) for n in range(100)]\n        dummy_pred_files = [os.path.join(self.ev_path, str(n)) for n in range(100, 200)]\n\n        backend_mock = unittest.mock.Mock()\n        backend_mock.get_model_dir.return_value = self.ev_path\n        backend_mock.get_model_path.side_effect = dummy_model_files\n        backend_mock.get_prediction_output_path.side_effect = dummy_pred_files\n        D = get_multiclass_classification_datamanager()\n        backend_mock.load_datamanager.return_value = D\n        backend_mock.temporary_directory = tempfile.gettempdir()\n        self.backend_mock = backend_mock\n\n        self.port = logging.handlers.DEFAULT_TCP_LOGGING_PORT\n\n        self.working_directory = os.path.join(this_directory, \".tmp_%s\" % self.id())", "title": "" }, { "docid": "d08a41ff23c1f2e1a2d807de41323658", "score": "0.5590458", "text": "def setUp(self):\n        self.cmd = ClusterMetricsConfiguration()", "title": "" }, { "docid":
"414c668d03bf5a47cf3324c274cf9c32", "score": "0.5586716", "text": "def setup(self):\n info = {'index': 'longitude', 'kind': 'longitude'}\n self.testInst = pysat.Instrument('pysat', 'testing',\n clean_level='clean',\n orbit_info=info, update_files=True)", "title": "" }, { "docid": "9ae438bc88006b5006d24c49e5d87587", "score": "0.5580739", "text": "def setup(self):\n info = {'index': 'mlt'}\n self.testInst = pysat.Instrument('pysat', 'testing_xarray',\n clean_level='clean',\n orbit_info=info)\n times = [[pysat.datetime(2008, 12, 31, 4),\n pysat.datetime(2008, 12, 31, 5, 37)],\n [pysat.datetime(2009, 1, 1),\n pysat.datetime(2009, 1, 1, 1, 37)]\n ]\n for seconds in np.arange(38):\n day = pysat.datetime(2009, 1, 2) + \\\n pds.DateOffset(days=int(seconds))\n times.append([day, day +\n pds.DateOffset(hours=1, minutes=37,\n seconds=int(seconds)) -\n pds.DateOffset(seconds=20)])\n\n self.testInst.custom.add(filter_data2, 'modify', times=times)", "title": "" }, { "docid": "891573b8e8fecfc1c5f4e00da34161d5", "score": "0.5570189", "text": "def setUp(self):\n self.all_stocks = stocks.StockCollection()\n sa.LoadCSV(TEST_FILES[\"march1.csv\"], self.all_stocks)\n sa.LoadCSV(TEST_FILES[\"march2.csv\"], self.all_stocks)\n sa.LoadCSV(TEST_FILES[\"march3.csv\"], self.all_stocks)\n sa.LoadCSV(TEST_FILES[\"march4.csv\"], self.all_stocks)\n sa.LoadCSV(TEST_FILES[\"march5.csv\"], self.all_stocks)\n sa.LoadTriplet(TEST_FILES[\"feb1.trp\"], self.all_stocks)\n sa.LoadTriplet(TEST_FILES[\"feb2.trp\"], self.all_stocks)\n sa.LoadTriplet(TEST_FILES[\"feb3.trp\"], self.all_stocks)\n sa.LoadTriplet(TEST_FILES[\"feb4.trp\"], self.all_stocks)", "title": "" }, { "docid": "ccbf0f2a51e6cd1bd609b1431ceb6435", "score": "0.5566679", "text": "def main():\n run_test_sum_of_digits()\n run_test_digits_in_cube()\n run_test_digits_in_power()\n run_test_fancy_sums_of_digits()", "title": "" }, { "docid": "6d7f680c7cdbb7b88e161c798fff054a", "score": "0.5564312", "text": "def setup(self):\n info = {'index': 'orbit_num', 'kind': 'orbit'}\n self.testInst = pysat.Instrument('pysat', 'testing_xarray',\n clean_level='clean',\n orbit_info=info, update_files=True)", "title": "" }, { "docid": "192683e0d6a8e05760b1dca304b692e8", "score": "0.5563879", "text": "def setUp(self):\n self.game = game.Game()\n self.dice = dice.Dice()\n self.computer = computer.Computer()\n self.leaderboards = leaderboards.Leaderboards()\n self.player1 = player.Player(\"Drake\", \"testing\")\n self.player2 = player.Player(\"Benson\", \"testing\")", "title": "" }, { "docid": "9640615e5ad1fecc25583bee6f7a3a22", "score": "0.55625457", "text": "def setUp(self):\n super().setUp()\n self.longitude_points = np.array(\n [-19.99999, -10.0, 0.0, 10.0, 20.00001], dtype=np.float32\n )\n self.longitude_points_thirds = np.array(\n [160.0, 160.33333, 160.66667, 161.0, 161.33333], dtype=np.float32\n )\n self.rtol = 1.0e-5\n self.expected = 10.0\n self.expected_thirds = 0.33333\n self.rtol_thirds = 4.0e-5", "title": "" }, { "docid": "5e11ecab92a0be64bf50a03411b2ddbd", "score": "0.55570656", "text": "def setUpClass(cls):\r\n cls.config.setup_toolbox('ENVI', 'qa_envitaskengine_datatype_enviroi',\r\n 'test_datatype_enviroi')", "title": "" }, { "docid": "6c0ed5cd69ea3ad202824a85e6b3581b", "score": "0.55563486", "text": "def setUp(self):\n \n config, _ = get_config(os.path.abspath('../configs/pln_config.json')) \n self.config = config\n hdf_path = self.config.data_path\n paths = [\"training\", \"validation\",\"testing\",\"general_set\", \"core_set\"]\n self.data_path = {}\n \n for path in 
paths:\n self.data_path[path] = f\"{hdf_path}/{path}.hdf\"", "title": "" }, { "docid": "ec38e4b4b0f8c659abcd2c2be6b551ce", "score": "0.55486226", "text": "def setup(self):\n info = {'index': 'latitude', 'kind': 'polar'}\n self.testInst = pysat.Instrument('pysat', 'testing_xarray',\n clean_level='clean',\n orbit_info=info)\n self.testInst.custom.add(filter_data, 'modify')", "title": "" } ]
e64d4bb277199d04a29fb9f3909ec61a
Output D x N x R
[ { "docid": "03383d4f38277365c6512771b3b383c1", "score": "0.0", "text": "def build_predict(self, Xnew):\n mu = tf.einsum('dr,nd->dnr', self.q_A_mu, Xnew)\n var = tf.einsum('dr,nd->dnr', tf.square(self.q_A_sqrt), tf.square(Xnew))\n return mu,var", "title": "" } ]
[ { "docid": "d4be365955cf2431b028aaeaee5d1904", "score": "0.58140504", "text": "def generate(self, n):", "title": "" }, { "docid": "4653cd46931570f3f90ab27808413713", "score": "0.5588879", "text": "def print_row(base, n):\r\n for i in range(1, n + 1):\r\n print(\"{0:>4}\".format(base * i), end=\"\")\r\n print(\"\")", "title": "" }, { "docid": "3a380f13c02d91f8df56e6d502048f46", "score": "0.55726326", "text": "def gen(self, n):\n a, b = 0, 1\n while b < n:\n print b,\n a, b = b, a+b", "title": "" }, { "docid": "5fb94865fade47e163db2dc4c6207acb", "score": "0.5568102", "text": "def transform(self,r,n):\n rn = np.asarray(r).copy()\n cell = self.atoms.get_cell()\n for a in range(3):\n rn = rn + n[a]*cell[a,:]\n return rn", "title": "" }, { "docid": "71e2f40d974209fb021c9bddbeee67a2", "score": "0.5564151", "text": "def table(self, L, R, n):\n s = ''\n for x in linspace(L, R, n):\n y = self(x)\n s += '%12g %12g\\n' % (x, y)\n return s", "title": "" }, { "docid": "0bccaab5121edb8c3604a0cccdd59cee", "score": "0.5549646", "text": "def ex5_DiamondPattern():\n N = int(input())\n counter = N\n print(1)\n for row in range(1, 2*N - 1):\n res = [1]\n if row < N:\n for column in range(0, 2*row):\n res.append(column + 2)\n else:\n counter -=1\n for column in range(2, 2*counter):\n res.append(column)\n \n res = list(map(str, res))\n print(' '.join(res))", "title": "" }, { "docid": "35f221dbc665f8a0d7401cb8a5411058", "score": "0.55113524", "text": "def table(self, L, R, n):\r\n s = ''\r\n for x in linspace(L, R, n):\r\n y = self(x)\r\n s += '(%12g, %12g)\\n' %(x, y)\r\n return s", "title": "" }, { "docid": "441b2564a38ce383b45f0db0dbdc51af", "score": "0.54704165", "text": "def output(self, ans):", "title": "" }, { "docid": "8b4744cf6a526f47fc4bb9605b1a6007", "score": "0.5468321", "text": "def ver(R,D,im,param=0):\n for i in range(param,R.shape[0]):\n mostrar_cuadricula(R[i],D[i],im)", "title": "" }, { "docid": "3a8bb4c30c9c8fef996cf7e22be8d9d8", "score": "0.543137", "text": "def R():", "title": "" }, { "docid": "6c7aa141fd196a38d3fadcc1768c26fd", "score": "0.5424518", "text": "def _render(self, mode='human', close=False):\n if close:\n return\n\n outfile = io.StringIO() if mode == 'ansi' else sys.stdout\n\n grid = np.arange(self.nS).reshape(self.shape)\n it = np.nditer(grid, flags=['multi_index'])\n while not it.finished:\n s = it.iterindex\n y, x = it.multi_index\n\n if self.s == s:\n output = \" x \"\n elif s == 0 or s == self.nS - 1:\n output = \" T \"\n else:\n output = \" o \"\n\n if x == 0:\n output = output.lstrip()\n if x == self.shape[1] - 1:\n output = output.rstrip()\n\n outfile.write(output)\n\n if x == self.shape[1] - 1:\n outfile.write(\"\\n\")\n\n it.iternext()", "title": "" }, { "docid": "f1e129c23cb4cf01235fd16044aa4d4b", "score": "0.54083943", "text": "def for_N():\r\n\r\n for row in range(5):\r\n for col in range(5):\r\n if col in (0,4) or row-col==0:\r\n print('*', end = ' ')\r\n else:\r\n print(' ', end = ' ')\r\n\r\n print()", "title": "" }, { "docid": "510b91767c32eeeeaa31c4a7508907f6", "score": "0.5379925", "text": "def for_D():\r\n\r\n for row in range(6):\r\n for col in range(5):\r\n if col==0 or row in (0,5) and col<4 or col==4 and row>0 and row<5:\r\n print('*', end = ' ')\r\n else:\r\n print(' ', end = ' ')\r\n print()", "title": "" }, { "docid": "02343d65a8c91714d807cc0f77b91775", "score": "0.5379106", "text": "def __repr__(self):\n return(str(self.n), str(self.m), str(self.I), str(self.I))", "title": "" }, { "docid": "a31c40e82079c27a8730e2b31479a2d2", "score": "0.53775483", 
"text": "def num_04(prn=True): \n N=10\n x = np.random.random_integers(0, 10, N)\n y = np.random.random_integers(10, 20, N)\n id = np.arange(10)\n dt_sub = np.dtype([('x', '<f8'), ('y', '<f8')])\n dt = np.dtype([('id', '<i4'), ('xy', dt_sub)])\n extra = np.ones(len(x), dt) #just for demo and printing purposes\n a = np.ones(len(x), dt)\n a['id'] = id\n a['xy']['x'] = x\n a['xy']['y'] = y\n frmt = \"\"\"\n :------------------------------------------------------------------\n {}\n : - sub-dtype: {}\n : - dtype as: np.dtype([('id', '<i4'),('xy', dt_sub)])\n : - yields: {}\n : - unshaped array...\n {}\n : - reshaped & filled array...\n {}\n : id ....{}\n : xy ....{}\n : x ....{}\n : y ....{}\n :\n :field access...\n : - ndarray: a['id'] a['xy'] a['xy']['x'] a['xy']['y']\n : - recarray: a.id a.xy a.xy.x a.xy.y via...\n : a.view(np.recarray)\n : - plus ndarray access \n : -reshaped...\n {}\n :------------------------------------------------------------------\n \"\"\"\n frmt = dedent(frmt)\n if prn: # optional printing\n args = [num_04.__doc__, dt_sub, dt, extra,\n a, id, a['xy'], x, y, a.reshape(-1, 1)]\n print(frmt.format(*args))\n return id, x, y, a", "title": "" }, { "docid": "12e51d3d57d509f6270f65bf8ec4b529", "score": "0.5372844", "text": "def output_dims(self):", "title": "" }, { "docid": "c8ac65ace09ac5b7fa65e41dc70203ca", "score": "0.5370918", "text": "def for_D():\r\n for row in range(9):\r\n for col in range(6):\r\n if col ==0 or row %8 == 0 and col!=5 or col ==5 and row %8 !=0:\r\n print('*',end=' ')\r\n else:\r\n print(' ',end=' ')\r\n print()", "title": "" }, { "docid": "440478bb800686f5f85c2300c521ebd8", "score": "0.5343885", "text": "def zoom(n,R,D,s,o):\n return decomprimir(R*n,D*n,s,o,10,0,n*64,n*64)", "title": "" }, { "docid": "d9466b403b35c93e5206287f127ef3b3", "score": "0.53405917", "text": "def show(pole):\n print(\"Pole PM: \")\n for i in range(N):\n for j in range(N):\n print(str(pole[i*N+j]).rjust(3), end=\"\")\n print()", "title": "" }, { "docid": "2592760a0c999a1c03b099b3aa865cd7", "score": "0.52879655", "text": "def display(count,size,first_position):\n print(\"Gen {}\\n\".format(count))\n for i in range(size):\n for j in range(size):\n print(\"{:4}\".format(first_position[i][j]),end=' ')\n print(\"\\n\")", "title": "" }, { "docid": "3c7d779fb87875d61e6bf7d345631ebc", "score": "0.5282126", "text": "def fourway(n,m):\n\n assert n<7 and (m<7 or m==16)\n M = 0 if (m==6 or m==16) else 6-m\n N = 6-n\n \n if n==0 and m==0:\n return (0,0) # Leak not modeled, don't bother. 
detailed and condensed semantics both give 0.\n if n==15:\n assert m==0\n sys = \"length a = 9\\nlength B = 9\\nlength c = %d\\nB c\\na B( + c* )\\n\" % n\n if m==0:\n sys = \"length a = 9\\nlength B = 9\\nlength c = %d\\nlength d = 9\\nB c\\na B( + d* c* )\\n\" % n\n if m>0:\n sys = \"length x = 21\\nlength b = %d\\nlength B = 9\\nlength c = %d\\nlength d = 9\\nB c\\na b(B( + d* c* ))\\n\" % (m,n)\n\n sys = \"length x = 21\\n\"\n if m>0: \n sys += \"length m = %d\\n\" % m\n if M>0: \n sys += \"length M = %d\\n\" % M\n if n>0: \n sys += \"length n = %d\\n\" % n\n if N>0: \n sys += \"length N = %d\\n\" % N\n if m>0 and M>0:\n sys += \"x*( m* M* + \"\n elif m>0:\n sys += \"x*( m* + \"\n else:\n sys += \"x*( M* + \"\n if n>0 and N>0:\n sys += \"N* n* )\\n\"\n elif n>0:\n sys += \"n* )\\n\"\n else:\n sys += \"N* )\\n\"\n if m>0:\n sys += \"m x( + \"\n else:\n sys += \"x( + \"\n if n>0:\n sys += \") n\\n\"\n else:\n sys += \")\\n\"\n\n pil_enum = CMI_enum(sys,8,'detailed')\n rates = [s for s in pil_enum if len(s)>0 and s[0]=='k']\n if watch:\n for s in pil_enum:\n print s\n\n # trust that the enumerator always lists reactions in a consistent order!\n if len(rates)==1: # must be condensed, then, so must not actually ever happen here.\n k_eff = float(rates[0].split()[1][1:])\n elif len(rates)==3: # must be reversible toehold, detailed model (i.e. just one toehold)\n k0=float(rates[0].split()[1][1:]) # forward binding rate\n k1=float(rates[1].split()[1][1:]) # branch migration & strand displacement step\n k2=float(rates[2].split()[1][1:]) # toehold dissociation\n k_eff = k0*k1/(k1+k2)\n elif len(rates)==13: # both toeholds bind reversibly, and hilarity ensues...\n k = [ float(r.split()[1][1:]) for r in rates ]\n # reactions come out in one of two possible orders, due to mysterious reasons...\n if rates[0].find(\"18 -> 23\") != -1:\n assert rates[1].find(\"6 -> 10\") != -1\n assert rates[2].find(\"5 -> 10\") != -1\n assert rates[3].find(\"44 -> 19\") != -1\n assert rates[4].find(\"2 + 1 -> 6\") != -1\n assert rates[5].find(\"2 + 1 -> 5\") != -1\n assert rates[6].find(\"6 -> 18 + 19\") != -1\n assert rates[7].find(\"5 -> 23 + 44\") != -1\n assert rates[8].find(\"10 -> 23 + 19\") != -1\n assert rates[9].find(\"6 -> 2 + 1\") != -1\n assert rates[10].find(\"5 -> 2 + 1\") != -1\n assert rates[11].find(\"10 -> 6\") != -1\n assert rates[12].find(\"10 -> 5\") != -1\n b10 = k[8]/(k[8]+k[11]+k[12])\n y6 = k[11]/(k[8]+k[11]+k[12])\n y5 = k[12]/(k[8]+k[11]+k[12])\n a5 = k[2]/(k[10]+k[7]+k[2])\n b5 = k[7]/(k[10]+k[7]+k[2])\n a6 = k[1]/(k[9]+k[6]+k[1])\n b6 = k[6]/(k[9]+k[6]+k[1])\n P10 = (b10+y6*b6+y5*b5)/(1-(y6*a6+y5*a5))\n k_eff = (k[4]*a6+k[5]*a5)*P10 + (k[4]*b6+k[5]*b5)\n elif rates[0].find(\"5 -> 10\") != -1:\n assert rates[1].find(\"6 -> 10\") != -1\n assert rates[2].find(\"43 -> 18\") != -1\n assert rates[3].find(\"19 -> 23\") != -1\n assert rates[4].find(\"1 + 2 -> 5\") != -1\n assert rates[5].find(\"1 + 2 -> 6\") != -1\n assert rates[6].find(\"5 -> 43 + 23\") != -1\n assert rates[7].find(\"6 -> 18 + 19\") != -1\n assert rates[8].find(\"10 -> 18 + 23\") != -1\n assert rates[9].find(\"5 -> 1 + 2\") != -1\n assert rates[10].find(\"6 -> 1 + 2\") != -1\n assert rates[11].find(\"10 -> 5\") != -1\n assert rates[12].find(\"10 -> 6\") != -1\n b10 = k[8]/(k[8]+k[11]+k[12])\n y6 = k[12]/(k[8]+k[11]+k[12])\n y5 = k[11]/(k[8]+k[11]+k[12])\n a5 = k[0]/(k[9]+k[6]+k[0])\n b5 = k[6]/(k[9]+k[6]+k[0])\n a6 = k[1]/(k[10]+k[7]+k[1])\n b6 = k[7]/(k[10]+k[7]+k[1])\n P10 = (b10+y6*b6+y5*b5)/(1-(y6*a6+y5*a5))\n k_eff = 
(k[5]*a6+k[4]*a5)*P10 + (k[5]*b6+k[4]*b5)\n        else:\n            print \"CRAP.  Reactions are coming out in an unexpected order.\"\n\n    pil_enum = CMI_enum(sys,8,'condensed')\n    rates = [s for s in pil_enum if len(s)>0 and s[0]=='k']\n\n    if len(rates)==1:  # irreversible toehold-mediated strand displacement\n        k_con = float(rates[0].split()[1][1:])\n    elif len(rates)==2: # reversible toehold exchange  #### check by hand to make sure first one is always forward\n        k_con = float(rates[0].split()[1][1:])  # forward binding rate\n        # now must modify stuff below to output & compare condensed rates\n\n    if watch:\n        for s in pil_enum:\n            print s\n        print \"Calculated k_eff = %f /M/s from detailed reactions and k_con = %f /M/s from condensed reactions.\" % (k_eff,k_con)\n        raw_input(\"Press enter to continue...\")  # in python 3, just input()\n\n    return (k_eff,k_con)", "title": "" }, { "docid": "1fb8c2b0319da55997f32911bdcf7cbc", "score": "0.5255473", "text": "def display(self):\n        value = 0\n        for y in xrange(NY):\n            row_byte = 0\n            for x in xrange(NX):\n                bit = self.vars[x][y].get()\n                row_byte += bit<<x \n                matrix.set_pixel(x, y, bit)\n            value += row_byte<<(8*y) \n        matrix.write_display()\n        self.tx_raw64.delete(\"1.0\",END)\n        self.tx_raw64.insert(\"1.0\",'0x'+format(value,'016x'))", "title": "" }, { "docid": "d32225b70b8eb926d1463135ad2c65b5", "score": "0.52508247", "text": "def D(n):\n    assert 4 <= n\n\n    g = Group.schlafli(*[3] * (n - 2), 2)\n    g[1, n - 1] = 3\n    return g", "title": "" }, { "docid": "4704ec14bd3f20937475dc8b480c81a4", "score": "0.5250244", "text": "def model(n):\r\n    \r\n    return \"\".join([str(i) for i in range(1, n + 1)])", "title": "" }, { "docid": "59229578e80b031d933bf8573d4c2165", "score": "0.52500486", "text": "def display(n):\n    print ' -> '.join(map(str, to_list(n)))", "title": "" }, { "docid": "689b2885192e6c1d55e0ee63da4830e0", "score": "0.524732", "text": "def digits_demo(\n    self,\n    iterations = None\n):\n    \n    print( \"seven segments demo\" )\n    \n    for _ in repeater( iterations ):\n\n        for i in range( self.n ):\n            for n in range( 10 ):\n                for d in range( self.n ):\n                    self.write(\n                        ( \" \" * i ) + \"%d\" % n,\n                        align = False\n                    )\n                sleep_us( 10_000 )\n\n        for i in range( 0, 10 ** self.n ):\n            x = ( i // 100 ) % self.n\n            self.write(\n                \"%d\" % i,\n                points = [ n == x for n in range( self.n ) ]\n            )", "title": "" }, { "docid": "2b677d613dc445d5ace42a0d112ea8b8", "score": "0.524687", "text": "def iptgen(n=4,ex=[2]):\n    ipt = cartesian(np.repeat([[0,1]],n,axis=0))\n    tr = np.zeros(ipt.shape[0])\n    for i in ex:\n        tr += np.sum(ipt,axis=1) == i #This trace vector enables us to pick the desired vectors\n    return np.repeat(ipt,tr>=1,axis=0)", "title": "" }, { "docid": "486fd31e0b18de745ce0c16d034d0074", "score": "0.521292", "text": "def generate_output_3(output): \n    m = Mathtex(output, u)\n    m.save('testnew.png', 'png')", "title": "" }, { "docid": "61a4af31e5e9feaac1228d03dcd62536", "score": "0.5208768", "text": "def output(self):", "title": "" }, { "docid": "93f73eb86b771e1f22f523832bd36b75", "score": "0.5206161", "text": "def generate_output_2(output): \n    tex.Format()\n    tex.sym_format(1)\n    x = tex.print_LaTeX(output)\n    print x\n    #tex.xdvi(debug=True)", "title": "" }, { "docid": "0e87fb4ca237913597551a232f47c01d", "score": "0.5202697", "text": "def __ng(self, xlist, n):\r\n        ret = []\r\n        for i, x in enumerate(xlist):\r\n            diff = i - n + 1\r\n            if diff >= 0:\r\n                tmp = []\r\n                for j in range(n):\r\n                    k = i - j\r\n                    tmp.append(xlist[k])\r\n                tmp.reverse()\r\n                ret.append(''.join(tmp))\r\n        return ret", "title": "" }, { "docid":
"6ca3a0c30d76757d71f6c62733fcb8e5", "score": "0.52023214", "text": "def print_nda(nda, cmt, fmt=' %8.4f') :\n print('\\n%s.shape: %s' % (cmt, str(nda.shape)), end=' ')\n\n if len(nda.shape)==1 :\n for c in nda : print(fmt % c, end=' ')\n\n elif len(nda.shape)==2 : \n for row in nda :\n print('\\nrow: ', end=' ')\n for c in row : print(fmt % c, end=' ')\n \n elif len(nda.shape)==3 : \n for layer in nda :\n print('\\n(3d) layer: ', end=' ')\n for row in layer :\n print('\\nrow: ', end=' ')\n for c in row : print(fmt % c, end=' ')\n print('\\n')", "title": "" }, { "docid": "6819a2809ffb3029703c9c672893b5cb", "score": "0.5197579", "text": "def rwc(n0, dm):\n return np.pi * 1000.0 * dm ** 4 * n0 / 4 ** 4", "title": "" }, { "docid": "1416973fe6d19f91c0a8816d3415cbec", "score": "0.5193823", "text": "def ind2gen(index, n):\r\n # For example, ind2gen(255,8) = [1, 1, 1, 1, 1, 1, 1, 1]\r\n genotype = np.zeros(n)\r\n if index >= 2 ** n:\r\n print(\"ind2gen error\")\r\n return genotype\r\n while n > 0:\r\n n = n - 1\r\n if index % 2 == 0:\r\n genotype[n] = 0\r\n else:\r\n genotype[n] = 1\r\n index = index // 2\r\n return genotype", "title": "" }, { "docid": "320aef993e39f3d1df2359e72ee5fd94", "score": "0.5193602", "text": "def for_N():\r\n for row in range(5):\r\n for col in range(5):\r\n if col in(0,4) or row==col:\r\n print(\"*\",end=\" \")\r\n else:\r\n print(\" \",end=\" \")\r\n print()", "title": "" }, { "docid": "f494fecb643d23f9935d7a8079928e40", "score": "0.51790774", "text": "def zero_output(K,P):\n print \"Generating results.txt with all 0\"\n df_allk = list(); ddf_allk = list()\n for k in range(K-1):\n df=dict(); ddf=dict()\n for name in P.methods:\n df[name] = 0.0\n ddf[name] = 0.0\n df_allk = numpy.append(df_allk,df)\n ddf_allk = numpy.append(ddf_allk,ddf)\n segments = ['Coulomb' , 'vdWaals' , 'TOTAL']\n def printLine(str1, str2, d1=None, d2=None):\n text = str1\n for name in P.methods:\n if d1 == 'plain':\n text += ' ' + str2\n if d1 == 'name':\n text += ' ' + str2 % (name, P.units)\n if d1 and d2:\n text += ' ' + str2 % (d1[name]/P.beta_report, d2[name]/P.beta_report)\n outtext.append(text + '\\n')\n return\n\n d = P.decimal\n str_dash = (d+7 + 6 + d+2)*'-'\n str_dat = ('X%d.%df +- X%d.%df' % (d+7, d, d+2, d)).replace('X', '%')\n str_names = ('X%ds X-%ds' % (d+6, d+8)).replace('X', '%')\n outtext = []\n printLine(12*'-', str_dash, 'plain')\n printLine('%-12s' % ' States', str_names, 'name')\n printLine(12*'-', str_dash, 'plain')\n for k in range(K-1):\n printLine('%4d -- %-4d' % (k, k+1), str_dat, df_allk[k], ddf_allk[k])\n printLine(12*'-', str_dash, 'plain')\n w = 12 + (1+len(str_dash))*len(P.methods)\n str_align = '{:I^%d}' % w\n if len(P.lv_names)>1:\n for i in range(len(segments)):\n printLine('%9s: ' % segments[i], str_dat, df_allk[i], ddf_allk[i])\n else:\n printLine('%9s: ' % segments[-1], str_dat, 0.000, 0.000)\n # Store results.\n outfile = open(os.path.join(P.output_directory, 'results.txt'), 'w')\n outfile.write('# Command line was: %s\\n\\n' % ' '.join(sys.argv) )\n outfile.writelines(outtext)\n outfile.close()", "title": "" }, { "docid": "ebbad7f20cf7c266ac0db1d4065546a8", "score": "0.51702535", "text": "def for_R():\r\n\r\n for row in range(6):\r\n for col in range(5):\r\n if col==0 or row%3==0 and col<3 or col==3 and row%3!=0 and row<3 or row-col==2:\r\n print('*', end = ' ')\r\n else:\r\n print(' ', end = ' ')\r\n print()", "title": "" }, { "docid": "bd21807a9f77efc7cbcc25e2bd164193", "score": "0.5166616", "text": "def _dumpgrid(dmp_i, cnt_r, dmp_r, grd, k, nage, nx, 
ny):\n conc = False\n if len(grd.shape) == 5:\n conc = True\n ii = 0\n fact = 1\n pos = 0\n #print 'cnt_r: ' + str(cnt_r)\n for ir in range(cnt_r):\n\n if conc:\n #print 'dmp_r: ' + str(dmp_r)\n #print 'length of dmp_r: ' + str(len(dmp_r))\n if dmp_r[ir] * fact > 0:\n n = dmp_i[ii]\n ii = ii + 1\n fact = fact * -1.\n else:\n n = n + 1\n\n kz = n / (H.numxgrid * H.numygrid)\n jy = (n - kz * H.numxgrid * H.numygrid) / H.numxgrid\n ix = n - H.numxgrid * H.numygrid * kz - H.numxgrid * jy\n grd[ix, jy, kz - 1, k, nage] = abs(dmp_r[ir])\n\n#\n# print \"n ==> ix,jy,kz,k,nage\"\n# print \"%s ==> %s,%s,%s,%s,%s\" % (n,ix,jy,kz,k,nage)\n# print grd.shape\n# print grd[0,0,0,0,0]\n\n\n else:\n if dmp_r[ir] * fact > 0:\n n = dmp_i[ii]\n ii = ii + 1\n fact = fact * -1.\n else:\n n = n + 1\n #pos = pos + 1\n jy = n / H.numxgrid\n ix = n - H.numxgrid * jy\n grd[ix, jy, k, nage] = abs(dmp_r[ir])\n\n return grd #flipud(grd.transpose())", "title": "" }, { "docid": "f2198473dce969cdb128758e989fe378", "score": "0.5150352", "text": "def generate_variable(r, c, n):\n return f'{r},{c}_{n}'", "title": "" }, { "docid": "56cfe493cced29627a8220048b050ba1", "score": "0.5150147", "text": "def show_utility(values: np.ndarray):\n row_divider = \"-\" * ((8 * values.shape[0]) + values.shape[0] + 1)\n for row in range(values.shape[0]):\n print(row_divider)\n out = \"| \"\n for col in range(values.shape[1]):\n out += str(round(values[(row, col)], 2)).ljust(6) + ' | '\n print(out)\n print(row_divider)", "title": "" }, { "docid": "e1ed07c92d4d336654a53a696d430869", "score": "0.51489705", "text": "def exo4():\n for i in 1: 4:\n q = qlist(i)\n Dend = Dland(pend(1), pend(2), : )\n H = max(abs(Dland(: , : , 1: q)-repmat(Dend(1: q), [n n 1])), [], 3)\n %\n options.end_points = pend\n options.heuristic = H\n options.nb_iter_max = Inf\n options.constraint_map = Inf + zeros(n)\n [D, S] = perform_fast_marching(1./ W, pstart, options)\n %\n I = find(S <0)\n U = cat(3, M, M, M)\n U(I) = 1; U([I + n^2, I + 2*n^2]) = U([I + n^2, I + 2*n^2])*.3\n subplot(2, 2, i)\n hold on\n imageplot(U)\n h = plot(p(2, : ), p(1, : ), '.k'); set(h, 'LineWidth', 2)\n h = plot(pstart(2), pstart(1), '.g'); set(h, 'MarkerSize', 25)\n h = plot(pend(2), pend(1), '.b'); set(h, 'MarkerSize', 25)\n h = plot(landmarks(1, 1: q), landmarks(2, 1: q), 'y.'); set(h, 'MarkerSize', 15)\n axis ij", "title": "" }, { "docid": "cf08c07b093fdeaaa7d0d1d7928301f3", "score": "0.51153255", "text": "def run(self, n: int, output_file: str):", "title": "" }, { "docid": "49318f7d84051f5e7c3784b75fe64e04", "score": "0.5114152", "text": "def run():\n num = int(input())\n for i in range(1, num+1):\n for j in range(i):\n print(\"%02d\" %(j+1), end=\" \")\n wow = j\n for _ in range(2*num-2*i):\n print(\"%02d\" %(wow+1), end=\" \")\n for dog in range(i-1, 0, -1):\n print(\"%02d\" %dog, end=\" \")\n print()\n for i in range(num-1, 0, -1):\n for j in range(i):\n print(\"%02d\" %(j+1), end=\" \")\n wow = j\n for _ in range(2*num-2*i):\n print(\"%02d\" %(wow+1), end=\" \")\n for dog in range(i-1, 0, -1):\n print(\"%02d\" %dog, end=\" \")\n print()", "title": "" }, { "docid": "20480a09181f582d5bc69cc0fc6a31bf", "score": "0.51118934", "text": "def getOutput(self):", "title": "" }, { "docid": "20480a09181f582d5bc69cc0fc6a31bf", "score": "0.51118934", "text": "def getOutput(self):", "title": "" }, { "docid": "4dc3a46a8c529720e54b084f6aea4cb2", "score": "0.5101232", "text": "def getOutput(self, n):\n return self.outputs[n-1]", "title": "" }, { "docid": "20e51e4f39e719b46ad98a8560a48c20", "score": 
"0.5098332", "text": "def num_05(): # needs to call 4\n id, x, y, a = num_04(prn=False)\n frmt = \"\"\"\n :------------------------------------------------------------------\n {}\n :Array basics....\n :Input ndarray...\n {!r}\n :...reshaped... \n {!r}\n :Viewed as recarray... \n {!r}\n :...reshaped... \n {!r}\n :------------------------------------------------------------------\n \"\"\"\n a_rec = a.view(np.recarray)\n frmt = dedent(frmt)\n args = [num_05.__doc__, a, a.reshape(-1, 1), a_rec, a_rec.reshape(-1, 1)]\n print(frmt.format(*args))\n frmt = \"\"\"\n :------------------------------------------------------------------\n :ndarray and recarray access...\n : - both...\n : - a['id'] = {}\n : - a['xy'] = {}\n : - a['xy']['x'] = {}\n : - recarray only...\n : - a_rec.id = {}\n : - a_rec.xy = {}\n : - a_rec.xy.x = {}\n :------------------------------------------------------------------ \n \"\"\"\n args = [a['id'], a['xy'], a['xy']['x'], a_rec.id, a_rec.xy, a_rec.xy.x]\n print(dedent(frmt).format(*args))\n return a", "title": "" }, { "docid": "38ae4e37a0e1db29f67cd3c662fb3007", "score": "0.50825477", "text": "def v(i, j, d, size):\n return (size**2) * (i - 1) + size * (j - 1) + d", "title": "" }, { "docid": "6db1422ba0d8271bd6556ceb8c213c04", "score": "0.50691587", "text": "def D(self):\n self.dot()", "title": "" }, { "docid": "e9e55270867908024ee5e3771a31e609", "score": "0.50577825", "text": "def _repr_(self):\n tmp='p-adic automorphic form on '+str(self._parent)+':\\n'\n tmp+=' e | c(e)'\n tmp+='\\n'\n for e in range(Integer(self._nE/2)):\n tmp+=' '+str(e)+' | '+str(self._F[e])+'\\n'\n return tmp", "title": "" }, { "docid": "67e635e8ed9eca9810403abdd218712f", "score": "0.5056885", "text": "def main():\n count = int(input())\n space, many = 1, count+(count-1)\n for i in range(1, count+1):\n for number in range(1, space):\n print(\"%02d\" %number, end=\" \")\n print((\"%02d \" %i)*many, end=\"\")\n for number in range(space-1, 0, -1):\n print(\"%02d\" %number, end=\" \")\n print()\n space += 1\n many -= 2\n space -= 2\n many += 4\n for i in range(count-1, 0, -1):\n for number in range(1, space):\n print(\"%02d\" %number, end=\" \")\n print((\"%02d \" %i)*many, end=\"\")\n for number in range(space-1, 0, -1):\n print(\"%02d\" %number, end=\" \")\n print()\n space -= 1\n many += 2", "title": "" }, { "docid": "b2674595658196107f5858efcc51c062", "score": "0.5043628", "text": "def main(num):\n for count in range(1, num + 1):\n for _ in range(num - count):\n print(end=' ')\n for count2 in range(1, count + 1):\n print(\"%02d \" %count2, end='')\n print()", "title": "" }, { "docid": "8ea92fb1b3deb70ef74153cc05b8d040", "score": "0.50432503", "text": "def draw_square(t, n, sz):\t\n x = sz\n for i in range(n):\n for j in range(4):\n t.right(89)\n t.fd(sz)\n sz = sz + x", "title": "" }, { "docid": "899aba9941a9240a00dca80239ecbc06", "score": "0.50431824", "text": "def while_N():\r\n\r\n row = 0\r\n while row<5:\r\n col = 0\r\n while col<5:\r\n if col in (0,4) or row-col==0:\r\n print('*', end = ' ')\r\n else:\r\n print(' ', end = ' ')\r\n col += 1\r\n print()\r\n row += 1", "title": "" }, { "docid": "e82605fe882e06d3101ac0b923d43824", "score": "0.5043171", "text": "def numb_table(n):\n for i in xrange(1, n+1):\n for j in xrange(1, n+1):\n print '%.2f' % round(1.0/(i+j), 2),\n print", "title": "" }, { "docid": "3ea9a2c193733129c7b47182d0674474", "score": "0.50395286", "text": "def print_christmas_tree(size):\n # replace the line below with your code\n out = \"\"\n for i in range(1, size + 1):\n out += \" 
\" * (size - i)\n out += \"*\" * (i * 2 - 1)+ \"\\n\"\n out += (\" \" * (size - 1)) + \"*\"\n return out", "title": "" }, { "docid": "04afe0be130227da59a54d1a6c2e1f53", "score": "0.50317276", "text": "def draw(self, N, spaces=None):\n if self._transd:\n draws = self._kde.draw(N, spaces=spaces)\n else:\n draws = self._kde.draw(N)\n return draws", "title": "" }, { "docid": "0e8a5b79733c1cbb17151c727e6844a6", "score": "0.5029839", "text": "def makeU4(n,m):\n U4 = np.add.outer(np.arange(n)**2,np.arange(m)**2)\n return U4", "title": "" }, { "docid": "b36b456df2698b95c904fce49e39b7d1", "score": "0.50297946", "text": "def n_outputs(self):\n return 2", "title": "" }, { "docid": "1a87c95eded545bcea78bb608a080b0c", "score": "0.5015433", "text": "def _write_crd(self, filename, iteration, replica, title, ncfile):\n # Extract coordinates to be written.\n coordinates = numpy.array(ncfile.variables['positions'][iteration,replica,:,:])\n coordinates *= 10.0 # convert nm to angstroms\n \n # Create file.\n outfile = open(filename, 'w')\n \n # Write title.\n outfile.write(title + '\\n')\n \n # Write number of atoms.\n natoms = ncfile.variables['positions'].shape[2]\n outfile.write('%6d\\n' % natoms)\n \n # Write coordinates.\n for index in range(natoms):\n outfile.write('%12.7f%12.7f%12.7f' % (coordinates[index,0], coordinates[index,1], coordinates[index,2]))\n if ((index+1) % 2 == 0): outfile.write('\\n')\n \n # Close file.\n outfile.close()", "title": "" }, { "docid": "9c278fa2b18d91c92bce47031572cd90", "score": "0.5014373", "text": "def for_X():\r\n\r\n for row in range(5):\r\n for col in range(6):\r\n if row-col==0 or row+col==4:\r\n print('*', end = ' ')\r\n else:\r\n print(' ', end = ' ')\r\n print()", "title": "" }, { "docid": "6f94233eb202c847263794df5b76424a", "score": "0.50116044", "text": "def output(X,nnets):\n #MPI\n comm = MPI.COMM_WORLD\n rank = comm.rank\n size = comm.size\n\n if rank == 0:\n results = []\n for nn in nnets:\n results+= [nn_psgd._output(X,nn['weights'],nn_psgd.act_funcs_from_string(nn['act_funcs'],len(nn['weights'])-1))]\n return results", "title": "" }, { "docid": "599caa380001f627ef93d2b58bd376fd", "score": "0.50007045", "text": "def while_D():\r\n\r\n row = 0\r\n while row<6:\r\n col = 0\r\n while col<5:\r\n if col==0 or row in (0,5) and col<4 or col==4 and row>0 and row<5:\r\n print('*', end = ' ')\r\n else:\r\n print(' ', end = ' ')\r\n col +=1\r\n print()\r\n row +=1", "title": "" }, { "docid": "66d29588a3fa7fb9c5424bbd69a19672", "score": "0.49978733", "text": "def draw(self, N):\n draws = []\n for t in range(self.sigma.size):\n draws.append(self.sigma.item(t) * self.RNG.randn(N) + self.mu.item(t))\n\n return draws", "title": "" }, { "docid": "bbe563ce3e398bffd2c77156bb72f510", "score": "0.49892113", "text": "def iwc(n0, dm):\n return np.pi * 917.0 * dm ** 4 * n0 / 4 ** 4", "title": "" }, { "docid": "79a508d49a30acd1a4fe5f8e5ec0a3b4", "score": "0.4982554", "text": "def render_nd(game, xray=False):\n dimension = game[\"dimensions\"]\n Gamecopy = make_board(dimension, None)\n for coordinate in possibleCoordinates(dimension):\n if not xray and not get_coordinate(game[\"mask\"], coordinate):\n set_coordinate(Gamecopy, coordinate, \"_\")\n elif get_coordinate(game[\"board\"], coordinate) == 0:\n set_coordinate(Gamecopy, coordinate, \" \")\n else: \n value = str(get_coordinate(game[\"board\"], coordinate))\n set_coordinate(Gamecopy, coordinate, value)\n return Gamecopy", "title": "" }, { "docid": "d718e016008eda542b3c2108a6f09264", "score": "0.49820656", "text": "def 
add_per_atom_info(natomsd, nfild):\n tpe = np.ones((natomsd), dtype = int)\n molid = np.ones((natomsd), dtype = int)\n for i in range(natomsd):\n molid[i] = i / nfild + 1\n return tpe, molid", "title": "" }, { "docid": "5adc2a41dec06a4427fd97ae5eaf562c", "score": "0.49792972", "text": "def markdown_row(self, ncol, which):\n if which == 'C':\n dat = self.C\n elif which == 'c':\n dat = self.d1\n elif which == 'f':\n dat = self.d2\n line = '|%d|' % (self.N*2)\n for i in range(1,self.N+1):\n line = line + ' $%s$ |' % (dat[i])\n for i in range(1,ncol - self.N+1):\n line = line + ' |'\n line = line + '\\n'\n return line", "title": "" }, { "docid": "46d4faed23a3759dc055f2adddd3f192", "score": "0.4977833", "text": "def gen(self, N):\n raise NotImplementedError('This is an abstract method and should be overriden.')", "title": "" }, { "docid": "bced160dd6fe599229f3892185153436", "score": "0.49770412", "text": "def draw(self, N):\n draws = []\n for j in range(self.scale.size):\n draws.append(\n self.scale.item(j)\n * (-np.log(1.0 - self.RNG.rand(N))) ** (1.0 / self.shape.item(j))\n )\n return draws[0] if len(draws) == 1 else draws", "title": "" }, { "docid": "7f5737c98d272ced1a5ed4bb9f3cd327", "score": "0.49755332", "text": "def _repr_(self):\n s='Space of automorphic forms on '+str(self._X)+' with values in '+str(self._U)\n return s", "title": "" }, { "docid": "2c48f917da537776ae4ed46c340848c6", "score": "0.49733922", "text": "def output(*args):", "title": "" }, { "docid": "2c48f917da537776ae4ed46c340848c6", "score": "0.49733922", "text": "def output(*args):", "title": "" }, { "docid": "cd52afc1a74627cb701f80b973389645", "score": "0.49701324", "text": "def BinaryDihedralPresentation(n):\n F = FreeGroup('x,y,z')\n x,y,z = F.gens()\n rls = (x**-2 * y**2, x**-2 * z**n, x**-2 * x*y*z)\n return FinitelyPresentedGroup(F, rls)", "title": "" }, { "docid": "d69c8fe58cae14a2cee1c2241a7b75c4", "score": "0.49686903", "text": "def natpattern(n):\n return (\"%0\" + repr(int(np.ceil(np.log10(n + 1)))) + \"d\")", "title": "" }, { "docid": "09917302e49dd5cb812e44249aed3dc1", "score": "0.49591744", "text": "def __str__(self):\n ran = range(self.size)\n return '\\n'.join(' '.join(self.grid[x,y] for x in ran) for y in ran)", "title": "" }, { "docid": "071f1449c7d8e8aa4215706be8aa9efc", "score": "0.49574217", "text": "def IdTotal(n):\n P = np.ones((1,n), dtype=int)\n return AugmentedIdentity(P)", "title": "" }, { "docid": "d964b4ba5b96684a13f64d11caf0c1d3", "score": "0.49445045", "text": "def get_output(disqualify)->str:\n\n \n output=\"\"\n \n for row in disqualify:\n s=\" \".join(map(str,row))\n output=\"\\n\".join((output,s))\n \n return output", "title": "" }, { "docid": "834b1a430047cc1a50f49df7804b6e16", "score": "0.49409932", "text": "def multable(n):\r\n # create 'table' as a matrix of 3-character strings\r\n table = [[f\"{p*q:3}\" for q in range(1, n+1)] for p in range(1, n+1)]\r\n # join each line from 'table' into a single string\r\n lines = [' '.join(line) for line in table]; #inspect()\r\n return '\\n'.join(lines) # join all lines into a multi-line string\r", "title": "" }, { "docid": "2ceb3e28f80b243e23f8f5775c8d0962", "score": "0.49399197", "text": "def exo4():\n ntests = 50\n Tlist = linspace(.1, 3, ntests)\n err = []\n for i in 1: ntests:\n T = Tlist(i)\n % decoding\n pvertexI = floor(abs(pvertex/ T)).*sign(pvertex)\n pvertexQ = sign(pvertexI) .* (abs(pvertexI) + .5) * T\n vertex1 = pvertexQ*U'\n % entropic\n t = min(pvertexI(: )): max(pvertexI(: ))\n h = hist(pvertexI(: ), t)\n h = max(h, 1e-10); h = 
h/ sum(h)\n E = -sum(log2(h).*h)\n % recode\n nbits(i) = 3*E\n err(i) = snr(vertex, vertex1)\n plot(nbits, err); axis('tight')\n set_label('nb.bits', 'SNR')", "title": "" }, { "docid": "813e44fc45320588391977eb078b88ec", "score": "0.4938792", "text": "def cdf_output_2D(self,output_path,filehead='fluctuation'):\n file_start = output_path + filehead\n for i in range(self.n_cross_section):\n for j in range(len(self.time_steps)):\n\n fname = file_start + str(self.time_steps[j])+'_'+str(i) + '.cdf'\n f = nc.netcdf_file(fname,'w')\n f.createDimension('z_dim',self.grid.NZ)\n f.createDimension('r_dim',self.grid.NR)\n\n rr = f.createVariable('rr','d',('r_dim',))\n rr[:] = self.grid.R1D[:]\n zz = f.createVariable('zz','d',('z_dim',))\n zz[:] = self.grid.Z1D[:]\n rr.units = zz.units = 'Meter'\n\n bb = f.createVariable('bb','d',('z_dim','r_dim'))\n bb[:,:] = self.B_on_grid[:,:]\n bb.units = 'Tesla'\n\n dne = f.createVariable('dne','d',('z_dim','r_dim'))\n dne[:,:] = self.dne_ad_on_grid[i,j,:,:] + self.nane_on_grid[i,j,:,:]\n dne.units = 'per cubic meter'\n\n ne = f.createVariable('ne','d',('z_dim','r_dim'))\n ne[:,:] = self.ne0_on_grid[:,:] + dne[:,:]\n ne.units = 'per cubic meter'\n\n te = f.createVariable('te','d',('z_dim','r_dim'))\n te[:,:] = self.te_on_grid[:,:]/1000\n te.units = 'keV'\n\n ti = f.createVariable('ti','d',('z_dim','r_dim'))\n ti[:,:] = self.ti_on_grid[:,:]/1000\n ti.units = 'keV'\n\n f.close()", "title": "" }, { "docid": "d9550dd9757ef406ac60fe69473ec521", "score": "0.4938056", "text": "def anal_savemols(selection,nsnap,crd) :\n for residue in selection.residues () :\n crd[\"frame\"] += 1\n crd[\"file\"].write(\"%d\\n%d:%d\\n\"%(len(residue),mdcrd[\"frame\"],nsnap))\n for atom in residue :\n crd[\"file\"].write(\"%s %.3f %.3f %.3f\\n\"%(atom.name,atom.position[0],atom.position[1],atom.position[2]))", "title": "" }, { "docid": "c30715332a57b497cf663f3b3f05c1f3", "score": "0.49375907", "text": "def generate_output(self):\n\n for phrase, phrase_cnt in self.phrase_db.items():\n target, foreign = phrase.split(PHRASE_SEP)\n target_cnt = self.target_db[target]\n\n print self.format(target, foreign, float(phrase_cnt) / float(target_cnt))", "title": "" }, { "docid": "3921e32dc8661b3ba2b328e709d911ab", "score": "0.493428", "text": "def __repr__(self):\n #x = self.write_as_plot3d()\n #self.log.debug(\"*******\")\n points = ''\n header = self.get_header()\n\n #nfull_lines = nm // 2\n #npartial_lines = nm % 2\n #nlines = nfull_lines + npartial_lines\n\n nfull_lines = self.nrows // 2\n npartial_lines = self.nrows % 2\n #nlines = nfull_lines + npartial_lines\n\n for c in range(self.ncols):\n npoints_left = nfull_lines * 2 + npartial_lines\n for r in range(0, self.nrows, 2):\n if npoints_left > 1:\n x1, y1, z1 = self.xyz[r, c, :]\n x2, y2, z2 = self.xyz[r + 1, c, :]\n points += self.write_points([x1, y1, z1], [x2, y2, z2])\n else:\n x1, y1, z1 = self.xyz[r, c, :]\n points += self.write_point([x1, y1, z1])\n npoints_left -= 2\n return header + points", "title": "" }, { "docid": "c88fd82f3729c9ac10868701cc8d720a", "score": "0.4922759", "text": "def show(self):\n\n #finds every element and stores it in order\n elements = [[0 for i in range(self.n)] for j in range(self.n)]\n for i in range(self.n * self.n):\n elements[self.array[0,i]][self.array[1,i]] = self.array[2,i]\n\n #prints the table\n for i in range(self.n):\n line = \"\"\n for j in range(self.n):\n line += str(elements[i][j])\n if j != self.n - 1:\n line += \"|\"\n print(line)\n print()", "title": "" }, { "docid": 
"ae29e1cac5c7977e114ef16e29c4d8e0", "score": "0.49214783", "text": "def while_D():\r\n row =0\r\n while row<9:\r\n col =0\r\n while col <6:\r\n if col ==0 or row %8 == 0 and col!=5 or col ==5 and row %8 !=0:\r\n print('*',end=' ')\r\n else:\r\n print(' ',end=' ')\r\n col +=1\r\n print()\r\n row +=1", "title": "" }, { "docid": "67a434dca1fface7758903b6f603431c", "score": "0.49209973", "text": "def projectCRD(self,crdobj,outf,nq=None):\n if not nq: nq=self.nq\n pt=None\n if isthere(outf): #check outf is new file\n sys.stderr.write('file %s exists: overwrite?:(y/n)'%(outf,))\n if sys.stdin.readline()[0].lower()!='y':\n sys.stderr.write('OK. Nothing to do!\\n')\n return False\n pt=open(outf,'a') #open for append\n if not self.qs.any(): self.normalize_modes()\n iframe=0; buf=''; bufmaxsize=int(50E4)#50E6);#50MB\n buf='' \n while crdobj.loadframe():\n iframe+=1; buf+='#frame %05d\\n'%(iframe,) #store number\n fr=crdobj.frame.ravel()\n for q in self.qs[0:nq]:\n buf+=' %6.2lf'%( (fr*q.n.ravel()).sum(),) #scalar product\n buf+='\\n'\n if len(buf)>bufmaxsize: #flush buffer\n pt.write(buf); buf=''\n if buf: pt.write(buf)\n return True", "title": "" }, { "docid": "771c5023b00d7be7f511ae15508e9fa1", "score": "0.49203938", "text": "def output(self):\n self.numList.reverse()\n def lengthFinder(columnNumber):\n currentLength=0\n longestLength=0\n for i in range(columnNumber, len(self.numList),5):\n currentLength=len(self.numList[i])\n if currentLength>longestLength:\n longestLength=currentLength\n return longestLength+1\n columnWidth=[]\n for i in range(5):\n columnWidth.append(lengthFinder(i))\n for i in range(len(self.numList)):\n print('{0:>{width}}'.format(self.numList[i], width=columnWidth[i%5]), end=' ')\n if i%5==4:\n print()\n print()", "title": "" }, { "docid": "f12e5412d22d29463b5c0103810c12e1", "score": "0.49159524", "text": "def compute_ndsv(self):\n fpath = os.path.join(self.dir, 'ndsv.tif')\n combinations = self.ndsv_labels\n profile = self.profile.copy()\n profile.update(count=len(combinations), dtype='float32')\n with rasterio.open(fpath, 'w', **profile) as dst:\n for v, (bi_label, bj_label) in enumerate(combinations):\n bi = getattr(self, bi_label).astype('float32')\n bj = getattr(self, bj_label).astype('float32')\n dst.write(calc_ndi(bi, bj), v+1)\n return fpath", "title": "" }, { "docid": "ad3909fd0ad1955b93a16cec1a0ad736", "score": "0.4905101", "text": "def identity(D, one):\n return Mat((D,D), {(d,d):1 for d in D})", "title": "" }, { "docid": "ab1265a9cc4a1bf67f106837474349f9", "score": "0.49037835", "text": "def __repr__(self):\n s = self.n_samples_per_distrib\n d = self.n_distribs\n c = self.n_components\n return 'Dimension mapping ' + str(self.shape) + \\\n ' <-> ' + str((s, d, c))", "title": "" }, { "docid": "d494f9bb8aa6232f3fbe486adb8f5a40", "score": "0.49029955", "text": "def NI(image):\n return transpose(N(image))[::-1,...]", "title": "" }, { "docid": "66f262bf6961f42c662405acc236485a", "score": "0.49011576", "text": "def make_table():\n for x in range(20, 100, 20):\n for r in range(20, 100, 20):\n print(\"max_r=\", r, \"circle_count =\", str(x),\"avg_density\", sample(30, x, 1, r))", "title": "" }, { "docid": "3129b1ee2e1fe226c37c37f45e88a829", "score": "0.4900358", "text": "def draw(self, N):\n draws = self.RNG.multivariate_normal(self.mu, self.Sigma, N)\n\n return draws", "title": "" }, { "docid": "8ce160cb020682d30220aa695051e0c6", "score": "0.48975816", "text": "def ex10_StarPattern():\n N = int(input())\n rightSpaceSize = 3\n leftSpaceSize = N * 2 - 2\n space, star = ' 
', '*'\n\n for idx in range(1, N + 1):\n if idx is 1:\n print((space * leftSpaceSize) + star)\n leftSpaceSize -= 2\n elif idx is not N:\n print((space * leftSpaceSize) + star + (space * rightSpaceSize) + star)\n rightSpaceSize += 4\n leftSpaceSize -= 2\n else:\n lastRow = star\n for idx in range(0, (N * 2 - 2)):\n lastRow += (space + star)\n print(lastRow)", "title": "" }, { "docid": "47ab2035e28965faeff407d58adde4e8", "score": "0.489476", "text": "def __transform(self, n):\n return n+100000", "title": "" }, { "docid": "f625446a2d60b0fd4e3ca42fced8f46a", "score": "0.48945445", "text": "def a_r(output_size):\r\n\t\t\r\n\t\treturn np.random.sample(output_size)", "title": "" }, { "docid": "f53ab368932fb1170f91716d43e6a580", "score": "0.48920688", "text": "def num_07():\n frmt = \"\"\"\n :------------------------------------------------------------------\n {}\n :Input array........\n : - shape: {} ndim: {}\n {}\\n\n :Transposed array... swapping -> transpose(1, 0, 2)\n : - shape: {} ndim: {}\n {}\\n\n :Reshaped array.....\n : - (a.shape[1], a.shape[2]*a.shape[0])\n : - (3, (4*2)) = (3, 8)\n : - shape: {} ndim: {}\n {}\n :------------------------------------------------------------------\n \"\"\"\n np.set_printoptions(edgeitems=4,linewidth=80,precision=2,suppress=True,threshold=10)\n a = np.array([[[ 0, 1, 2, 3],\n [ 4, 5, 6, 7],\n [ 8, 9,10,11]],\n [[12, 13, 14, 15], \n [16, 17, 18, 19],\n [20, 21, 22, 23]]])\n b0 = a.transpose(1, 0, 2) # step 1\n b = b0.reshape(a.shape[1], a.shape[2]*a.shape[0])\n args = [num_07.__doc__, a.shape, a.ndim, a, b0.shape,\n b0.ndim, b0, b.shape, b.ndim, b]\n print(dedent(frmt).format(*args))\n out = []\n for i in range(len(b0)):\n delta = [[\"\\n \", \"__\"],\n [\"[[\", \"_[\"],\n [\"]]\", \"]_\"]]\n line = (np.array_str(b0[i]))\n for j in delta:\n line=line.replace(j[0], j[1])\n out.append(line)\n z = \"\".join([i+\"\\n\" for i in out]) \n return a, b0, b, out", "title": "" }, { "docid": "b6ae309c22dbb8af430fbdf8597cefe1", "score": "0.4890858", "text": "def drum_arbore(self, fisier_output):\r\n nod_c = self\r\n drum = [nod_c]\r\n while nod_c.parinte is not None:\r\n drum = [nod_c.parinte] + drum\r\n nod_c = nod_c.parinte\r\n # print(nod_c.parinte.miscare)\r\n fisier_output.write(\"Stare initiala :\")\r\n\r\n for miscare in drum:\r\n fisier_output.write(miscare.nod_graf.miscare)\r\n fisier_output.write(\"\\n\\n\")\r\n for nod in miscare.nod_graf.info:\r\n fisier_output.write(str(nod) + \"\\n\\n\")\r\n return drum", "title": "" }, { "docid": "ebdc1e7c88ec50e16ce5e565c0616227", "score": "0.48903528", "text": "def unpad_and_glue(preds, out_crops, L): \n \n distogram = torch.empty((32, L, L), dtype=torch.float32)\n k = int(np.sqrt(len(preds)))\n \n i0 = 0\n for i in range(k):\n j0 = 0\n for j in range(k):\n unpadded = unpad_crop(preds[i * k + j], out_crops[i * k + j])\n width, height = unpadded.shape[1:]\n distogram[:, i0:(i0 + width), j0:(j0 + height)] = torch.exp(unpadded) # log(softmax) -> softmax\n j0 += height\n i0 += width\n \n return distogram", "title": "" }, { "docid": "31eb563009fb471f88d551225cee24da", "score": "0.48825836", "text": "def __repr__( self ):\n\t\tdata = llg.get_data(self.index)\n\t\treturn '\\n'.join([', '.join(['%07.3f'%data[i*4+j] for j in range(4)]) for i in range(4)])", "title": "" } ]
f1a1023b98b579d7bcc140258bce727d
Loads this image with pixel data from a bytes object.
[ { "docid": "327f6fd607f27d8f5dc9d3d41aa91e08", "score": "0.6282929", "text": "def frombytes(self, data, decoder_name=\"raw\", *args):\r\n\r\n # may pass tuple instead of argument list\r\n if len(args) == 1 and isinstance(args[0], tuple):\r\n args = args[0]\r\n\r\n # default format\r\n if decoder_name == \"raw\" and args == ():\r\n args = self.mode\r\n\r\n # unpack data\r\n d = _getdecoder(self.mode, decoder_name, args)\r\n d.setimage(self.im)\r\n s = d.decode(data)\r\n\r\n if s[0] >= 0:\r\n raise ValueError(\"not enough image data\")\r\n if s[1] != 0:\r\n raise ValueError(\"cannot decode image data\")", "title": "" } ]
[ { "docid": "d880873652bb2fdae43df29047336b0a", "score": "0.73065776", "text": "def frombytes(self, buffer):\r\n return self.image.frombytes(buffer)", "title": "" }, { "docid": "3b219f784b91c5f63acf07f227e9bfbd", "score": "0.7045793", "text": "def loadPixels(self):\n n = self.width * self.height\n self.buf = self.img.get_image_data().get_data('BGRA', -self.width * 4)\n if npy:\n self.pixels = numpy.fromstring(self.buf, dtype=ctypes.c_uint)\n else:\n self.pixels = ctypes.cast(self.buf, ctypes.POINTER(ctypes.c_uint))", "title": "" }, { "docid": "f02ae370be8d5cacef5f23a10e87f368", "score": "0.69848955", "text": "def load_bytes(self, byte_data):\n self.byte_data = byte_data\n self.bytes_to_data()", "title": "" }, { "docid": "940553d2aa1f53014dcc11662d48a149", "score": "0.6307243", "text": "def load(self, filename):\n self.filename = filename\n\n ext = filename.split(os.path.extsep)[-1]\n if ext == 'raw':\n with open(filename) as f:\n d = f.read()\n\n # XXX this assumes pilatus 100K...\n self.image = None\n self.raw = np.fromstring(d, '>f').astype('int32').reshape((195,-1))\n pass\n\n else:\n self.image = Image.open(filename)\n self.raw = np.asarray(self.image)\n\n self.pixels = self.raw.copy()\n try:\n self.info = self.parse_description(self.image.tag.get(270))\n except:\n pass\n self.loaded = True", "title": "" }, { "docid": "531bf816da1140c7695ac0e10f6d0189", "score": "0.6111692", "text": "def __init__(self, bmp_data):\n\n self.magic_number = \"?P\"\n self.width = bmp_data.width\n self.height = bmp_data.height\n self.palette = bmp_data.palette\n self.image_data = self.rle_compress(bmp_data.image_data)", "title": "" }, { "docid": "9054f6f4c97fdd2c678b660e1466a098", "score": "0.60340214", "text": "def load_image(self, fil):\n if self.loadable:\n o_image = im.imread(fil)\n self.setData(imageMath.rgbaFormat(o_image))", "title": "" }, { "docid": "fe39f09035bfdf0b00b0b526126b6c15", "score": "0.5977972", "text": "def __init__(self, filepath, bytes=False):\n\n # IHDR values\n self.width = None\n self.height = None\n self.bit_depth = None\n self.colour_type = None\n self.interlace = None\n self.idat_decomp = None\n self.px_bytes = 3\n\n self.line_bytes = None\n\n if bytes:\n self.file = io.BytesIO(filepath)\n else:\n self.file = open(filepath, 'rb') # @type io.BufferedReader", "title": "" }, { "docid": "1cda707c83d1b1b4bbb78c13bb42dc30", "score": "0.59076715", "text": "def load_imdata(self):\n if isinstance(self.source, str):\n return io.imread(self.source)\n return self.source.copy()", "title": "" }, { "docid": "b84b407fc5089014a86fec7ff92350f3", "score": "0.5891995", "text": "def LoadFromBuffer(self, data):", "title": "" }, { "docid": "0b2f8478ee29bd29c8456c0cd72e139d", "score": "0.58396786", "text": "def blob(self, bytes_):\n self._blob = bytes_", "title": "" }, { "docid": "5bb1c1edf2ef35089f673de17ad8f09e", "score": "0.5801263", "text": "def __init__(self, path):\n self._IMAGE = _LIBPIXL.pixl_load_image(c_char_p(path.encode()))", "title": "" }, { "docid": "b4e577274439836d8c9fa71ac51c4f61", "score": "0.5738294", "text": "def readImageData(self , offset , size):\n file = open(self.__filename , \"rb\")\n file.seek(offset)\n data = file.read(size)\n file.close()\n return data", "title": "" }, { "docid": "1a055a002a7d3ddc81c1987113acfe96", "score": "0.5731233", "text": "def fromBinary(data):\n raise NotImplementedError('Override this method')", "title": "" }, { "docid": "dfa5884734ff3e148292f1f327a7c5d1", "score": "0.56709087", "text": "def image_data(self):\n pass", "title": "" }, { "docid": 
"856855710a74de88cbd4ae1c39975773", "score": "0.5664854", "text": "def pixel_data(self):\n\n try:\n pil_img = self._to_pil_image()\n if _HAS_NUMPY:\n return _np.asarray(pil_img)\n else:\n ret = _array.array('B')\n if self._channels == 1:\n ret.fromlist([z for z in pil_img.getdata()])\n else:\n ret.fromlist([z for i in pil_img.getdata() for z in i])\n return ret\n except ImportError:\n print(\"Install pillow to get the pixel_data property\")", "title": "" }, { "docid": "41c163eab18be40848a5f9e38dddd788", "score": "0.5649742", "text": "def __init__(self, *args):\n \n if len(args) == 1 and isinstance(args[0], pyglet.image.AbstractImage):\n # Wraps an AbstractImage\n self.img = args[0]\n elif len(args) in (2, 3):\n # Creates an ImageData from width, height and type\n if len(args) == 2:\n # default \n w, h = args\n format = ARGB\n else:\n w, h, format = args\n data = create_string_buffer(w * h * len(format))\n self.img = pyglet.image.ImageData(w, h, format, data.raw)\n else:\n assert (len(args) == 0)\n # Do an initial loading of the pixels[] array\n self.loadPixels()\n self.updatePixels()", "title": "" }, { "docid": "bfd9e90203368effd6e55d09817aa6f2", "score": "0.5633227", "text": "def frombytes(mode, size, data, decoder_name=\"raw\", *args):\r\n\r\n # may pass tuple instead of argument list\r\n if len(args) == 1 and isinstance(args[0], tuple):\r\n args = args[0]\r\n\r\n if decoder_name == \"raw\" and args == ():\r\n args = mode\r\n\r\n im = new(mode, size)\r\n im.frombytes(data, decoder_name, args)\r\n return im", "title": "" }, { "docid": "d98b19377b1c853a65a80931ed2bfb5f", "score": "0.56294703", "text": "def decode(cls, data):\n return imdecode(data)", "title": "" }, { "docid": "725bec1bfe3ff5ed431ce0696a1b88ce", "score": "0.5625944", "text": "def decode_data(self, encoded_data):\n # NOTE: This could vary depending on your data\n return Image.open(BytesIO(base64.b64decode(encoded_data)))", "title": "" }, { "docid": "bf5e9e30ab1e304c95836359ad32dfbd", "score": "0.5591227", "text": "def test_state_load_from_bytes(self):\n data = bytearray(self.data)\n\n self.assertEqual(\n state.State(data), state.State.load(data, self.offset))", "title": "" }, { "docid": "02e5224679961c8297c746da59302691", "score": "0.55813754", "text": "def load_image(input_file: Union[str, BytesIO]) -> Image:\n image = open_image(input_file)\n image.load()\n return image", "title": "" }, { "docid": "c3a146ea0aa3652335f24538b1dcf8cf", "score": "0.5580869", "text": "def from_bytes(cls, b):\n return cls(*struct.unpack(\"!IIIIHHbb\", b))", "title": "" }, { "docid": "5a12f5664b6daf9f8fd27ba6f40007f4", "score": "0.55780685", "text": "def load(cls, fname):\n with open(fname, 'rb') as img_handle:\n img = PILImage.open(img_handle)\n img_data = img.getdata()\n if img.mode.startswith('RGB'):\n pixels = [round(.299*p[0] + .587*p[1] + .114*p[2]) for p in img_data]\n elif img.mode == 'LA':\n pixels = [p[0] for p in img_data]\n elif img.mode == 'L':\n pixels = list(img_data)\n else:\n raise ValueError('Unsupported image mode: %r' % img.mode)\n w, h = img.size\n return cls(w, h, pixels)", "title": "" }, { "docid": "9abfea270769be22ee801cd7baa85f56", "score": "0.5537813", "text": "def unpack_bytes(self, ):\n\t\tpass", "title": "" }, { "docid": "b350ad471dad0f2f32bfd69797ee2623", "score": "0.5529657", "text": "def from_bytes(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "b350ad471dad0f2f32bfd69797ee2623", "score": "0.5529657", "text": "def from_bytes(self, *args, **kwargs): # real signature unknown\n 
pass", "title": "" }, { "docid": "b350ad471dad0f2f32bfd69797ee2623", "score": "0.5529657", "text": "def from_bytes(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "b350ad471dad0f2f32bfd69797ee2623", "score": "0.5529657", "text": "def from_bytes(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "b350ad471dad0f2f32bfd69797ee2623", "score": "0.5529657", "text": "def from_bytes(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "b350ad471dad0f2f32bfd69797ee2623", "score": "0.5529657", "text": "def from_bytes(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "b350ad471dad0f2f32bfd69797ee2623", "score": "0.5529657", "text": "def from_bytes(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "1f5d4eb4201a6611532341c7b1aa72dd", "score": "0.55191153", "text": "def from_bytes(cls, b):\n\t\treturn cls(*struct.unpack(\"!IIIIHHbb\", b))", "title": "" }, { "docid": "acbf85357ebcda726d45287092d10b2a", "score": "0.54973197", "text": "def data(self, *args):\n return _osg.Image_data(self, *args)", "title": "" }, { "docid": "9945b24253c461bcd3b6a49a58beb74e", "score": "0.5493648", "text": "def _load_img(path):\n path = os.path.join(os.path.dirname(__file__), path)\n image = np.load(path)\n assert image.dtype == np.uint8\n assert image.shape == (64, 64, 3)\n return image", "title": "" }, { "docid": "55255d3f92cc6dbe9cdef0077fc96b1b", "score": "0.54888314", "text": "def load_imdata(self):\n if isinstance(self.source, np.ndarray):\n return self.source.copy()\n raise ValueError(\"Data must be a numpy array\")", "title": "" }, { "docid": "4fd31640a76a528e0657ff2318145ebd", "score": "0.5477911", "text": "def load_image(self, image_id) -> np.ndarray:\n info = self.image_info[image_id]\n fp = info[\"path\"]\n image = imread(fp)\n image = resizeAndPad(image, self.shape)\n # If grayscale. 
Convert to RGB for consistency.\n if len(image.shape) != 3 or image.shape[2] != 3:\n image = np.stack((image,) * 3, -1)\n return image", "title": "" }, { "docid": "358006f2e5ed072ff6546a04c4550904", "score": "0.5468296", "text": "def __init__(self, sz=0, filename=None):\n if sz > 0:\n self.bitmap = bytearray(sz)\n self.bitmap_sz = sz\n elif filename:\n self._read_file(filename)\n self.bitmap_sz = len(self.bitmap)\n else:\n raise Exception(\"Need sz or filename to initialize the bitmap\")\n self.num_bits = self.bitmap_sz*8\n\n self.hex_digits = self._count_hex_digits()", "title": "" }, { "docid": "3d5220c6674f3777977fcb85fe67368b", "score": "0.5463704", "text": "def _load_data(self, image_id):\n raise NotImplementedError()", "title": "" }, { "docid": "ec2ce284034bbcbc62541dda1295615e", "score": "0.54631823", "text": "def _read_raw_data(self, f):\n\n # is this image a type we can read?\n assert self._data_type in [\"h\", \"f\", \"B\", \"l\", \"b\", \"H\", \"I\", \"d\"]\n\n if self._data_type == \"f\":\n raw_data = read_float32(streambuf(f), self._image_num_elements)\n elif self._data_type == \"B\":\n raw_data = read_uint8(streambuf(f), self._image_num_elements)\n elif self._data_type == \"h\":\n raw_data = read_int16(streambuf(f), self._image_num_elements)\n elif self._data_type == \"H\":\n raw_data = read_uint16(streambuf(f), self._image_num_elements)\n elif self._data_type == \"l\":\n raw_data = read_int32(streambuf(f), self._image_num_elements)\n elif self._data_type == \"I\":\n raw_data = read_uint32(streambuf(f), self._image_num_elements)\n\n # no C++ reader for remaining types (should be unusual anyway)\n else:\n vals = struct.unpack(\n self._byteord + self._data_type * self._image_num_elements,\n f.read(self._data_size * self._image_num_elements),\n )\n if self._data_type == \"d\":\n raw_data = flex.double(vals)\n if self._data_type in [\"b\", \"?\"]:\n raw_data = flex.int(vals)\n\n raw_data.reshape(flex.grid(self._image_size[1], self._image_size[0]))\n return raw_data", "title": "" }, { "docid": "8d6e2981000580442eef4c6afc4e997c", "score": "0.54616404", "text": "def __init__(self, filename):\r\n BiffRecord.__init__(self)\r\n\r\n self.width, self.height, self.size, data = _process_bitmap(filename)\r\n # Write the IMDATA record to store the bitmap data\r\n cf = 0x09\r\n env = 0x01\r\n lcb = self.size\r\n self._rec_data = pack(\"<HHL\", cf, env, lcb) + data", "title": "" }, { "docid": "f3fcd5fdbea6feada94e29cfd5f692f4", "score": "0.5451367", "text": "def _readRGB(self, xSizeRGB, ySizeRGB):\n return self.fromfile(self.fid, count=xSizeRGB * ySizeRGB * 4,\n dtype='<u1').reshape(xSizeRGB, ySizeRGB, 4)", "title": "" }, { "docid": "22b55ac7c61967dd417ff32307d6febc", "score": "0.5450173", "text": "def pixbuf2image(self, pix):\n data = pix.get_pixels()\n w = pix.props.width\n h = pix.props.height\n stride = pix.props.rowstride\n mode = \"RGB\"\n if pix.props.has_alpha == True:\n mode = \"RGBA\"\n im = Image.frombytes(mode, (w, h), data, \"raw\", mode, stride)\n return im", "title": "" }, { "docid": "f2aa7edfd85d7a969590fd653b209120", "score": "0.544669", "text": "def image(self, image):\n if image.mode != '1':\n raise ValueError('Image must be in mode 1.')\n imwidth, imheight = image.size\n if imwidth != self.width or imheight != self.height:\n raise ValueError('Image must be same dimensions as display ({0}x{1}).' 
\\\n .format(self.width, self.height))\n\n # Grab all the pixels from the image, faster than getpixel.\n pix = image.load()\n # Iterate through the memory pages\n index = 0\n for page in range(self._pages):\n # Iterate through all x axis columns.\n for x in range(self.width):\n # Set the bits for the column of pixels at the current position.\n bits = 0\n # Don't use range here as it's a bit slow\n for bit in [0, 1, 2, 3, 4, 5, 6, 7]:\n bits = bits << 1\n bits |= 0 if pix[(x, page*8+7-bit)] == 0 else 1\n # Update buffer byte and increment to next byte.\n self._buffer[index] = bits\n index += 1", "title": "" }, { "docid": "1d2290ac4208502f334764ca70310ee2", "score": "0.5444834", "text": "def _convert_to_pillow_image(image_data: bytes) -> Image.Image:\n size = len(image_data)\n\n # Constuct numpy array out of source data\n array = np.empty(size, dtype=np.uint8)\n array[0:size] = list(image_data)\n\n # Decode compressed source data into uncompressed image data\n image = Image.open(io.BytesIO(array))\n return image", "title": "" }, { "docid": "d44d4f82b20efd17720f2cbe6766b1db", "score": "0.54440576", "text": "def load_bytes(self, data, encoding=None):\n self._encoded_data = data\n\n if encoding:\n self._encoding = self._validate_encoding(encoding)", "title": "" }, { "docid": "d438a179c033284a7aee4b0df95c5917", "score": "0.53900325", "text": "def getImg(self):\n offset = parse32le(self.data[68:72])\n length = parse32le(self.data[72:76])\n return memoryview(self.data)[offset:offset+length]", "title": "" }, { "docid": "e575f4bc47f14e8dee3320c875ba35de", "score": "0.5384897", "text": "def loadPixels():\n current = get()\n if npy:\n screen.pixels = numpy.array(current.pixels)\n return\n screen.pixels = (c_long * (width * height))()\n for i in range(width * height):\n screen.pixels[i] = current.pixels[i]", "title": "" }, { "docid": "8be4111173d45be5aa5d72e09d939577", "score": "0.538331", "text": "def loadb(bytes_: bytes,\n cls: Optional[type] = None,\n encoding: str = 'utf-8',\n jdkwargs: Optional[Dict[str, object]] = None,\n *args,\n **kwargs) -> object:\n if not isinstance(bytes_, bytes):\n raise DeserializationError('loadb accepts bytes only, \"{}\" was given'\n .format(type(bytes_)), bytes_, cls)\n jdkwargs = jdkwargs or {}\n str_ = bytes_.decode(encoding=encoding)\n return loads(str_, cls, jdkwargs=jdkwargs, *args, **kwargs)", "title": "" }, { "docid": "b8aeb8d60d45c7008705a67a10a8a4e5", "score": "0.53744775", "text": "def load_image(self, image_id):\n info = self.image_info[image_id]\n bg_color = np.array(info['bg_color']).reshape([1, 1, 3])\n image = np.ones([info['height'], info['width'], 3], dtype=np.uint8)\n image = image * bg_color.astype(np.uint8)\n for shape, color, dims in info['shapes']:\n image = self.draw_shape(image, shape, dims, color)\n return image", "title": "" }, { "docid": "b8aeb8d60d45c7008705a67a10a8a4e5", "score": "0.53744775", "text": "def load_image(self, image_id):\n info = self.image_info[image_id]\n bg_color = np.array(info['bg_color']).reshape([1, 1, 3])\n image = np.ones([info['height'], info['width'], 3], dtype=np.uint8)\n image = image * bg_color.astype(np.uint8)\n for shape, color, dims in info['shapes']:\n image = self.draw_shape(image, shape, dims, color)\n return image", "title": "" }, { "docid": "c7bbb79184bed66ba00bc7a78c35859e", "score": "0.53708166", "text": "def load_image(self, image_id):\n # Load image", "title": "" }, { "docid": "0247939bfa2d8a34450b219c0b1ea3ca", "score": "0.53637046", "text": "def load(self, resource_name, value):\n 
self._img.release()\n \n if (resource_name == 'File'):\n self._img = NSImage.alloc().initWithContentsOfFile_(str(value))", "title": "" }, { "docid": "0318ec40c3a7e03f729bc6e7f8c3681a", "score": "0.53577435", "text": "def _load_image(self, img_path: str):\n\n return Image.open(img_path)", "title": "" }, { "docid": "0c5420c6af6b18cf9033d267dfefe51f", "score": "0.53569216", "text": "def load_image(name):\n img = Image.open(name)\n img.load()\n\n return asarray(img, dtype=\"B\")", "title": "" }, { "docid": "21e930e186decfe26c20b45202d21e03", "score": "0.53558004", "text": "def load_image(self, image_index):\n return self.image_data[image_index].data", "title": "" }, { "docid": "efbf82d123834c7f1e7410f5752c609e", "score": "0.53418154", "text": "def from_bytes(payload):\n pass", "title": "" }, { "docid": "1303639a64da40ec68f70ed3f92f9cea", "score": "0.53404725", "text": "def load_image(self, image_path, key=-1):\n self.image, rect = image(image_path, key)\n # self.rect.width, self.rect.height = rect.width, rect.height", "title": "" }, { "docid": "c034e69464a1f9a033933c695e429d8a", "score": "0.53219837", "text": "def __init__(self, infile):\n\n aux = Auxiliary\n\n infile.seek(0, os.SEEK_SET)\n\n magic_number = infile.read(2)\n if magic_number != \"BM\":\n raise \"File is not a BMP\"\n\n self.file_size = aux.read_dword(infile, 2)\n self.image_offset = aux.read_dword(infile, 10)\n self.header_size = aux.read_dword(infile, 14)\n self.width = aux.read_dword(infile, 18)\n self.height = aux.read_dword(infile, 22)\n self.bpp = aux.read_word(infile, 28)\n self.image_size = aux.read_dword(infile, 34)\n self.palette = self.read_palette(infile)\n self.image_data = self.read_image(infile)", "title": "" }, { "docid": "8f7045bd25e435c5d0f73fb3265963bb", "score": "0.53126806", "text": "def from_blob(cls, blob):\r\n stream = BytesIO(blob)\r\n return cls._from_stream(stream, blob)", "title": "" }, { "docid": "7e64027f2c50ada206342e0fc6a4e844", "score": "0.53109777", "text": "def load(self, load):\n assert(len(load) >= 2)\n length = struct.unpack('<H', load[:2])[0]\n self.text = load[2:][:length]\n assert(len(self.text) == length)", "title": "" }, { "docid": "8641972c1761b56b59ea5e6021bb0a6a", "score": "0.53101194", "text": "def _load_libtiff(self):\n\n pixel = Image.Image.load(self)\n\n if self.tile is None:\n raise IOError(\"cannot load this image\")\n if not self.tile:\n return pixel\n\n self.load_prepare()\n\n if not len(self.tile) == 1:\n raise IOError(\"Not exactly one tile\")\n\n # (self._compression, (extents tuple),\n # 0, (rawmode, self._compression, fp))\n extents = self.tile[0][1]\n args = self.tile[0][3] + (self.ifd.offset,)\n decoder = Image._getdecoder(self.mode, 'libtiff', args,\n self.decoderconfig)\n try:\n decoder.setimage(self.im, extents)\n except ValueError:\n raise IOError(\"Couldn't set the image\")\n\n if hasattr(self.fp, \"getvalue\"):\n # We've got a stringio like thing passed in. Yay for all in memory.\n # The decoder needs the entire file in one shot, so there's not\n # a lot we can do here other than give it the entire file.\n # unless we could do something like get the address of the\n # underlying string for stringio.\n #\n # Rearranging for supporting byteio items, since they have a fileno\n # that returns an IOError if there's no underlying fp. Easier to\n # deal with here by reordering.\n if Image.DEBUG:\n print(\"have getvalue. 
just sending in a string from getvalue\")\n n, err = decoder.decode(self.fp.getvalue())\n elif hasattr(self.fp, \"fileno\"):\n # we've got a actual file on disk, pass in the fp.\n if Image.DEBUG:\n print(\"have fileno, calling fileno version of the decoder.\")\n self.fp.seek(0)\n # 4 bytes, otherwise the trace might error out\n n, err = decoder.decode(b\"fpfp\")\n else:\n # we have something else.\n if Image.DEBUG:\n print(\"don't have fileno or getvalue. just reading\")\n # UNDONE -- so much for that buffer size thing.\n n, err = decoder.decode(self.fp.read())\n\n self.tile = []\n self.readonly = 0\n # libtiff closed the fp in a, we need to close self.fp, if possible\n if hasattr(self.fp, 'close'):\n if not self.__next:\n self.fp.close()\n self.fp = None # might be shared\n\n if err < 0:\n raise IOError(err)\n\n self.load_end()\n\n return Image.Image.load(self)", "title": "" }, { "docid": "559e2cec95c37e460635087cc6418c10", "score": "0.529858", "text": "def deserialize(self, str):\n try:\n end = 0\n _x = self\n start = end\n end += 52\n (_x.confidence, _x.area, _x.x, _x.y, _x.z, _x.pixel_x, _x.pixel_y,) = _get_struct_dI5d().unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "title": "" }, { "docid": "55c872b9da84404376ed8d72e17c0433", "score": "0.5289556", "text": "def frombuffer(mode, size, data, decoder_name=\"raw\", *args):\r\n \"Load image from bytes or buffer\"\r\n\r\n # may pass tuple instead of argument list\r\n if len(args) == 1 and isinstance(args[0], tuple):\r\n args = args[0]\r\n\r\n if decoder_name == \"raw\":\r\n if args == ():\r\n if warnings:\r\n warnings.warn(\r\n \"the frombuffer defaults may change in a future release; \"\r\n \"for portability, change the call to read:\\n\"\r\n \" frombuffer(mode, size, data, 'raw', mode, 0, 1)\",\r\n RuntimeWarning, stacklevel=2\r\n )\r\n args = mode, 0, -1 # may change to (mode, 0, 1) post-1.1.6\r\n if args[0] in _MAPMODES:\r\n im = new(mode, (1, 1))\r\n im = im._new(\r\n core.map_buffer(data, size, decoder_name, None, 0, args)\r\n )\r\n im.readonly = 1\r\n return im\r\n\r\n return frombytes(mode, size, data, decoder_name, args)", "title": "" }, { "docid": "8f85388a65e15c2c908639595de8c1c8", "score": "0.5282472", "text": "def load_image(self, image_id):\r\n info = self.image_info[image_id]\r\n img = io.imread(info[\"path\"])\r\n if len(img.shape) > 2:\r\n img = img[:,:,:3]\r\n else:\r\n img = np.repeat(img[:,:,None], 3, axis=2)\r\n\r\n # upscaling\r\n height = img.shape[0]\r\n width = img.shape[1]\r\n img = transform.resize(img, (height * 2, width * 2))\r\n img = (img*255).astype('uint8')\r\n\r\n return img", "title": "" }, { "docid": "8b33e9105b82fc2504b3d750ca799348", "score": "0.52790457", "text": "def decode_image(bytes_string):\n img = cv2.imdecode(np.frombuffer(bytes_string, np.uint8),-1)\n img = img[:,:,-1]\n return img", "title": "" }, { "docid": "c9312abccdbd3e70be623240b7843244", "score": "0.52774173", "text": "def load(self, resource_name, value):\r\n self._img.release()\r\n\r\n if (resource_name == 'File'):\r\n self._img = NSImage.alloc().initWithContentsOfFile_(str(value))", "title": "" }, { "docid": "20b758d64ea9b191ea301e89411f6325", "score": "0.526731", "text": "def LoadBitmap(self, name):", "title": "" }, { "docid": "ab43d7031862682a1efdd36940c15a89", "score": "0.5263327", "text": "def imdecode(self, s):\n img = mx.image.imdecode(s) # mx.ndarray\n return img", "title": "" }, { "docid": "e5f49c95ac57adf09fad2263d8954190", "score": 
"0.5246894", "text": "def from_bytes(cls, bytes):\n samples, sample_rate = soundfile.read(\n io.BytesIO(bytes), dtype='float32')\n return cls(samples, sample_rate)", "title": "" }, { "docid": "51bc750002e74d5bccb54262ae302f63", "score": "0.52371377", "text": "def __init__(self, image):\n self.array = np.asarray(image)", "title": "" }, { "docid": "90e96d5399d9a68e946e044d501b2481", "score": "0.523615", "text": "def _load_libtiff(self):\r\n\r\n pixel = Image.Image.load(self)\r\n\r\n if self.tile is None:\r\n raise IOError(\"cannot load this image\")\r\n if not self.tile:\r\n return pixel\r\n\r\n self.load_prepare()\r\n\r\n if not len(self.tile) == 1:\r\n raise IOError(\"Not exactly one tile\")\r\n\r\n # (self._compression, (extents tuple), 0, (rawmode, self._compression, fp))\r\n ignored, extents, ignored_2, args = self.tile[0]\r\n decoder = Image._getdecoder(self.mode, 'libtiff', args, self.decoderconfig)\r\n try:\r\n decoder.setimage(self.im, extents)\r\n except ValueError:\r\n raise IOError(\"Couldn't set the image\")\r\n\r\n if hasattr(self.fp, \"getvalue\"):\r\n # We've got a stringio like thing passed in. Yay for all in memory.\r\n # The decoder needs the entire file in one shot, so there's not\r\n # a lot we can do here other than give it the entire file.\r\n # unless we could do something like get the address of the underlying\r\n # string for stringio.\r\n #\r\n # Rearranging for supporting byteio items, since they have a fileno\r\n # that returns an IOError if there's no underlying fp. Easier to deal\r\n # with here by reordering.\r\n if Image.DEBUG:\r\n print (\"have getvalue. just sending in a string from getvalue\")\r\n n,err = decoder.decode(self.fp.getvalue())\r\n elif hasattr(self.fp, \"fileno\"):\r\n # we've got a actual file on disk, pass in the fp.\r\n if Image.DEBUG:\r\n print (\"have fileno, calling fileno version of the decoder.\")\r\n self.fp.seek(0)\r\n n,err = decoder.decode(b\"fpfp\") # 4 bytes, otherwise the trace might error out\r\n else:\r\n # we have something else.\r\n if Image.DEBUG:\r\n print (\"don't have fileno or getvalue. just reading\")\r\n # UNDONE -- so much for that buffer size thing.\r\n n,err = decoder.decode(self.fp.read())\r\n\r\n\r\n self.tile = []\r\n self.readonly = 0\r\n # libtiff closed the fp in a, we need to close self.fp, if possible\r\n if hasattr(self.fp, 'close'):\r\n self.fp.close()\r\n self.fp = None # might be shared\r\n\r\n if err < 0:\r\n raise IOError(err)\r\n\r\n self.load_end()\r\n\r\n return Image.Image.load(self)", "title": "" }, { "docid": "b8a833e76af33e0361e90f5a8af859f3", "score": "0.5232845", "text": "def load_image(self, image_id):\n image = build_RS_data.read_image(self.image_info[image_id]['path'])\n\n # rasterio output (nband, height,width), but keras and tf require (height,width,nband)\n image = np.transpose(image, (1, 2, 0)) #\n\n # # Load image\n # image = skimage.io.imread(self.image_info[image_id]['path'])\n # # If grayscale. 
Convert to RGB for consistency.\n # if image.ndim != 3:\n # image = skimage.color.gray2rgb(image)\n # # If has an alpha channel, remove it for consistency\n # if image.shape[-1] == 4:\n # image = image[..., :3]\n return image", "title": "" }, { "docid": "65d065a54f74e6196229fa6c413b3cd1", "score": "0.52121246", "text": "def load(self):\r\n if self.im and self.palette and self.palette.dirty:\r\n # realize palette\r\n self.im.putpalette(*self.palette.getdata())\r\n self.palette.dirty = 0\r\n self.palette.mode = \"RGB\"\r\n self.palette.rawmode = None\r\n if \"transparency\" in self.info:\r\n if isinstance(self.info[\"transparency\"], int):\r\n self.im.putpalettealpha(self.info[\"transparency\"], 0)\r\n else:\r\n self.im.putpalettealphas(self.info[\"transparency\"])\r\n self.palette.mode = \"RGBA\"\r\n\r\n if self.im:\r\n if HAS_CFFI and USE_CFFI_ACCESS:\r\n if self.pyaccess:\r\n return self.pyaccess\r\n from PIL import PyAccess\r\n self.pyaccess = PyAccess.new(self, self.readonly)\r\n if self.pyaccess:\r\n return self.pyaccess\r\n return self.im.pixel_access(self.readonly)", "title": "" }, { "docid": "b9a98a5bfbf985eeee3736b9d69eafe5", "score": "0.5210892", "text": "def _pixel_data(image):\n # Test for PIL.Image, numpy.ndarray, and imageio.core.util without\n # requiring that cv2, PIL, or imageio are installed.\n\n image_type = str(type(image))\n if 'PIL.' in image_type:\n if 'L' != image.mode:\n image = image.convert('L')\n pixels = image.tobytes()\n width, height = image.size\n elif 'numpy.ndarray' in image_type or 'imageio.core.util' in image_type:\n # Different versions of imageio use a subclass of numpy.ndarray\n # called either imageio.core.util.Image or imageio.core.util.Array.\n if 3 == len(image.shape):\n # Take just the first channel\n image = image[:, :, 0]\n if 'uint8' != str(image.dtype):\n image = image.astype('uint8')\n try:\n pixels = image.tobytes()\n except AttributeError:\n # `numpy.ndarray.tobytes()` introduced in `numpy` 1.9.0 - use the\n # older `tostring` method.\n pixels = image.tostring()\n height, width = image.shape[:2]\n else:\n # image should be a tuple (pixels, width, height)\n pixels, width, height = image\n\n # Check dimensions\n if 0 != len(pixels) % (width * height):\n raise PyZbarError(\n (\n 'Inconsistent dimensions: image data of {0} bytes is not '\n 'divisible by (width x height = {1})'\n ).format(len(pixels), (width * height))\n )\n\n # Compute bits-per-pixel\n bpp = 8 * len(pixels) // (width * height)\n if 8 != bpp:\n raise PyZbarError(\n 'Unsupported bits-per-pixel [{0}]. 
Only [8] is supported.'.format(\n bpp\n )\n )\n\n return pixels, width, height", "title": "" }, { "docid": "04bbfd4b191103fef26d2425ccf53add", "score": "0.5208924", "text": "def from_protobuf(self, input_bytes):\n raise NotImplementedError", "title": "" }, { "docid": "44e96d4558f2c497b8261a059c487257", "score": "0.5206837", "text": "def decode(self):\n binzip = base64.b64decode(self.data)\n self.data = None\n binpack = zlib.decompress(binzip)\n del binzip\n _count = self.size.voxelCount\n bindata = struct.unpack(\"{}{}{}\".format(ENDIANNESS[self.endianness], _count, FORMAT_CHARS[self.mode]), binpack)\n del binpack\n self.data = numpy.array(bindata).reshape(*self.size.value[::-1])\n del bindata\n # self.data = numpy.frombuffer(zlib.decompress(binzip)).reshape(*self.size.value)\n # del binzip", "title": "" }, { "docid": "e13aa0fb177f825398e7408776a244a8", "score": "0.5197099", "text": "def __init__(self, image):\n self.image = image", "title": "" }, { "docid": "6a8e4270b8dbf8612683108207148a6f", "score": "0.51780266", "text": "def asImage(self, *args):\n return _osg.BufferData_asImage(self, *args)", "title": "" }, { "docid": "ffc2174687e7acabfb49617cf4597b80", "score": "0.51661783", "text": "def load(self):\n\n super(Bin, self).load()\n self._create_spaxels()", "title": "" }, { "docid": "09385964fbc794361175ead3d8ad35de", "score": "0.51626074", "text": "def from_bytes(cls, bytes_: bytes) -> Type:\n return cls(int.from_bytes(bytes_, 'big'))", "title": "" }, { "docid": "057e1dafa6a9cf3f2ae3fbe274eb6632", "score": "0.5162049", "text": "def import_bitmapr(\n cls,\n *,\n bitmap,\n values,\n nvals=None,\n nrows=None,\n ncols=None,\n is_iso=False,\n take_ownership=False,\n secure_import=False,\n dtype=None,\n format=None,\n name=None,\n **opts,\n ):\n return cls._import_bitmapr(\n bitmap=bitmap,\n values=values,\n nvals=nvals,\n nrows=nrows,\n ncols=ncols,\n is_iso=is_iso,\n take_ownership=take_ownership,\n secure_import=secure_import,\n dtype=dtype,\n format=format,\n name=name,\n method=\"import\",\n opts=opts,\n )", "title": "" }, { "docid": "0f888e88188bdb49384298990493845f", "score": "0.51600957", "text": "def readBinary(self,file):\n iulib.read_image_binary(self,file)", "title": "" }, { "docid": "4c7b8de27c571de078d1cd3a68a77fe5", "score": "0.5159917", "text": "def byte_array(self, value):\n if (value is None):\n raise TypeError(\"width of Image must be not None\")\n\n self.__byte_array = value", "title": "" }, { "docid": "414586e19fa95eddbf6365c913efa376", "score": "0.51576483", "text": "def load_image_file(file_path: str) -> np.ndarray:\n image_data = Image.open(file_path)\n return np.array(image_data).astype('uint8')", "title": "" }, { "docid": "c2e048e5e7bfb670206020648e163046", "score": "0.5147731", "text": "def __init__(self, *args, **kwargs):\n super(Byte, self).__init__(*args, generic=(Byte,), **kwargs)", "title": "" }, { "docid": "ce016ae8294f07dae592a2603915a6c8", "score": "0.5142915", "text": "def set_data_from_bytes(self, bytes_list: List[bytes]) -> None:\n if self.datatype.repeat > 0 and len(bytes_list) == 0:\n raise ValueError(\"Unable to set bytes when no bytes are given\")\n if self.datatype.repeat > 0 and self.datatype.repeat != len(bytes_list):\n raise ValueError(\"Unable to set bytes when bytes list isn't equal to repeat\")\n\n result = [parse_bytes_to_val(self, entry_bytes) for entry_bytes in bytes_list]\n self.data = bytes_parser.vorl(self, result)", "title": "" }, { "docid": "695c1540ede1e4b3a1210bc9d8f708f8", "score": "0.5134974", "text": "def setImage(self, *args):\n 
return _osg.PixelBufferObject_setImage(self, *args)", "title": "" }, { "docid": "8d5f1a90c4614d7eb8fc1541a41c207d", "score": "0.5134361", "text": "def read_32(fobj, start_length, size):\r\n (start, length) = start_length\r\n fobj.seek(start)\r\n pixel_size = (size[0] * size[2], size[1] * size[2])\r\n sizesq = pixel_size[0] * pixel_size[1]\r\n if length == sizesq * 3:\r\n # uncompressed (\"RGBRGBGB\")\r\n indata = fobj.read(length)\r\n im = Image.frombuffer(\"RGB\", pixel_size, indata, \"raw\", \"RGB\", 0, 1)\r\n else:\r\n # decode image\r\n im = Image.new(\"RGB\", pixel_size, None)\r\n for band_ix in range(3):\r\n data = []\r\n bytesleft = sizesq\r\n while bytesleft > 0:\r\n byte = fobj.read(1)\r\n if not byte:\r\n break\r\n byte = i8(byte)\r\n if byte & 0x80:\r\n blocksize = byte - 125\r\n byte = fobj.read(1)\r\n for i in range(blocksize):\r\n data.append(byte)\r\n else:\r\n blocksize = byte + 1\r\n data.append(fobj.read(blocksize))\r\n bytesleft = bytesleft - blocksize\r\n if bytesleft <= 0:\r\n break\r\n if bytesleft != 0:\r\n raise SyntaxError(\r\n \"Error reading channel [%r left]\" % bytesleft\r\n )\r\n band = Image.frombuffer(\r\n \"L\", pixel_size, b\"\".join(data), \"raw\", \"L\", 0, 1\r\n )\r\n im.im.putband(band.im, band_ix)\r\n return {\"RGB\": im}", "title": "" }, { "docid": "9928c46430153cb18261ed12a0a4efa2", "score": "0.51308155", "text": "def readPixels(self, *args):\n return _osg.Image_readPixels(self, *args)", "title": "" }, { "docid": "0a69a433b7c4d0504448205e4d8f827f", "score": "0.51299834", "text": "def load_image(image_file):\n raise RuntimeError(\"method 'load_image' must be defined in the derived class\")", "title": "" }, { "docid": "1e316189f9c27aee6cefcc486f8825b4", "score": "0.51289606", "text": "def set_image(self, image):\n canvas = Image.new(\"P\", (self.cols, self.rows))\n canvas.paste(image, (self.offset_x, self.offset_y))\n self.buf = numpy.array(canvas, dtype=numpy.uint8).reshape((self.rows, self.cols))", "title": "" }, { "docid": "84584e7fe04df57c10dfae559c5e630f", "score": "0.5124216", "text": "def load_data(self, data):\n self.data = data\n self.raw_data = data\n return", "title": "" }, { "docid": "75665408d0b21953029f945ae3036001", "score": "0.5119351", "text": "def load_data(self) -> None:\n img.apply(self.label, self.item.get_icon(self.subKey, self.is_pre))", "title": "" }, { "docid": "81921ae8a70769137a1067f65f56385c", "score": "0.5113821", "text": "def load_image(in_image):\n img = Image.open(in_image)\n return img", "title": "" }, { "docid": "81921ae8a70769137a1067f65f56385c", "score": "0.5113821", "text": "def load_image(in_image):\n img = Image.open(in_image)\n return img", "title": "" }, { "docid": "369d05d837942a90ae99a4b31e71c649", "score": "0.511366", "text": "def image_from_stored_pixel(component_type, format, data):\n\n tex = Texture(\"\")\n tex.setup_1d_texture(1, component_type, format)\n tex.set_ram_image(data)\n\n img = PNMImage()\n assert tex.store(img)\n return img", "title": "" }, { "docid": "24a05a38c3eade73b3f728dc35868b9d", "score": "0.51104605", "text": "def test_read(self):\n obj = openimage(self.fn)\n\n self.assertEqual(obj.bytecode, numpy.uint16, msg=\"bytecode is OK\")\n self.assertEqual(9, obj.dim1, \"dim1\")\n self.assertEqual(11, obj.dim2, \"dim2\")\n self.assertTrue(numpy.allclose(obj.data, self.ary), \"data\")", "title": "" }, { "docid": "50f1e2c9a592e68f61a6b7f4a7d1561f", "score": "0.5107033", "text": "def load(url):\n response = requests.get(url)\n pil_image = Image.open(BytesIO(response.content)).convert(\"RGB\")\n # 
convert to BGR format\n image = np.array(pil_image)[:, :, [2, 1, 0]]\n return image", "title": "" }, { "docid": "f4fbadc3b7a646260862c95b2c4d96f8", "score": "0.5105873", "text": "def load(self, path):\n sample = Image.open(path)\n if sample.mode != \"RGB\":\n sample = sample.convert(\"RGB\")\n return sample", "title": "" }, { "docid": "4cc6fd445560960384131f94380f631c", "score": "0.51050985", "text": "def load_image(self, image_id):\n # Load image\n image = skimage.io.imread(self.image_info[image_id]['path'])\n # If grayscale. Convert to RGB for consistency.\n if image.ndim != 3:\n image = skimage.color.gray2rgb(image)\n # If has an alpha channel, remove it for consistency\n if image.shape[-1] == 4:\n image = image[..., :3]\n return image", "title": "" } ]
393f2b2481feb0157d5fe5b38f663725
Process incoming message stanzas. Be aware that this also includes MUC messages and error messages. It is usually a good idea to check the message's type before processing or sending replies.
[ { "docid": "370a931f0d44cbd3dd5d54f8f744fa1d", "score": "0.0", "text": "def guest_message(self, msg):\n self.log.debug('guest message: {}'.format(msg['body']))\n\n if msg['type'] in ('chat', 'normal') and msg['body']:\n reply = self.process_command(msg)\n if reply:\n msg.reply(reply).send()", "title": "" } ]
[ { "docid": "1a797e23c1af7b815abde05d5b690c17", "score": "0.72993636", "text": "def _process(self):\n logger.info('Received message %s, a %s message: %r',\n self.message_id, self.message_type, self.message_body)\n\n # Since the example messages publish vary in type, redefine the\n # content-type to JSON so it's auto-encoded on the way out\n properties = self.properties\n properties.content_type = 'application/json'\n properties.type = 'Response message'\n\n # Reply to the message using the message reply_to value in properties\n self._reply({'processed': True}, properties)", "title": "" }, { "docid": "976ae2ba22ad3d8c766101412164bfd1", "score": "0.7125415", "text": "def _process_message(self, message_json):\n from eikon.streaming_session.session import Session\n\n if self._session.is_closing():\n return\n\n message_type = message_json['Type']\n _id = message_json.get(\"ID\")\n if _id == self._ws_login_id:\n self.log(Session.TRACE, f\"Receive message for login {_id}: {message_json}\")\n else:\n self.log(Session.TRACE, f\"Receive message for stream {_id}: {message_json}\")\n\n if message_type == \"Refresh\":\n if 'Domain' in message_json:\n message_domain = message_json['Domain']\n if message_domain == \"Login\":\n self._process_login_response(message_json)\n return\n self._process_refresh_message(message_json)\n elif message_type == 'Update':\n self._process_update_message(message_json)\n elif message_type == 'Status':\n if 'Domain' in message_json:\n message_domain = message_json['Domain']\n if message_domain == \"Login\":\n self._process_login_response(message_json)\n return\n self._process_status_message(message_json)\n elif message_type == 'Error':\n self._process_error_message(message_json)\n elif message_type == \"Ping\":\n self.log(logging.INFO, 'Receive ping from server ...')\n pong_json = {'Type': 'Pong'}\n self.send(pong_json)\n self.log(logging.INFO, ' ... send pong response')", "title": "" }, { "docid": "891559260bf449a0ac4f24d1a8bdcd06", "score": "0.682861", "text": "def handle_stanza(self, zmq_messages):\n\n # Handle all pulled ZMQ messages\n for zmq_message in zmq_messages:\n message = cPickle.loads(zmq_message.bytes)\n\n if isinstance(message, ZMQForwarder_message):\n self.handle_forwarder_message(message)\n else:\n self.route_stanza(message, zmq_message.bytes)", "title": "" }, { "docid": "c2e3859cd060d47be55e233306ea4e3a", "score": "0.67864347", "text": "def _parse_messages(self, messages):\n state_message = None\n num_state_messages = 0\n for message in messages:\n log(f'Environment received message {message}', self._verbose, level=3)\n message_type = message[MessageAttributes.TYPE]\n if message_type == IncomingMessageTypes.STATE:\n num_state_messages += 1\n state_message = message\n\n else:\n handling_method = getattr(self, '_parse_' + message_type)\n handling_method(message)\n\n if state_message is not None:\n if num_state_messages > 1:\n log('Warning!!!!! 
Received multiple state messages - consider sending state less frequent.',\n self._verbose, level=2)\n\n self._parse_states(state_message)", "title": "" }, { "docid": "60475218a52b35e8bedf1329605c993c", "score": "0.66942656", "text": "def handle_message(self, message):\n if not message[\"body\"]:\n return\n\n self.run_handler(EVENT_MESSAGE, message, None)\n\n if message[\"type\"] == \"groupchat\":\n # discard muc messages here.\n return\n\n body = message[\"body\"].split()\n\n command_n = body[0]\n arguments = body[1:]\n\n command = getattr(self, \"cmd_%s\" % command_n, None)\n\n if command:\n self.log.info(\"chat command %s %r\" % (command_n, arguments))\n result = command(message, arguments)\n if result is not None:\n if isinstance(result, list):\n for r in result:\n self.reply(message, r)\n elif isinstance(result, basestring):\n self.reply(message, result)", "title": "" }, { "docid": "db4be3b6c56f6260b84675b8864e1e0d", "score": "0.66183877", "text": "def _handle_message(self, msg, stats):\n try:\n self._handle_message_by_type(msg[0], msg, stats)\n except Exception:\n self.logger.exception(\"Error while handling a message\")\n self.logger.debug(\"Message caused the error %s\", str(msg))", "title": "" }, { "docid": "47169ef4b1abe36cd09287d131b863c9", "score": "0.6596102", "text": "def process_queue( self ):\n # 0. For each message in the queue\n while len( self.MSG_Queue ):\n # 1. Fetch message\n msg = self.MSG_Queue.popleft()\n # 2. Try to deliver the message to each of the subscribers of this type\n try:\n subs = self.MSG_subscribers[ msg.type ]\n for sub in subs:\n # NOTE: Every subscriber MUST have a `receive` method! (Would have an interface for more complex app)\n sub.receive( msg )\n except KeyError:\n print( \"MessageBus.process_queue: Message type\" , msg.type , \"has no subscribers!\\nDropped:\" , msg.msg )", "title": "" }, { "docid": "e1a61a5b0448b2d9e76fba07fbc9217f", "score": "0.6581126", "text": "def process_messages(self) -> None:\n\n try:\n while True:\n message = self.get_message()\n self._publish(message)\n logger.debug(\n f\"Published Pika events to exchange '{RABBITMQ_EXCHANGE}' on host \"\n f\"'{self.parameters.host}':\\n{message[0]}\"\n )\n except EOFError:\n # Will most likely happen when shutting down Rasa X.\n logger.debug(\n \"Pika message queue of worker was closed. 
Stopping to listen for more \"\n \"messages on this worker.\"\n )", "title": "" }, { "docid": "2aa1d5116179e742c9036eb16aff5b7b", "score": "0.65339684", "text": "def handleProcessed(self, message):\r\n\r\n # Remove message from list of sent messages.\r\n self.sent_messages.remove(message)\r\n\r\n # Disconnect messages processed signal.\r\n message.processed.disconnect(self.handleProcessed)\r\n \r\n # Call message finalizer.\r\n message.finalize()\r\n\r\n # Always exit on exceptions in strict mode.\r\n if self.strict and message.hasErrors():\r\n for m_error in message.getErrors():\r\n if m_error.hasException():\r\n m_error.printException()\r\n self.cleanUp()\r\n return\r\n\r\n # Notify the sender if errors occured while processing the\r\n # message and exit if the sender doesn't handle the error.\r\n if message.hasErrors():\r\n if not message.getSource().handleErrors(sent_message):\r\n self.cleanUp()\r\n return\r\n\r\n # Check the responses if we are in strict mode.\r\n if self.strict:\r\n validator = halMessage.valid_messages[message.m_type].get(\"resp\")\r\n for response in message.getResponses():\r\n halMessage.validateResponse(validator, message, response)\r\n\r\n # Notify the sender of any responses to the message.\r\n message.getSource().handleResponses(message)\r\n\r\n # Print a warning if the message was 'get functionality'\r\n # and there were no responses.\r\n if message.isType(\"get functionality\") and not message.hasResponses():\r\n print(\">> Warning functionality '\" + message.getData()[\"name\"] + \"' not found!\")\r\n hdebug.logText(\"no functionality \" + message.getData()[\"name\"])\r\n\r\n # Start message processing timer in case there are other messages\r\n # waiting for this message to get finalized.\r\n self.startMessageTimer()", "title": "" }, { "docid": "2f2cdf0be80a0d68bd94d220cdc55c27", "score": "0.6443218", "text": "def handle_message(self):\n\n # TODO write handle functions\n distribute = {\n \"broken\": handle_broken,\n \"cost\": handle_cost,\n \"profit\": handle_profit\n }\n\n while not self.signals[\"shutdown\"]:\n time.sleep(0.1)\n\n if len(self.message_recv_queue):\n message = self.message_recv_queue.pop()\n msg_type = message[\"message\"]\n\n distribute[msg_type](self, msg_type)", "title": "" }, { "docid": "1297dea6340849fc32b0340443f8b746", "score": "0.6438555", "text": "def message_handler():\n msg = network.pop_message()\n while msg is not None:\n if msg.subject[:5] == 'start':\n # split the message into list of JID's\n players = msg.body.split('::')\n for player in players:\n if player != '':\n # removing resource part of the jid for clean names without resource identifier\n name = player.split('/')[0]\n new_player = Player(name=name)\n service.player_list.append(new_player)\n service.started = True\n\n if msg.subject[:12] == 'round_result':\n # split the message into a dict of attacks\n attacks = msg.body.split('::')\n attack_dict = {}\n # TODO counter starts at one since first entry will be empty\n counter = 1\n # recreate the attack dictionary from the server\n while counter < len(attacks) - 1:\n attack_dict[attacks[counter]] = attacks[counter+1]\n counter = counter + 1\n service.previous_round = attack_dict\n\n if msg.subject[:9] == 'new_round':\n service.clicked_player = None\n service.timestamp = time()\n\n # go to next message\n msg = network.pop_message()", "title": "" }, { "docid": "1bb4f23ce70086ab6ca1d0dc674ddeb3", "score": "0.6423268", "text": "def processIncoming(self):\n while self.queue.qsize( ):\n try:\n msg = self.queue.get(0)\n # 
Check contents of message and do whatever is needed. As a\n # simple test, print it (in real life, you would\n # suitably update the GUI's display in a richer fashion).\n print msg\n except Queue.Empty:\n # just on general principles, although we don't\n # expect this branch to be taken in this case\n pass", "title": "" }, { "docid": "35f69cf2df3a20f07ec5be72ed28c94b", "score": "0.6398006", "text": "def _process_messages(self, message):\n if message.is_text():\n if message.body.startswith(GD_NAMES):\n message = self.message_re.match(message.body)\n if message:\n plugin = message.group('plugin')\n action = message.group('action')\n data = message.group('data')\n\n try:\n self.active_plugins[plugin].run(action, data)\n except KeyError:\n self.logging.warning(\n 'Unable to find requested plugin ' + plugin)", "title": "" }, { "docid": "2d6d90d764cd82b0740e673ed18ea189", "score": "0.6331528", "text": "def process_message(self, message):\n body, message_type = self.route(message)\n if message['type'] != 'private':\n message['type'] = message_type\n return self.create_message_from_message(message, body)", "title": "" }, { "docid": "e8bf747437535d9bbd2e4bf29c1f792a", "score": "0.6315551", "text": "def handle_request(self, msg):\n client_addr, _, message = msg\n self.msgs_recv += 1\n\n try:\n unpacked = msgpack.unpackb(message, encoding='utf-8')\n except msgpack.exceptions.UnpackValueError:\n self._logger.error('Failed to unpack', exc_info=True)\n IOLoop.instance().stop()\n\n processed = self.process(unpacked)\n packed = msgpack.packb(processed)\n\n try:\n self.reply(self.frontstream, client_addr, packed)\n except TypeError:\n self._logger.error('Encountered error', exc_info=True)", "title": "" }, { "docid": "e0f2a77b9eef76105e7202af4304e139", "score": "0.6313487", "text": "def processMessage(self, command: str, msg):\n\n _LOGGER.debug(f\"CONTROLLER: received {command} response: {msg}\")\n\n try:\n if command == \"SendQuery\":\n self.receivedQueryResult(msg[\"queryName\"], msg[\"answer\"])\n elif command == \"NotifyList\":\n self.receivedNotifyList(msg[\"objectList\"])\n elif command == \"WriteParamList\":\n self.receivedWriteParamList(msg[\"objectList\"][0][\"changes\"])\n elif command == \"SendParamList\":\n self.receivedSystemConfig(msg[\"objectList\"])\n else:\n _LOGGER.debug(f\"no handler for {command}\")\n except Exception as err:\n _LOGGER.error(f\"error {err} while processing {msg}\")\n # traceback.print_exc()", "title": "" }, { "docid": "d1197ab0b37ebda9cee807ce9cd78149", "score": "0.62928164", "text": "def process_task(self, body, message):\n\n logger.info(f'Received message with body={body}')\n\n unpacked_envelope = unpackb(body)\n type = unpacked_envelope.get('messageType')\n\n if type in [MessageTypes.SERVICE, MessageTypes.DELEGATION]:\n logger.error(f'Message Type {type} not implemented')\n\n if type in [MessageTypes.INVITE]:\n self.save_message(body)\n\n if type == MessageTypes.REGISTRATION:\n self.save_persona(body)\n self.save_message(body)\n\n if type in [MessageTypes.ASSERTION, MessageTypes.ATTESTATION]:\n self.save_message(body)\n\n message.ack()\n\n logger.info(f'Message processed')", "title": "" }, { "docid": "933510e67c7323fb1b86dfd88a1e124d", "score": "0.6262415", "text": "def handleSendMessage(self):\r\n # Process the next message.\r\n if (len(self.queued_messages) > 0):\r\n cur_message = self.queued_messages.popleft()\r\n \r\n #\r\n # If this message requested synchronization and there are\r\n # pending messages then push it back into the queue.\r\n #\r\n if 
cur_message.sync and (len(self.sent_messages) > 0):\r\n print(\"> waiting for the following to be processed:\")\r\n for message in self.sent_messages:\r\n text = \" '\" + message.m_type + \"' from \" + message.getSourceName() + \", \"\r\n text += str(message.getRefCount()) + \" module(s) have not responded yet.\"\r\n print(text)\r\n print(\"\")\r\n self.queued_messages.appendleft(cur_message)\r\n \r\n #\r\n # Otherwise process the message.\r\n #\r\n else:\r\n print(cur_message.source.module_name + \" '\" + cur_message.m_type + \"'\")\r\n\r\n # Check for \"closeEvent\" message from the main window.\r\n if cur_message.isType(\"close event\") and (cur_message.getSourceName() == \"hal\"):\r\n self.cleanUp()\r\n return\r\n\r\n else:\r\n # Check for \"sync\" message, these don't actually get sent.\r\n if cur_message.isType(\"sync\"):\r\n pass\r\n\r\n # Otherwise send the message.\r\n else:\r\n cur_message.logEvent(\"sent\")\r\n\r\n cur_message.processed.connect(self.handleProcessed)\r\n self.sent_messages.append(cur_message)\r\n for module in self.modules:\r\n cur_message.ref_count += 1\r\n module.handleMessage(cur_message)\r\n\r\n # Process any remaining messages with immediate timeout.\r\n if (len(self.queued_messages) > 0):\r\n self.startMessageTimer()", "title": "" }, { "docid": "6d56dc00540e375c60ed12f5f1459fa2", "score": "0.6251187", "text": "def processIncoming(self):\n while self.queue.qsize():\n try:\n msg = self.queue.get(0)\n # Check contents of message and do what it says\n # As a test, we simply print it\n self.text.config(state=NORMAL)\n self.text.insert(END,\n \"Received LLAP from {} with DATA: {}\\n\".format(\n msg['devID'], msg['payload']),\n 'receive')\n if msg['devID'] == self.devID.get():\n if msg['payload'].startswith(\"A\"):\n if msg['payload'][2:3] == '0':\n self.updateGraph(msg['payload'][4:])\n \n self.text.see(END)\n self.text.config(state=DISABLED)\n self.queue.task_done()\n except Queue.Empty:\n pass", "title": "" }, { "docid": "db8f46eb57eeacae2e8776912fccad29", "score": "0.62429327", "text": "def treatMessage(cls, message):\n cls.logger.debug(\"Treating message %s\", message.__class__.__name__)\n assert isinstance(message, Message)\n # forwarding mechanism\n if message.targetId != Identification.PROBE_ID:\n if ProbeStorage.isKnownId(message.targetId):\n message.recipientId = message.targetId\n else:\n #if we already forwarded it before , stop here\n if message.hash in cls.forwardedMessages:\n cls.logger.warning(\"Throwing message %s in forward because message was previously forwarded.\", message.__class__.__name__)\n return\n Scheduler.forward()\n message.recipientId = ProbeStorage.getOtherRandomId()\n cls.logger.info(\"Forwarding message %s for %s to id %s\", message.__class__.__name__, message.targetId, message.recipientId)\n cls.forwardedMessages.append(message.hash)\n Client.send(message)\n return\n # handle special class of messages separately\n if isinstance(message, TestMessage):\n cls.treatTestMessage(message)\n elif isinstance(message, WatcherMessage):\n cls.treatWatcherMessage(message)\n elif isinstance(message, BroadCast):\n # broadcast = do required action first and continue broadcast\n cls.logger.ddebug(\"Handling Broadcast\")\n try:\n ActionMan.addTask(MTA.toAction(message.getMessage()))\n except ActionError:\n pass\n # be sure to propagate broadcast if a reasonable error occurs\n ActionMan.addTask(MTA.toAction(message))\n # Client.broadcast(message)\n else:\n # handles everything else, including Do messages\n ActionMan.addTask(MTA.toAction(message))", 
"title": "" }, { "docid": "98e2ab0beda06993b1a89b20af2d37e3", "score": "0.62428844", "text": "def _process_message(self, message_json):\n message_type = message_json['Type']\n\n if message_type == \"Refresh\":\n if 'Domain' in message_json:\n message_domain = message_json['Domain']\n if message_domain == \"Login\":\n self._process_login_response(message_json)\n elif message_type == \"Ping\":\n pong_json = {'Type': 'Pong'}\n self.web_socket_app.send(json.dumps(pong_json))\n print(\"SENT on \" + self.session_name + \":\")\n print(json.dumps(pong_json, sort_keys=True, indent=2, separators=(',', ':')))", "title": "" }, { "docid": "5834c7a0c3f4d52202d524db7936bcce", "score": "0.62395567", "text": "def _on_mpd_message(self, msg):\n # 1st part is empty\n msg.pop(0)\n # 2nd part is protocol version\n # TODO: version check\n proto = msg.pop(0)\n # 3rd part is message type\n msg_type = msg.pop(0)\n # XXX: hardcoded message types!\n # any message resets the liveness counter\n self.need_handshake = False\n self.curr_liveness = self.HB_LIVENESS\n if msg_type == b'\\x05': # disconnect\n self.curr_liveness = 0 # reconnect will be triggered by hb timer\n elif msg_type == b'\\x02': # request\n # remaining parts are the user message\n envelope, msg = split_address(msg)\n envelope.append(b'')\n envelope = [ b'', self._proto_version, b'\\x03'] + envelope # REPLY\n self.envelope = envelope\n mes = MQMessage()\n mes.set(msg)\n self.on_mdp_request(mes)\n else:\n # invalid message\n # ignored\n pass\n return", "title": "" }, { "docid": "0fda55f18bb1fa73fe6a758f792ef74b", "score": "0.6216952", "text": "def handle_messaging(self):\n\t\t#: Main loop. This checks the contents of self.messages every\n\t\t#: second. If a message exists in self.messages, then send that\n\t\t#: message to every user, and clear self.messages.\n\t\twhile self.running:\n\n\t\t\t#: Check if there are any unprocessed messages.\n\t\t\tif len(self.messages) != 0:\n\n\t\t\t\t#: If there are, go through each message, and\n\t\t\t\t#: each connected client, and send that message\n\t\t\t\t#: to each client in self.clientlist.\n\t\t\t\tfor message in self.messages:\n\t\t\t\t\tfor client, address in self.clientlist:\n\t\t\t\t\t\t#: If an exception is thrown, then\n\t\t\t\t\t\t#: this client no longer exists, and\n\t\t\t\t\t\t#: should be removed.\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t#: Dont send the message to the client who sent it.\n\t\t\t\t\t\t\t#: Unless this is the address is the host (for testing\n\t\t\t\t\t\t\t#: purposes.)\n\t\t\t\t\t\t\tif message[1] != address or address == \"127.0.0.1\":\n\t\t\t\t\t\t\t\tclient.send(message[0].encode())\n\t\t\t\t\t\texcept Exception as e:\n\n\t\t\t\t\t\t\t#: Print the exeption's stacktrace.\n\t\t\t\t\t\t\ttraceback.print_exc()\n\n\t\t\t\t\t\t\t#: This client is missing, and should be removed.\n\t\t\t\t\t\t\tself.close_client(client, address, \"missing connection\")\n\n\t\t\t\t\t#: Log that the message has been sent to each\n\t\t\t\t\t#: client to the main server.\n\t\t\t\t\tprint(\"Sending message from {} to all\".format(message[1]))\n\t\t\t\t\tself.messages.remove(message) #: Remove proccessed message.\n\n\t\t\telse:\n\n\t\t\t\t#: If no messages were recieved, wait one second.\n\t\t\t\t#: If messages were recieved, then dont wait the\n\t\t\t\t#: second, as sending a message to each client may\n\t\t\t\t#: have taken time.\n\t\t\t\ttime.sleep(1)", "title": "" }, { "docid": "bf0c92affe475a3fc9302306c60b6381", "score": "0.62099236", "text": "def handle_message(self, msg, status):\r\n\r\n # Skype API may give 
different encodings\r\n # on different platforms\r\n body = ensure_unicode(msg.Body)\r\n\r\n logger.debug(\"Tasks handler got: %s\" % body)\r\n\r\n # Parse the chat message to commanding part and arguments\r\n words = body.split(\" \")\r\n lower = body.lower()\r\n\r\n if len(words) == 0:\r\n return False\r\n\r\n # Parse argument for two part command names\r\n if len(words) >= 2:\r\n desc = \" \".join(words[2:])\r\n else:\r\n desc = None\r\n\r\n chat_id = get_chat_id(msg.Chat)\r\n\r\n # Check if we match any of our commands\r\n for name, cmd in self.commands.items():\r\n if lower.startswith(name):\r\n cmd(msg, status, desc, chat_id)\r\n return True\r\n\r\n return False", "title": "" }, { "docid": "f42311a08c72e481a546d868775c17ba", "score": "0.6208667", "text": "def process_message(self, message):\n try:\n self.logger.debug(\"Received message %s with id %r.\", message, message.message_id)\n self.broker.emit_before(\"process_message\", message)\n\n res = None\n if not message.failed:\n actor = self.broker.get_actor(message.actor_name)\n res = actor(*message.args, **message.kwargs)\n\n self.broker.emit_after(\"process_message\", message, result=res)\n\n except SkipMessage as e:\n self.logger.warning(\"Message %s was skipped.\", message)\n self.broker.emit_after(\"skip_message\", message)\n\n except BaseException as e:\n if isinstance(e, RateLimitExceeded):\n self.logger.warning(\"Rate limit exceeded in message %s: %s.\", message, e.message)\n else:\n self.logger.warning(\"Failed to process message %s with unhandled exception.\", message, exc_info=True)\n\n self.broker.emit_after(\"process_message\", message, exception=e)\n\n finally:\n # NOTE: There is no race here as any message that was\n # processed must have come off of a consumer. Therefore,\n # there has to be a consumer for that message's queue so\n # this is safe. 
Probably.\n self.consumers[message.queue_name].post_process_message(message)\n self.work_queue.task_done()", "title": "" }, { "docid": "de072e7078695b7abe681a1dd6ce0f83", "score": "0.61942124", "text": "def handle_internal_messages(self) -> None:\n self._handle_decision_maker_out_queue()\n # get new behaviours and handlers from the agent skills\n self._handle_new_behaviours()\n self._handle_new_handlers()", "title": "" }, { "docid": "122995ccbb9a7b7f86f827c09b7befcd", "score": "0.6188609", "text": "def run(self):\n\n for message in self.consumer:\n\n msg_data = json.loads(message.value.decode('utf-8'))\n\n try:\n\n api_data = self.process.run(msg_data)\n\n if api_data:\n msg_data.update(api_data)\n\n self.producer.send(self.process.destination_topic, json.dumps(msg_data).encode())\n\n except Exception:\n\n if self.process.source_topic != self.error_topic:\n\n err_msg = [{'imdb_id': msg_data['imdb_id'],\n 'error_message': '{0}: {1}'.format(self.process_name, traceback.format_exc())}]\n\n self.producer.send(self.error_topic, json.dumps(err_msg).encode('utf-8'))\n\n self.consumer.commit()", "title": "" }, { "docid": "42d7700664da65a3e5fdb851fe58690f", "score": "0.61816597", "text": "def _process_messages(self, request_id, response, exception):\n # pprint(request_id)\n if exception is not None:\n # Do something with the exception.\n pass\n else:\n # pprint(response)\n if 'payload' not in response:\n print('Warning: No payload...')\n pprint(response)\n return\n if 'headers' not in response['payload']:\n print('Warning: No headers...')\n pprint(response)\n return\n if len(response['payload']['headers']) > 1:\n print('Warning: header length > 1')\n pprint(response)\n return\n\n size = response['sizeEstimate'] / 1024**2\n labels = []\n if 'labelIds' in response:\n labels = response['labelIds']\n for i, label in enumerate(labels):\n labels[i] = self._labels[label]\n sender = response['payload']['headers'][0]['value']\n\n if sender in self._stats:\n self._stats[sender]['count'] += 1\n self._stats[sender]['size'] += size\n for label in labels:\n if label not in self._stats[sender]['labels']:\n self._stats[sender]['labels'].append(label)\n else:\n self._stats[sender] = {\n 'count': 1,\n 'size': size,\n 'labels': labels\n }", "title": "" }, { "docid": "78bc9fd992291cd9180173c2abed4d67", "score": "0.61669695", "text": "def process_message(self, stomp):\n message = RabbitMessage.unpack(stomp.json)\n destination = re.search(r'([0-9a-f]+).(?:notifications|messages)', stomp.headers['destination']).groups()[0]\n if message['action'] == 'add' and message['object'] == 'message':\n sent = datetime.strptime(message['published'], '%Y-%m-%dT%H:%M:%S.%fZ')\n recv = datetime.utcnow()\n elapsed = abs((recv - sent).total_seconds())\n self.received.append((message, elapsed))\n #self.log('{}@{} ({:.3f}): {}'.format(message['user']['username'], destination, elapsed, message['data']['text']))\n self.trigger('message_received', stomp)\n elif message['action'] == 'add' and message['object'] == 'conversation':\n self.log('{}@{}: Just started a chat'.format(message['user']['username'], destination))\n self.trigger('conversation_started', stomp)\n elif message['action'] == 'ack' and message['object'] == 'message':\n sent = datetime.strptime(message['published'], '%Y-%m-%dT%H:%M:%S.%fZ')\n recv = datetime.utcnow()\n elapsed = abs((recv - sent).total_seconds())\n self.acknowledged.append((message, elapsed))\n self.trigger('message_ackd', stomp)\n else:\n print '\\n{}\\n'.format(message)", "title": "" }, { "docid": 
"235948a9056a12176d82294aa3a85c69", "score": "0.61624646", "text": "def process(self, message):\n if not message.text:\n return\n message_text = message.text.split('/Rachel')[-1]\n print \"Processing message: '\" + message_text + \"'\"\n\n # Check for greeting\n if re.search(r'\\b(hello|hey|hi)\\b', message_text, re.IGNORECASE):\n message.reply(self.greet(message.sender['first']))\n\n # Check for \"I love you\"\n if re.search('i love you', message_text, re.IGNORECASE):\n message.reply(self.love(message.sender))\n\n #check for OR\n if re.search(r'\\bOR\\b', message_text):\n message.reply(self.choose(message_text))\n\n # Check for affirmation request\n if re.search(r'right\\, rachel\\?', message_text, re.IGNORECASE):\n message.reply(self.affirm(message.sender))\n\n # Check for a giphy search\n if re.search('gif', message_text, re.IGNORECASE):\n message.reply_with_photo(plugins.gif.get_gif(message))\n\n # Check for a general-knowledge question\n if re.search(r'^(rachel\\,).*\\?$', message_text, re.IGNORECASE):\n message.reply(plugins.wolfram.get_answer(message))\n\n # Check for reminder request\n if re.search(r'^(remind me).*', message_text, re.IGNORECASE):\n print \"reminder!\"\n message.reply(plugins.remind.store_reminder(self.state, message))\n self.save_state()\n\n # Check for karma increase\n if re.search(r'\\+\\+$', message_text):\n message.reply(plugins.karma.increase_karma(self.state, message_text.split('++')[0].lower()))\n self.save_state()\n\n\n # Check for karma decrease\n if re.search(r'\\-\\-$', message_text):\n message.reply(plugins.karma.decrease_karma(self.state, message_text.split('--')[0].lower()))\n self.save_state()\n\n\n # Check for karma all\n if message_text == \"karma all\":\n message.reply(plugins.karma.karma_all(self.state))", "title": "" }, { "docid": "51ce2be06415060087d8c4f33d09f158", "score": "0.6155375", "text": "async def _handle_messages(ctx: FilterContext) -> None:\n if not ctx.message or not ctx.message.guild:\n return\n\n # If deletion somehow fails at least this will allow scheduling for deletion.\n ctx.messages_deletion = True\n channel_messages = defaultdict(set) # Duplicates will cause batch deletion to fail.\n for message in {ctx.message} | ctx.related_messages:\n channel_messages[message.channel].add(message)\n\n success = fail = 0\n deleted = list()\n for channel, messages in channel_messages.items():\n try:\n await channel.delete_messages(messages)\n except HTTPException:\n fail += len(messages)\n else:\n success += len(messages)\n deleted.extend(messages)\n scheduling.create_task(upload_messages_attachments(ctx, deleted))\n\n if not fail:\n if success == 1:\n ctx.action_descriptions.append(\"deleted\")\n else:\n ctx.action_descriptions.append(\"deleted all\")\n elif not success:\n if fail == 1:\n ctx.action_descriptions.append(\"failed to delete\")\n else:\n ctx.action_descriptions.append(\"all failed to delete\")\n else:\n ctx.action_descriptions.append(f\"{success} deleted, {fail} failed to delete\")", "title": "" }, { "docid": "f1b91cad84452da670d47103681b37b6", "score": "0.61379224", "text": "def process(self, _, data):\n data = Data.parse(data)\n if data.msg_id <= self.message_id and data.actref != SYNC:\n return\n self.update_msg_id(data.msg_id)\n if data.actref == OPEN:\n self.detect()\n elif data.actref == CLOSE:\n pass\n elif data.actref == SYNC:\n self.sync(data)", "title": "" }, { "docid": "84bdb5904ebbcdcd187d10cc67486d3c", "score": "0.61334866", "text": "def _on_message(self, msg):\n # type: (List[bytes]) -> None\n\n # 2nd part is 
protocol version\n        protocol_version = msg.pop(0)\n        if protocol_version != UNI_CLIENT_HEADER:  # version check, ignore old versions\n            logger.error(\"Message doesn't start with {}\".format(UNI_CLIENT_HEADER))\n            return\n        # 3rd part is message type\n        msg_type = msg.pop(0)\n        # any message resets the liveness counter\n        self._need_handshake = False\n        self._connected_event.set()\n        self._curr_liveness = HB_LIVENESS\n        if msg_type == WORKER_DISCONNECT:  # disconnect\n            self._curr_liveness = 0  # reconnect will be triggered by hb timer\n        elif msg_type == WORKER_REQUEST:  # request\n            # remaining parts are the user message\n            self._on_request(msg)\n        elif msg_type == WORKER_HEARTBEAT:\n            # received heartbeat - timer handled above\n            pass\n        else:\n            logger.error(\"Uniworker received unrecognized message\")", "title": "" }, { "docid": "3c1e4b1962c8fb7d14ce6ec6ddc23303", "score": "0.6132106", "text": "def dataReceived(self, data):\n        messages = self.split_messages(data)\n\n        for msg_json in messages:\n            msg = json.loads(msg_json)\n            \n            if self.state == \"INITIALIZATION\":\n                if msg[\"type\"] == \"greetings\":\n                    self.client.handleInit()\n                    self.state = \"PLAYING\"\n            elif self.state == \"PLAYING\":\n                if msg[\"type\"] == \"chat\":\n                    self.client.display.addMessage(msg[\"content\"])\n                if msg[\"type\"] == \"move\":\n                    self.client.handleMove(msg[\"description\"])\n                elif msg[\"type\"] == \"illegal-move\":\n                    self.client.handleIllegalMove(msg[\"description\"])\n                elif msg[\"type\"] == \"status\":\n                    if msg[\"status\"] == \"ready\":\n                        self.client.handleReady()\n                elif msg[\"type\"] == \"lightboard\":\n                    self.client.handleUpdateBoard(msg[\"description\"])\n                elif msg[\"type\"] == \"checks\":\n                    self.client.handleChecks(msg[\"description\"])\n                elif msg[\"type\"] == \"checkmates\":\n                    self.client.CheckMates(msg[\"description\"])\n                elif msg[\"type\"] == \"disconnection\":\n                    self.client.handleDisconnection(msg[\"description\"])\n            else:\n                pass", "title": "" }, { "docid": "cce3405abac47a0c40b5be9983dc1fb9", "score": "0.61310345", "text": "def process_msg(msg: Message) -> None:\n\n    # def hack_pkts(this, prev) -> None:  # TODO: needs work, e.g. merge 000A fragments\n    # #  TODO: ?move to ctl._handle_msg() and/or system._handle_msg()?\n    # if re.search(\"I.* 01.* 000A \", str(this._pkt)):  # HACK: and dtm < 3 secs\n    #     # TODO: an edge case here: >2 000A packets in a row\n    #     if prev is not None and re.search(\"I.* 01.* 000A \", str(prev._pkt)):\n    #         this._payload = prev.payload + this.payload  # merge frags, and process\n\n    if _LOGGER.getEffectiveLevel() == logging.INFO:  # i.e. don't log for DEBUG\n        _LOGGER.info(msg)\n\n    if not msg.is_valid or msg._gwy.config.reduce_processing >= DONT_CREATE_ENTITIES:\n        return\n\n    # TODO: This will need to be removed for HGI80-impersonation\n    # 18:/RQs are unreliable, although any corresponding RPs are often required\n    if msg.src.type == \"18\":\n        return\n\n    try:  # process the packet payload\n        _create_devices(msg)  # from pkt header & from msg payload (e.g.
000C)\n _create_zones(msg) # create zones & (TBD) ufh_zones too?\n\n if msg._gwy.config.reduce_processing < DONT_UPDATE_ENTITIES:\n # _update_entities(msg, msg._gwy._prev_msg) # update the state database\n msg.src._handle_msg(msg)\n\n except (AssertionError, NotImplementedError) as err:\n (_LOGGER.error if DEV_MODE else _LOGGER.warning)(\n \"%s << %s\", msg._pkt, f\"{err.__class__.__name__}({err})\"\n )\n return # NOTE: use raise only when debugging\n\n except (AttributeError, LookupError, TypeError, ValueError) as err:\n (_LOGGER.exception if DEV_MODE else _LOGGER.error)(\n \"%s << %s\", msg._pkt, f\"{err.__class__.__name__}({err})\"\n )\n return # NOTE: use raise only when debugging\n\n except CorruptStateError as err: # TODO: add CorruptEvohomeError\n (_LOGGER.exception if DEV_MODE else _LOGGER.error)(\"%s << %s\", msg._pkt, err)\n return # TODO: bad pkt, or Schema\n\n msg._gwy._prev_msg = msg", "title": "" }, { "docid": "25a908b5a6b2107de42d33bad1a1aafc", "score": "0.6077322", "text": "def process(self, msg):\n raise NotImplementedError", "title": "" }, { "docid": "54b5cf928cf3175fa4102a8ce078ccf0", "score": "0.6073985", "text": "def process_message(self, message):\n name = message[0]\n if len(message) == 2:\n if message[-1] == \"JOIN\":\n self._process_join(name)\n elif message[-1] == \"EXIT\":\n self._process_exit(name)\n elif message[-1] == \"READY\":\n self._process_ready(name)\n\n else:\n if name != self._player_name and message[1] == \"MOVE\":\n return self._process_move(message[2:])\n elif message[1] == \"RESP\":\n response = self._process_response(name, message[2:])\n return response\n elif message[1] == \"END\":\n self._set_phase(\"end\", message[2])\n return None", "title": "" }, { "docid": "059cd3fa15bdb1e34c0474b3a4a4ecaa", "score": "0.60575324", "text": "def parse_messages(self, output):\n raise NotImplementedError(\"Subclasses should implement this!\")", "title": "" }, { "docid": "bd28e889940caa4fbf2b6dfb72511287", "score": "0.6045801", "text": "def process_msg(self, msg):\n try:\n xid = datetime.datetime.now().timestamp()\n self.logger.debug(\"%s, Msg text: %s\", xid, msg.text)\n self.logger.debug(\"%s, process_msg_step_0\", xid)\n chat_uid = \"%s.%s\" % (msg.channel_id, msg.origin['uid'])\n tg_chats = db.get_chat_assoc(slave_uid=chat_uid)\n tg_chat = None\n multi_slaves = False\n\n if tg_chats:\n tg_chat = tg_chats[0]\n slaves = db.get_chat_assoc(master_uid=tg_chat)\n if slaves and len(slaves) > 1:\n multi_slaves = True\n\n msg_prefix = \"\" # For group member name\n tg_chat_assoced = False\n\n if msg.source != MsgSource.Group:\n msg.member = {\"uid\": -1, \"name\": \"\", \"alias\": \"\"}\n\n # Generate chat text template & Decide type target\n tg_dest = getattr(config, self.channel_id)['admins'][0]\n self.logger.debug(\"%s, process_msg_step_1, tg_dest=%s, msg.origin=%s\", xid, tg_dest, str(msg.origin))\n if msg.source == MsgSource.Group:\n self.logger.debug(\"msg.member: %s\", str(msg.member))\n msg_prefix = msg.member['name'] if msg.member['name'] == msg.member['alias'] or not msg.member['alias'] \\\n else \"%s (%s)\" % (msg.member['alias'], msg.member['name'])\n\n if tg_chat: # if this chat is linked\n tg_dest = int(tg_chat.split('.')[1])\n tg_chat_assoced = True\n\n if tg_chat and not multi_slaves: # if singly linked\n if msg_prefix: # if group message\n msg_template = \"%s:\\n%s\" % (msg_prefix, \"%s\")\n else:\n msg_template = \"%s\"\n elif msg.source == MsgSource.User:\n emoji_prefix = msg.channel_emoji + utils.Emojis.get_source_emoji(msg.source)\n name_prefix = 
msg.origin[\"name\"] if msg.origin[\"alias\"] == msg.origin[\"name\"] or not msg.origin['alias'] \\\n else \"%s (%s)\" % (msg.origin[\"alias\"], msg.origin[\"name\"])\n msg_template = \"%s %s:\\n%s\" % (emoji_prefix, name_prefix, \"%s\")\n elif msg.source == MsgSource.Group:\n emoji_prefix = msg.channel_emoji + utils.Emojis.get_source_emoji(msg.source)\n name_prefix = msg.origin[\"name\"] if msg.origin[\"alias\"] == msg.origin[\"name\"] or not msg.origin['alias'] \\\n else \"%s (%s)\" % (msg.origin[\"alias\"], msg.origin[\"name\"])\n msg_template = \"%s %s [%s]:\\n%s\" % (emoji_prefix, msg_prefix, name_prefix, \"%s\")\n elif msg.source == MsgSource.System:\n emoji_prefix = msg.channel_emoji + utils.Emojis.get_source_emoji(msg.source)\n name_prefix = msg.origin[\"name\"] if msg.origin[\"alias\"] == msg.origin[\"name\"] or not msg.origin['alias'] \\\n else \"%s (%s)\" % (msg.origin[\"alias\"], msg.origin[\"name\"])\n msg_template = \"%s %s:\\n%s\" % (emoji_prefix, name_prefix, \"%s\")\n else:\n msg_template = \"Unknown message source (%s)\\n%s\" % (msg.source, \"%s\")\n\n # Type dispatching\n self.logger.debug(\"%s, process_msg_step_2\", xid)\n append_last_msg = False\n if msg.type == MsgType.Text:\n parse_mode = \"HTML\" if self._flag(\"text_as_html\", False) else None\n if tg_chat_assoced:\n last_msg = db.get_last_msg_from_chat(tg_dest)\n if last_msg:\n if last_msg.msg_type == \"Text\":\n append_last_msg = str(last_msg.slave_origin_uid) == \"%s.%s\" % (msg.channel_id, msg.origin['uid'])\n if msg.source == MsgSource.Group:\n append_last_msg &= str(last_msg.slave_member_uid) == str(msg.member['uid'])\n append_last_msg &= datetime.datetime.now() - last_msg.time <= datetime.timedelta(\n seconds=self._flag('join_msg_threshold_secs', 15))\n else:\n append_last_msg = False\n else:\n append_last_msg = False\n self.logger.debug(\"Text: Append last msg: %s\", append_last_msg)\n self.logger.debug(\"%s, process_msg_step_3_0, tg_dest = %s, tg_chat_assoced = %s, append_last_msg = %s\",\n xid, tg_dest, tg_chat_assoced, append_last_msg)\n if tg_chat_assoced and append_last_msg:\n self.logger.debug(\"%s, process_msg_step_3_0_1\", xid)\n msg.text = \"%s\\n%s\" % (last_msg.text, msg.text)\n try:\n tg_msg = self.bot.bot.editMessageText(chat_id=tg_dest,\n message_id=last_msg.master_msg_id.split(\".\", 1)[1],\n text=msg_template % msg.text,\n parse_mode=parse_mode)\n except telegram.error.BadRequest:\n tg_msg = self.bot.bot.editMessageText(chat_id=tg_dest,\n message_id=last_msg.master_msg_id.split(\".\", 1)[1],\n text=msg_template % msg.text)\n else:\n self.logger.debug(\"%s, process_msg_step_3_0_3\", xid)\n try:\n tg_msg = self.bot.bot.send_message(tg_dest, text=msg_template % msg.text, parse_mode=parse_mode)\n except telegram.error.BadRequest:\n tg_msg = self.bot.bot.send_message(tg_dest, text=msg_template % msg.text)\n self.logger.debug(\"%s, process_msg_step_3_0_4, tg_msg = %s\", xid, tg_msg)\n self.logger.debug(\"%s, process_msg_step_3_1\", xid)\n elif msg.type == MsgType.Link:\n thumbnail = urllib.parse.quote(msg.attributes[\"image\"] or \"\", safe=\"?=&#:/\")\n thumbnail = \"<a href=\\\"%s\\\">🔗</a>\" % thumbnail if thumbnail else \"🔗\"\n text = \"%s <a href=\\\"%s\\\">%s</a>\\n%s\" % \\\n (thumbnail,\n urllib.parse.quote(msg.attributes[\"url\"], safe=\"?=&#:/\"),\n html.escape(msg.attributes[\"title\"] or msg.attributes[\"url\"]),\n html.escape(msg.attributes[\"description\"] or \"\"))\n if msg.text:\n text += \"\\n\\n\" + msg.text\n try:\n tg_msg = self.bot.bot.send_message(tg_dest, text=msg_template % 
text, parse_mode=\"HTML\")\n except telegram.error.BadRequest:\n text = \"🔗 %s\\n%s\\n\\n%s\" % (html.escape(msg.attributes[\"title\"] or \"\"),\n html.escape(msg.attributes[\"description\"] or \"\"),\n urllib.parse.quote(msg.attributes[\"url\"] or \"\", safe=\"?=&#:/\"))\n if msg.text:\n text += \"\\n\\n\" + msg.text\n tg_msg = self.bot.bot.send_message(tg_dest, text=msg_template % msg.text)\n elif msg.type in [MsgType.Image, MsgType.Sticker]:\n self.logger.debug(\"%s, process_msg_step_3_2\", xid)\n self.logger.debug(\"Received %s\\nPath: %s\\nMIME: %s\", msg.type, msg.path, msg.mime)\n self.logger.debug(\"Path: %s\\nSize: %s\", msg.path, os.stat(msg.path).st_size)\n if os.stat(msg.path).st_size == 0:\n os.remove(msg.path)\n tg_msg = self.bot.bot.send_message(tg_dest,\n msg_template % (\"Error: Empty %s received. (MS01)\" % msg.type))\n else:\n if not msg.text:\n if msg.type == MsgType.Image:\n msg.text = \"sent a picture.\"\n elif msg.type == MsgType.Sticker:\n msg.text = \"sent a sticker.\"\n if msg.mime == \"image/gif\":\n tg_msg = self.bot.bot.sendDocument(tg_dest, msg.file, caption=msg_template % msg.text)\n else:\n try:\n tg_msg = self.bot.bot.sendPhoto(tg_dest, msg.file, caption=msg_template % msg.text)\n except telegram.error.BadRequest:\n tg_msg = self.bot.bot.sendDocument(tg_dest, msg.file, caption=msg_template % msg.text)\n os.remove(msg.path)\n self.logger.debug(\"%s, process_msg_step_3_3\", xid)\n elif msg.type == MsgType.File:\n if os.stat(msg.path).st_size == 0:\n os.remove(msg.path)\n tg_msg = self.bot.bot.send_message(tg_dest,\n msg_template % (\"Error: Empty %s received. (MS02)\" % msg.type))\n else:\n if not msg.filename:\n file_name = os.path.basename(msg.path)\n msg.text = \"sent a file.\"\n else:\n file_name = msg.filename\n tg_msg = self.bot.bot.send_document(tg_dest, msg.file, caption=msg_template % msg.text,\n filename=file_name)\n os.remove(msg.path)\n elif msg.type == MsgType.Audio:\n if os.stat(msg.path).st_size == 0:\n os.remove(msg.path)\n return self.bot.bot.send_message(tg_dest,\n msg_template % (\"Error: Empty %s received. 
(MS03)\" % msg.type))\n msg.text = msg.text or ''\n self.logger.debug(\"%s, process_msg_step_4_1, no_conversion = %s\", xid,\n self._flag(\"no_conversion\", False))\n if self._flag(\"no_conversion\", False):\n self.logger.debug(\"%s, process_msg_step_4_2, mime = %s\", xid, msg.mime)\n if msg.mime == \"audio/mpeg\":\n tg_msg = self.bot.bot.sendAudio(tg_dest, msg.file, caption=msg_template % msg.text)\n else:\n tg_msg = self.bot.bot.sendDocument(tg_dest, msg.file, caption=msg_template % msg.text)\n else:\n pydub.AudioSegment.from_file(msg.file).export(\"%s.ogg\" % msg.path,\n format=\"ogg\",\n codec=\"libopus\",\n bitrate=\"65536\",\n parameters=[\"-vbr\", \"on\", \"-compression_level\", \"10\"])\n ogg_file = open(\"%s.ogg\" % msg.path, 'rb')\n tg_msg = self.bot.bot.sendVoice(tg_dest, ogg_file, caption=msg_template % msg.text)\n os.remove(\"%s.ogg\" % msg.path)\n os.remove(msg.path)\n elif msg.type == MsgType.Location:\n self.logger.info(\"---\\nsending venue\\nlat: %s, long: %s\\ntitle: %s\\naddr: %s\",\n msg.attributes['latitude'], msg.attributes['longitude'], msg.text, msg_template % \"\")\n tg_msg = self.bot.bot.sendVenue(tg_dest, latitude=msg.attributes['latitude'],\n longitude=msg.attributes['longitude'], title=msg.text,\n address=msg_template % \"\")\n elif msg.type == MsgType.Video:\n if os.stat(msg.path).st_size == 0:\n os.remove(msg.path)\n return self.bot.bot.send_message(tg_dest, msg_template % (\"Error: Empty %s recieved\" % msg.type))\n if not msg.text:\n msg.text = \"sent a video.\"\n tg_msg = self.bot.bot.sendVideo(tg_dest, msg.file, caption=msg_template % msg.text)\n os.remove(msg.path)\n elif msg.type == MsgType.Command:\n buttons = []\n for i, ival in enumerate(msg.attributes['commands']):\n buttons.append([telegram.InlineKeyboardButton(ival['name'], callback_data=str(i))])\n tg_msg = self.bot.bot.send_message(tg_dest, msg_template % msg.text,\n reply_markup=telegram.InlineKeyboardMarkup(buttons))\n self.msg_status[\"%s.%s\" % (tg_dest, tg_msg.message_id)] = Flags.COMMAND_PENDING\n self.msg_storage[\"%s.%s\" % (tg_dest, tg_msg.message_id)] = {\"channel\": msg.channel_id,\n \"text\": msg_template % msg.text,\n \"commands\": msg.attributes['commands']}\n else:\n tg_msg = self.bot.bot.send_message(tg_dest, msg_template % \"Unsupported incoming message type. 
(UT01)\")\n self.logger.debug(\"%s, process_msg_step_4\", xid)\n if msg.source in (MsgSource.User, MsgSource.Group):\n msg_log = {\"master_msg_id\": \"%s.%s\" % (tg_msg.chat.id, tg_msg.message_id),\n \"text\": msg.text or \"Sent a %s.\" % msg.type,\n \"msg_type\": msg.type,\n \"sent_to\": \"Master\",\n \"slave_origin_uid\": \"%s.%s\" % (msg.channel_id, msg.origin['uid']),\n \"slave_origin_display_name\": msg.origin['alias'],\n \"slave_member_uid\": msg.member['uid'],\n \"slave_member_display_name\": msg.member['alias'],\n \"slave_message_uid\": msg.uid}\n if tg_chat_assoced and append_last_msg:\n msg_log['update'] = True\n db.add_msg_log(**msg_log)\n self.logger.debug(\"%s, process_msg_step_5\", xid)\n except Exception as e:\n self.logger.error(repr(e) + traceback.format_exc())", "title": "" }, { "docid": "86342ebe2f78e184dd11ddca352f0e85", "score": "0.60419613", "text": "def received(self, message):\n # print \"Received: \" + str(message)\n if message is None: # The input thread calls this function with None as\n # the argument when the socket closes, so we take the opportunity\n # to clean everything up.\n self.cleanup()\n return\n if message[\"_type\"] == 2: # Response to a command we presumably sent\n # with query, so look up the function waiting for the response, if\n # one exists, and call it.\n f = self.query_map.get(message[\"_id\"], None)\n if f:\n # There was a function, so remove it from the map of functions\n # waiting for responses\n del self.query_map[message[\"_id\"]]\n with print_exceptions:\n if message.get(\"_error\"): # Error happened, so call the\n # function with an exception\n f(exceptions.CommandErrorException(message[\"_error\"][\"text\"]))\n else: # Response was successful; pass it into the function\n f(message)\n if message[\"_type\"] in [1, 3]: # Command or notification\n # TODO: consider merging this part of this class with\n # local.RemoteConnection since it's substantially the same. Maybe\n # even consider having the command/response system be its own layer\n # underneath this class, which would then function purely as the\n # layer on top of that that deals with Autobus-specific stuff.\n processor = getattr(self, \"process_\" + message[\"_command\"], None)\n if not processor:\n print \"Invalid message received and ignored. 
Command: %s\" % message[\"_command\"]\n return\n processor(message)", "title": "" }, { "docid": "618f7b26305afd47378f0552fd8dd12b", "score": "0.6039737", "text": "async def _process_message(self, message):\n service, message = tuple(message)\n message = jsonapi.loads(message)\n\n # setup heartbeat settings for this service (or update it)\n # any message is treated as ping\n service_ping = {\n \"last_ping\": self._now(),\n \"liveness\": self._conf.ping_max_liveness,\n }\n\n if service in self._services_ping:\n self._services_ping[service].update(service_ping)\n else:\n service_ping[\"alive\"] = False\n service_ping[\"id\"] = -1\n self._services_ping[service] = service_ping\n\n # do not propagate ping messages\n if message.get(\"event\", None) == \"ping\":\n identity = message.get(\"data\")[\"id\"]\n\n if (\n \"alive\" in self._services_ping[service]\n and not self._services_ping[service][\"alive\"]\n ):\n self._services_ping[service][\"alive\"] = True\n\n if (\n \"id\" in self._services_ping[service]\n and self._services_ping[service][\"id\"] != identity\n ):\n self._services_ping[service][\"id\"] = identity\n\n if self._connection_callback:\n await self._connection_callback(service.decode(), True)\n\n return\n\n await self.recv(service, message)", "title": "" }, { "docid": "c962494cf287a02e66dcec0a035ebd98", "score": "0.6028927", "text": "def handle_inbox(self, item):\n logger.info(\"Received inbox item\")\n\n # Private Message\n if isinstance(item, praw.models.Message):\n self.handle_private_message(item)\n\n # Username Mention\n if isinstance(item, praw.models.Comment):\n self.handle_username_mention(item)", "title": "" }, { "docid": "2248d22022ef7c1d846e6a0362e4b2bb", "score": "0.60257536", "text": "async def __handle_messages(self, message, edited_message=None):\n\n message = edited_message or message\n\n if not message.guild or message.author.bot:\n return\n\n if not get_generator_response(\n self.generator, MessageResponseGenerator, message\n ):\n return\n\n if self.delete_message:\n await message.delete()\n\n member_warnings = (\n self._cache.setdefault(message.guild.id, {}).get(message.author.id, 0) + 1\n )\n self._cache[message.guild.id][message.author.id] = member_warnings\n\n await self.call_event(\"on_inappropriate_message\", message, member_warnings)\n\n if punishment := get_relevant_punishment(self.punishments, member_warnings):\n await punishment.punishment_manager.punish(\n message, message.author, punishment\n )", "title": "" }, { "docid": "eaab22ebe30d956a1ce83c2305bbb01b", "score": "0.6014569", "text": "def processMsg(message):\n room = session.get('room')\n clientID = message['ID']\n # TODO: implement sentiment analysis functionality here\n # the messages 'parrnerKick' and 'tooMuchHate' are emitted from here\n msg_score = sentanalysis.analyze(message[\"msg\"])\n session['sent_score'] += msg_score\n emit('relayMsg', {\"ID\": clientID, \"msg\": message[\"msg\"]}, room=room)\n if msg_score < -2:\n overall_neg = session['sent_score'] < -4 and session['strikes'] >= 3\n too_much_neg = session['strikes'] >= 6\n if overall_neg or too_much_neg:\n # Inform the active client & partner that the client is kicked\n emit(\"kicked\", {\"ID\": clientID}, room=room)\n else:\n session['strikes'] += 1\n emit(\"tooMuchHate\", {\"ID\": clientID})", "title": "" }, { "docid": "e080954e9df4f7eee6d11393d916dfb2", "score": "0.6000843", "text": "def application_message(self, bus, msg):\n msgtype = msg.structure.get_name()\n if msgtype == 'partial_result':\n 
self.partial_result(msg.structure['hyp'], msg.structure['uttid'])\n elif msgtype == 'result':\n self.final_result(msg.structure['hyp'], msg.structure['uttid'])\n #self.pipeline.set_state(gst.STATE_PAUSED)", "title": "" }, { "docid": "1f57008c69d13def7c821d714578cca1", "score": "0.5997177", "text": "def incoming(self):\n #\n # EVENTS\n #\n #\n if self.message.get(\"event\") == \"onMessage\":\n # No Group Messages\n if not self.message.get(\"data\", {}).get(\"isGroupMsg\"):\n # create message\n message, created = self.register_message()\n self.rocket = self.get_rocket_client()\n if not self.rocket:\n return HttpResponse(\"Rocket Down!\", status=503)\n\n # get a room\n room = self.get_room()\n if room:\n mimetypes_to_upload = [\n \"audio/ogg; codecs=opus\",\n \"application/pdf\",\n \"image/webp\",\n ]\n print(\"got room: \", room.room_id)\n #\n # MEDIA (PICTURE) MESSAGE\n #\n if self.message.get(\"data\", {}).get(\"isMedia\"):\n if settings.DEBUG:\n print(\"MEDIA FILE\")\n mime = self.message.get(\"data\", {}).get(\"mimetype\")\n # decrypt media\n data = self.decrypt_media()\n # we got data\n # HERE we send the media file\n #\n # if caption, send it too\n if data:\n file_sent = self.outcome_file(\n data,\n room.room_id,\n mime,\n description=self.message.get(\"data\", {}).get(\n \"caption\", None\n ),\n )\n if file_sent.ok:\n self.message_object.delivered = True\n self.message_object.save()\n else:\n file_sent = False\n\n #\n # PTT / OGG / VOICE OVER WHATSAPP\n\n elif (\n self.message.get(\"data\", {}).get(\"mimetype\")\n in mimetypes_to_upload\n ):\n if self.message.get(\"data\", {}).get(\"type\") == \"sticker\":\n if settings.DEBUG:\n print(\"STICKER! \")\n send_sticker_message = self.room_send_text(\n room.room_id, \"User sent sticker\"\n )\n if send_sticker_message.ok:\n self.message_object.delivered = True\n self.message_object.save()\n else:\n mime = self.message.get(\"data\", {}).get(\"mimetype\")\n if \"audio/ogg\" in mime:\n if self.connector.config.get(\n \"auto_answer_on_audio_message\", False\n ):\n message = {\n \"msg\": self.connector.config.get(\n \"auto_answer_on_audio_message\"\n )\n }\n deliver = self.outgo_text_message(message)\n if self.connector.config.get(\n \"convert_incoming_audio_to_text\"\n ):\n deliver = self.outcome_text(\n room.room_id,\n self.connector.config.get(\n \"convert_incoming_audio_to_text\"\n ),\n )\n # decrypt media\n data = self.decrypt_media()\n # we got data\n if data:\n file_sent = self.outcome_file(data, room.room_id, mime)\n else:\n file_sent = False\n # if file was sent\n if file_sent.ok:\n self.message_object.delivered = True\n self.message_object.save()\n #\n # SEND LOCATION\n #\n elif (\n self.message.get(\"data\", {}).get(\"mimetype\") is None\n and self.message.get(\"data\", {}).get(\"type\") == \"location\"\n ):\n lat = self.message.get(\"data\", {}).get(\"lat\")\n lng = self.message.get(\"data\", {}).get(\"lng\")\n link = \"https://www.google.com/maps/search/?api=1&query={0}+{1}\".format(\n lat, lng\n )\n text = \"Lat:{0}, Long:{1}: Link: {2}\".format(lat, lng, link)\n self.outcome_text(room.room_id, text)\n #\n #\n # TEXT ONLY MESSAGE\n #\n else:\n if self.message.get(\"data\", {}).get(\"quotedMsg\"):\n quote_type = (\n self.message.get(\"data\", {})\n .get(\"quotedMsg\")\n .get(\"type\")\n )\n if settings.DEBUG:\n print(\"MESSAGE IS A REPLY. 
TYPE: \", quote_type)\n if quote_type == \"chat\":\n quoted_body = (\n self.message.get(\"data\", {})\n .get(\"quotedMsg\")\n .get(\"body\")\n )\n if self.connector.config.get(\n \"outcome_message_with_quoted_message\", True\n ):\n message = \":arrow_forward: IN RESPONSE TO: {0} \\n:envelope: {1}\"\n message = message.format(\n quoted_body,\n self.get_message_body(),\n )\n else:\n message = self.get_message_body()\n elif quote_type in [\"document\", \"image\", \"ptt\"]:\n message = \"DOCUMENT RESENT:\\n {0}\".format(\n self.get_message_body()\n )\n quoted_id = self.message.get(\"data\", {}).get(\n \"quotedMsg\"\n )[\"id\"]\n quoted_mime = self.message.get(\"data\", {}).get(\n \"quotedMsg\"\n )[\"mimetype\"]\n data = self.decrypt_media(quoted_id)\n # we got data\n # HERE we send the media file\n #\n if data:\n file_sent = self.outcome_file(\n data, room.room_id, quoted_mime\n )\n else:\n file_sent = False\n else:\n message = self.get_message_body()\n deliver = self.outcome_text(room.room_id, message)\n if settings.DEBUG:\n print(\"DELIVER OF TEXT MESSAGE:\", deliver.ok)\n\n # here we get regular events (Battery, Plug Status)\n if self.message.get(\"event\") == \"onBattery\":\n self.rocket = self.get_rocket_client()\n if not self.rocket:\n return HttpResponse(\"Rocket Down!\", status=503)\n # this prevent some bogus request from wa after logout on this event\n if self.message.get(\"data\") and int(self.message.get(\"data\")):\n text_message = \":battery:\\n:satellite: Battery level: {0}%\".format(\n self.message.get(\"data\")\n )\n self.outcome_admin_message(text_message)\n\n # here we get regular events (Battery, Plug Status)\n if self.message.get(\"event\") == \"onPlugged\":\n self.rocket = self.get_rocket_client()\n if not self.rocket:\n return HttpResponse(\"Rocket Down!\", status=503)\n\n if self.message.get(\"data\") is True:\n text_message = \":radioactive:\\n:satellite: Device is charging\"\n self.outcome_admin_message(text_message)\n if self.message.get(\"data\") is False:\n text_message = \":electric_plug:\\n:satellite: Device is unplugged\"\n self.outcome_admin_message(text_message)\n\n # when device logged out\n if self.message.get(\"event\") == \"onLogout\":\n self.rocket = self.get_rocket_client()\n if not self.rocket:\n return HttpResponse(\"Rocket Down!\", status=503)\n text_message = (\n \":warning::warning::warning::warning:\\n:satellite: Device Logged Out!\"\n )\n self.outcome_admin_message(text_message)\n\n #\n # ADMIN / CONNECTION MESSAGES\n #\n\n # state changed\n if self.message.get(\"event\") == \"onStateChanged\":\n # for some reason, some times, the lib sends this very frequently\n # https://github.com/open-wa/wa-automate-nodejs/issues/949\n if self.message.get(\"data\") not in [\"TIMEOUT\", \"CONNECTED\"]:\n self.rocket = self.get_rocket_client()\n if not self.rocket:\n return HttpResponse(\"Rocket Down!\", status=503)\n\n text_message = (\n \":information_source:\\n:satellite: {0} > {1}: {2} \".format(\n self.message.get(\"sessionId\"),\n self.message.get(\"event\"),\n self.message.get(\"data\"),\n )\n )\n self.outcome_admin_message(text_message)\n\n # incoming call\n if self.message.get(\"event\") == \"onIncomingCall\":\n self.register_message()\n self.rocket = self.get_rocket_client()\n if not self.rocket:\n return HttpResponse(\"Rocket Down!\", status=503)\n self.get_room()\n if self.connector.config.get(\"auto_answer_incoming_call\"):\n message = {\n \"msg\": self.connector.config.get(\"auto_answer_incoming_call\")\n }\n self.outgo_text_message(message)\n if 
self.connector.config.get(\"convert_incoming_call_to_text\"):\n # change the message to the custom one\n # adapt to be like a regular incoming\n # this event doesnt come with the name, lets get it from the api\n visitor_id = self.message.get(\"data\", {}).get(\"peerJid\")\n payload = {\"args\": {\"contactId\": visitor_id}}\n session = self.get_request_session()\n url = self.connector.config[\"endpoint\"] + \"/getContact\"\n r = session.post(url, json=payload)\n if r.ok:\n visitor_name = r.json()[\"response\"][\"formattedName\"]\n self.message = {\n \"ts\": int(time.time()),\n \"event\": \"onMessage\",\n \"data\": {\n \"body\": self.connector.config.get(\n \"convert_incoming_call_to_text\"\n ),\n \"from\": visitor_id,\n \"isGroup\": False,\n \"id\": self.message.get(\"id\"),\n \"sender\": {\"name\": visitor_name},\n },\n }\n self.incoming()\n\n #\n # LAUNCH EVENTS AND QRCODE\n #\n if self.message.get(\"namespace\") and self.message.get(\"data\"):\n # get rocket or return error\n self.rocket = self.get_rocket_client()\n if not self.rocket:\n return HttpResponse(\"Rocket Down!\", status=503)\n message = self.message\n # OPEN WA REDY. Get unread messages\n if \"@OPEN-WA ready\" in message.get(\"data\"):\n print(\"INITIATING INTAKE UNREAD TASK\")\n tasks.intake_unread_messages.delay(self.connector.id)\n\n if message.get(\"namespace\") == \"qr\":\n # recreate qrcode image. WA-Automate doesnt work on older phones\n # too small, can't focus.\n code = self.get_qrcode_from_base64(message.get(\"data\"))\n base64_fixed_code = self.generate_qrcode(code)\n self.outcome_qrbase64(base64_fixed_code)\n else:\n text_message = \":information_source:\\n:satellite: {0} > {1}: {2} \".format(\n message.get(\"sessionId\"),\n message.get(\"namespace\"),\n message.get(\"data\")\n if message.get(\"data\") != \"SUCCESS\"\n else \"\"\":white_check_mark::white_check_mark::white_check_mark:\n SUCESS!!! :white_check_mark::white_check_mark::white_check_mark:\"\"\",\n )\n self.outcome_admin_message(text_message)\n\n return JsonResponse({})", "title": "" }, { "docid": "024ff1f5d3855bdb199de65f180c052c", "score": "0.59936064", "text": "def _on_message(self, message):\n print(\"RECEIVED on \" + self.session_name + \":\")\n message_json = json.loads(message)\n data = json.dumps(message_json, sort_keys=True, indent=2, separators=(',', ':'))\n\n for singleMsg in message_json:\n print(singleMsg)\n try:\n #print(singleMsg['UpdateType'], singleMsg['Fields'])\n data = singleMsg['Fields']\n data_type = singleMsg['UpdateType']\n try:\n self._write_market_data(data, data_type)\n except:\n error = traceback.format_exc()\n print(error)\n measurement = \"refinitiv_\" + data_type + \"_\" + self.ric\n logger(measurement, error, self.ric)\n except:\n pass\n self._process_message(singleMsg)", "title": "" }, { "docid": "7d7a7b9ae8732e3ca2082ebd9008b04f", "score": "0.599273", "text": "def handle_messages(self):\r\n raise NotImplementedError # pragma: nocover\r", "title": "" }, { "docid": "78f0ea53bbfb72d1362bd0600fe83204", "score": "0.5984562", "text": "def on_ws_message(self, message):\r\n\t\t\t\t# Check what type of data we are receiving\r\n\t\tif self.opcode == TEXT_DATA_OPCODE:\r\n\r\n\t\t\t# This is the main processing of messages being received to be handled. Most\r\n\t\t\t# work is done inside of parseMessage and just a response is created. There is\r\n\t\t\t# a special case below for uploading files over the websocket. 
TODO: We could\r\n\t\t\t# probably fix this by making JsonMessage a member variable of this class rather than static functions.\r\n\t\t\tmessage_response = MessageParser.parseMessage(message, None, self.logger)\r\n\r\n\t\t\t# TODO: We should send an error message here because we couldn't handle the message.\r\n\t\t\tself.send_message( json.dumps(message_response) )", "title": "" }, { "docid": "6b7f361dd7fa4effc12638d9cfb7cd8a", "score": "0.5980388", "text": "def handle_message(self, msg, status):", "title": "" }, { "docid": "79b0fa00f708c605deac4053e7447bc6", "score": "0.5972509", "text": "def handle_message(self, message):\n        self.trigger('message')\n\n        try:\n            stomp_message = self.stomp.decode(message.content)\n        except StompAccessDenied as exc:\n            self.log(exc.message)\n            self.send(self.stomp.connect_frame(self.login, self.token, **{\"product\": self.__client__}))\n            return\n        except StompExchangeNotFound as exc:\n            self.log(exc.message)\n            self.disconnect()\n            return\n        except StompError as exc:\n            self.log(exc.message)\n            self.disconnect()\n            return\n\n        if stomp_message.command == 'CONNECTED':\n            self.log('STOMP Session successfully started')\n            destination = \"/exchange/{}.subscribe\".format(self.username)\n            self.send(self.stomp.subscribe_frame(destination))\n            self.log('Listening on {} messages'.format(self.username))\n            self.trigger('start_listening')\n\n        elif stomp_message.command == 'MESSAGE':\n            self.process_message(stomp_message)\n\n        elif stomp_message.command == 'ERROR':\n            self.log(message.content)\n\n        else:\n            self.log(stomp_message)", "title": "" }, { "docid": "47305fd5576808a5cb62bc63437d0172", "score": "0.5967261", "text": "def process_messages(self, messages):\n        message_ids = [m.message_id for m in messages]\n        extra = {\n            \"message_ids\": message_ids,\n            \"queue_name\": self.queue.name,\n            \"job_name\": self.job_name,\n        }\n        logger.debug(\n            \"Processing batch for %s.%s\",\n            self.queue.name,\n            self.job_name,\n            extra=extra,\n        )\n\n        try:\n            self.process_batch(messages)\n        except Exception:\n            logger.exception(\n                \"Error while processing batch for %s.%s\",\n                self.queue.name,\n                self.job_name,\n                extra=extra,\n            )\n            return False\n        else:\n            return True", "title": "" }, { "docid": "0a63a89addd24398904f96b5e6bc49a6", "score": "0.5952686", "text": "def msg_sent_handler(self, payload):\n        log.debug('msg_sent_handler function started')\n        stream_type = payload['payload']['messageSent']['message']['stream']['streamType']\n        message_sent_data = payload['payload']['messageSent']['message']\n        if str(stream_type) == 'ROOM':\n            for listener in self.room_listeners:\n                listener.on_room_msg(message_sent_data)\n        elif str(stream_type) == 'POST':\n            for listener in self.wall_post_listeners:\n                listener.on_wall_post_msg(message_sent_data)\n        else:\n            for listener in self.im_listeners:\n                listener.on_im_message(message_sent_data)", "title": "" }, { "docid": "313b52c1c03280b94e46757846a4f63f", "score": "0.5932804", "text": "def process_message(data):\n    global tasks\n    channel = data[\"channel\"]\n    text = data[\"text\"]\n\n    # Treat DM (direct message \"channels\") separately, like the TODO app\n    if channel.startswith(\"D\"):\n        if channel not in tasks.keys():\n            tasks[channel] = []\n        # do command stuff\n        if text.startswith(\"todo\"):\n            tasks[channel].append(text[5:])\n            outputs.append([channel, \"added\"])\n        if text == \"tasks\":\n            output = \"\"\n            counter = 1\n            for task in tasks[channel]:\n                output += \"%i) %s\\n\" % (counter, task)\n                counter += 1\n            outputs.append([channel, output])\n        if text == \"fin\":\n            tasks[channel] = []\n        if 
text.startswith(\"done\"):\n num = int(text.split()[1]) - 1\n tasks[channel].pop(num)\n if text == \"show\":\n print(tasks)\n json.dump(tasks, open(FILE, \"wb\"))\n # Treat other messages as roll call commands\n elif channel == HOME_CHANNEL:\n print(text)\n if re.match(r'^\\s*here\\b|pre?se?nt\\b|yo\\b', text, flags=re.IGNORECASE):\n tasks[channel].append(text[5:])\n outputs.append([channel, \"Welcome to class\"])", "title": "" }, { "docid": "cdaea9a52e4df786c0081e7c0563508e", "score": "0.5927449", "text": "def processIncoming(self):\n while self.queue.qsize():\n try:\n msg = self.queue.get(0)\n self.editor.insertLine(str(msg))\n except Queue.Empty:\n pass", "title": "" }, { "docid": "da75aad4aa1c94d4f55354499323707d", "score": "0.59172297", "text": "def processMessage(self, command: str, msg):\n pass", "title": "" }, { "docid": "c37b74662744836ecc1f9462ce7f0d5b", "score": "0.5915504", "text": "def statemachine(self, states: dict, q_pckt: MsgQueuePacket) -> None:\n\n if q_pckt.msg_type in states.keys():\n logger.info(\"Passing message to handler\")\n states[q_pckt.msg_type](q_pckt)\n else:\n logger.error(\"Unknown message type!\")", "title": "" }, { "docid": "958ca22bf00be37484f6ced6d5468b6b", "score": "0.5907349", "text": "def post_message(self, msg: pyimc.Message):\n if pyimc.Message in type(msg).__bases__:\n try:\n for fn in self._subs[type(msg)]:\n fn(msg)\n for fn in self._subs[pyimc.Message]:\n fn(msg)\n except KeyError:\n pass\n elif type(msg) is pyimc.Message:\n # Subscriptions to pyimc.Message receives all messages\n try:\n for fn in self._subs[pyimc.Message]:\n fn(msg)\n except KeyError:\n pass\n else:\n logger.warning('Unknown IMC message received: {} ({}) from {}'.format(msg.msg_name, msg.msg_id, msg.src))", "title": "" }, { "docid": "e4ab2972567afa18afe5250730ae28ec", "score": "0.59067786", "text": "def _message_event_handler(self, event):\n channel = event['channel']\n if channel[0] not in 'CGD':\n log.warning(\"Unknown message type! Unable to handle %s\", channel)\n return\n\n subtype = event.get('subtype', None)\n\n if subtype in (\"message_deleted\", \"channel_topic\", \"message_replied\"):\n log.debug(\"Message of type %s, ignoring this event\", subtype)\n return\n\n if subtype == \"message_changed\" and 'attachments' in event['message']:\n # If you paste a link into Slack, it does a call-out to grab details\n # from it so it can display this in the chatroom. These show up as\n # message_changed events with an 'attachments' key in the embedded\n # message. We should completely ignore these events otherwise we\n # could end up processing bot commands twice (user issues a command\n # containing a link, it gets processed, then Slack triggers the\n # message_changed event and we end up processing it again as a new\n # message. 
This is not what we want).\n log.debug(\n \"Ignoring message_changed event with attachments, likely caused \"\n \"by Slack auto-expanding a link\"\n )\n return\n\n if 'message' in event:\n text = event['message'].get('text', '')\n user = event['message'].get('user', event.get('bot_id'))\n else:\n text = event.get('text', '')\n user = event.get('user', event.get('bot_id'))\n\n text, mentioned = self.process_mentions(text)\n\n text = self.sanitize_uris(text)\n\n log.debug('Saw an event: %s', pprint.pformat(event))\n log.debug('Escaped IDs event text: %s', text)\n\n msg = Message(\n text,\n extras={\n 'attachments': event.get('attachments'),\n 'slack_event': event,\n },\n )\n\n if channel.startswith('D'):\n if subtype == \"bot_message\":\n msg.frm = SlackBot(\n self.sc,\n bot_id=event.get('bot_id'),\n bot_username=event.get('username', '')\n )\n else:\n msg.frm = SlackPerson(self.sc, user, event['channel'])\n msg.to = SlackPerson(self.sc, self.username_to_userid(self.sc.server.username),\n event['channel'])\n channel_link_name = event['channel']\n else:\n if subtype == \"bot_message\":\n msg.frm = SlackRoomBot(\n self.sc,\n bot_id=event.get('bot_id'),\n bot_username=event.get('username', ''),\n channelid=event['channel'],\n bot=self\n )\n else:\n msg.frm = SlackRoomOccupant(self.sc, user, event['channel'], bot=self)\n msg.to = SlackRoom(channelid=event['channel'], bot=self)\n channel_link_name = msg.to.name\n\n msg.extras['url'] = f'https://{self.sc.server.domain}.slack.com/archives/' \\\n f'{channel_link_name}/p{self._ts_for_message(msg).replace(\".\", \"\")}'\n\n self.callback_message(msg)\n\n if mentioned:\n self.callback_mention(msg, mentioned)", "title": "" }, { "docid": "0deaafb79b33d0f763250f5a8f638ea4", "score": "0.5905196", "text": "def process_update(self, update: types.Update):\n if update.message or update.edited_message:\n message = update.message or update.edited_message\n self._sender = message.sender\n self._chat = message.chat\n self._last_message_id = message.message_id\n self._process_private_message(message)\n\n elif update.channel_post or update.edited_channel_post:\n post = update.channel_post or update.edited_channel_post\n self._sender = post.sender\n self._chat = post.chat\n self._last_message_id = post.message_id\n self.handle_channel_post(post)\n\n elif update.inline_query:\n self._sender = update.inline_query.sender\n self.handle_inline_query(update.inline_query)\n\n elif update.chosen_inline_result:\n self._sender = update.chosen_inline_result.sender\n self.handle_chosen_inline_result(update.chosen_inline_result)\n\n elif update.callback_query:\n self._sender = update.callback_query.sender\n if update.callback_query.message:\n self._chat = update.callback_query.message.chat\n self._process_callback_query(update.callback_query)\n\n elif update.shipping_query:\n self._sender = update.shipping_query.sender\n self.handle_shipping_query(update.shipping_query)\n\n elif update.pre_checkout_query:\n self._sender = update.pre_checkout_query.sender\n self.handle_pre_checkout_query(update.pre_checkout_query)\n\n else:\n raise RuntimeError(\"Unsupported update request from telegram: {}\".format(update))", "title": "" }, { "docid": "d22f6c7a49968cb2b6d3c75dd619d353", "score": "0.5901159", "text": "def _process_message(self, message: str) -> None:\n\n move = moves.move_from_json_string(message)\n if move is None:\n return\n\n event = events.event_from_move(move, self._actor_id)\n if event is None:\n return\n\n self._engine.handle_event(event)", "title": "" }, { "docid": 
"d27ea1151b749a74ec804ff4b9fd25e3", "score": "0.58991116", "text": "def _handleMessage(self, msg):\n #msg_type = msg['type']\n #logger.warning(f\"unhandled message type: {msg['type']}\")", "title": "" }, { "docid": "8e3bd7cc9093017519832f9d7f058d15", "score": "0.58917636", "text": "def _handle_valid_message(self, message: Any) -> Any:\n\n if isinstance(message, GetLoadMessage):\n self.clock.receive_message(message.clock)\n return self._make_message(LoadMessage, self.get_load())\n elif isinstance(message, JobMessage):\n timestamp = self.clock.receive_message(message.clock)\n log_message = f\"Received job {message.job}\"\n self._log(timestamp, log_message)\n queued = self._queue_job(message.job)\n\n if queued:\n return self._make_message(JobAcceptedMessage)\n else:\n return self._offload_job_to_gs(message.job)\n elif isinstance(message, GiveJobMessage):\n timestamp = self.clock.receive_message(message.clock)\n log_message = f\"Received give job message\"\n self._log(timestamp, log_message)\n\n try:\n job = self.job_queue.get(block=False)\n except queue.Empty:\n job = None\n\n return self._make_message(OptionalJobMessage, job)\n else:\n timestamp = self.clock.register_event()\n log_message = f\"Received unknown message: {message}\"\n self._log(timestamp, log_message)\n return self._make_message(UnknownMessageError)", "title": "" }, { "docid": "b0d8a38d62b6b9cd62c506e0e74b7b5d", "score": "0.5885278", "text": "def handleMessage(self, message):\r\n # Check the message and it to the queue.\r\n if self.strict:\r\n if not message.m_type in halMessage.valid_messages:\r\n msg = \"Invalid message type '\" + message.m_type\r\n msg += \"' received from \" + message.getSourceName()\r\n raise halExceptions.HalException(msg)\r\n\r\n validator = halMessage.valid_messages[message.m_type].get(\"data\")\r\n halMessage.validateData(validator, message)\r\n \r\n message.logEvent(\"queued\")\r\n\r\n self.queued_messages.append(message)\r\n\r\n # Start the message timer, if it is not already running.\r\n self.startMessageTimer()", "title": "" }, { "docid": "6742980f43a15e8872306e43d0d27acf", "score": "0.58832127", "text": "def process_message(self, message):\n if message == MSG_NULL:\n # Empty message, remove client from list\n self.disconnect_client()\n elif message == MSG_PING:\n # Simple handshake\n self._write(MSG_PONG)\n elif MSG_RESOURCES_PREFIX in message:\n msg_payload = message.split(MSG_RESOURCES_PREFIX)[1]\n resources_list, name = msg_payload.split(MSG_SEPARATOR)\n resources_list = resources_list.split(',')\n logger.debug(\"delegate message with: {}\".format(resources_list))\n self.server.update_resources(self, resources_list, name)\n self._write(MSG_OK)\n return", "title": "" }, { "docid": "63f40d5fa6ea033a0ca0cad99024a35a", "score": "0.587121", "text": "def _on_message(self, message):\n message_json = json.loads(message)\n self.log(logging.DEBUG, 'Receive message from Web Socket')\n for singleMsg in message_json:\n self._process_message(singleMsg)", "title": "" }, { "docid": "4e5d872392ca93ab20859d78407b15d5", "score": "0.58677006", "text": "def process_message(self, message):\n\n # add to received objects\n msg_set = self._recvd_or_sent[message.content.id]\n msg_set.add(message.sender)\n new_message = Message(message.content, self.pid, message.is_block)\n\n # print \"Processing message id {} by peer {} sent by {}\".format(message.content.id, self.pid, message.sender)\n # Process as per type of message\n if not message.is_block:\n self._blockchain.add_transaction(message.content)\n else:\n if 
self._blockchain.add_block(message.content):\n self._block_timer.cancel()\n self.gen_block()\n self._blockchain.print_longest_chain()\n\n # send to connected peers, with conditions\n for p in self._connected_peers_ptrs:\n if p not in msg_set:\n # send to this!\n msg_set.add(p)\n p_recv_ptr = self._connected_peers_ptrs[p]\n delay = self._get_delay(self.pid, p, message.is_block)\n new_message.send(p_recv_ptr, delay)", "title": "" }, { "docid": "79c3de93963a8a8aa84acd400222e9a4", "score": "0.5837384", "text": "def __on_message(self, ws, message):\n\t\tparsed_message = json.loads(message)\n\t\tprint('received: ' + message)\n\t\tif 'state' in parsed_message:\n\t\t\tstate = parsed_message['state']\n\t\t\tif state == 'listening':\n\t\t\t\tself.__listening = True\n\t\t\telse:\n\t\t\t\tself.__listening = False\n\n\t\tif 'results' in parsed_message:\n\t\t\t# TODO: pass this in!\n\t\t\tresult_index = 0\n\t\t\tif 'result_index' in parsed_message:\n\t\t\t\tresult_index = parsed_message['result_index']\n\n\t\t\tresults = parsed_message['results'][0]\n\t\t\tfinal_results = False\n\t\t\tif 'final' in results:\n\t\t\t\tfinal_results = results['final']\n\t\t\talternatives = []\n\t\t\tif 'alternatives' in results:\n\t\t\t\talternatives = results['alternatives']\n\n\t\t\ttranscripts = [self.__get_transcript_from_alternative(alternative) for alternative in alternatives]\n\t\t\tspeech_result = SpeechResult(transcripts, result_index)\n\n\t\t\tfor callback in self.__callbacks:\n\t\t\t\tcallback(speech_result, final_results)", "title": "" }, { "docid": "cb0a9c4d3cc49a304acdf14e28e9abe3", "score": "0.5831866", "text": "def __on_message(self, data):\n if data[\"type\"] in (\"normal\", \"chat\"):\n # Got a message\n body = data[\"body\"].strip()\n if body:\n # Valid content, check the sender\n sender = data[\"from\"].full\n try:\n # Are we listening to this JID ?\n event = self.__waiting[sender].popleft()\n except (KeyError, IndexError):\n # Not waiting for a message from this JID,\n # treat the message in a the task thread\n self.__pool.enqueue(self.handle_message, sender, body)\n else:\n # Set the event, with the content of the message as data\n event.set(body)", "title": "" }, { "docid": "9c2a4c76411515b02b6a3ef9481f1570", "score": "0.58293813", "text": "def _on_message(self, message):\n msg = json.loads(message)\n try:\n stream = threading.current_thread().getName()\n # TICKER, ORDER BOOK, TRADES or CANDLES\n if 'ticker' in stream or \\\n 'depth' in stream or \\\n 'trade' in stream or \\\n 'kline' in stream:\n self._data[stream].update(msg)\n # AUTHENTICATED\n elif self._listenKey == stream:\n self._handle_auth_update(msg)\n else:\n self.logger.info('Update not handled for stream {}'.format(stream))\n self.logger.info('Message:\\n{}'.format(msg))\n except KeyError:\n pass\n except Exception as e:\n raise ExchangeException(self.name(), 'Exception caught while handling a websocket channel message',\n data=msg, orig_exception=e, logger=self.logger)", "title": "" }, { "docid": "9351ce0d31f282c1ed67968fc31c3d9a", "score": "0.5829037", "text": "def handle_message(self, msg):\n raise NotImplementedError", "title": "" }, { "docid": "d25ad699e149d341dbc06c534c88f3b8", "score": "0.58251023", "text": "def _parse_message_data(self, message, number):\n if message.get(\"type\") == \"message\":\n # 1 get the username\n subtype = message.get(\"subtype\")\n if subtype == \"bot_message\":\n username = message.get(\"username\") # Bot's name is stored in \"username\" property\n # TODO Bot's username to be deprecated\n else:\n 
username = self.get_user_display_name(message.get(\"user\"))\n\n # 2 does msg have replies\n # files, parent msg and threads behave differently\n # if there was a reply on the thread it will include \"reply_count\" property\n # but not all parent msg have \"reply_count\" property\n # we can establish whether this is a parent or a child msg by comparing \"ts\" and \"thread_ts\"\n ts = message.get(\"ts\")\n thread_ts = message.get(\"thread_ts\")\n\n is_msg_parent = False\n if thread_ts is None or thread_ts == ts:\n is_msg_parent = True\n\n reply_count = message.get(\"reply_count\")\n\n # 3 get the timestamp\n msg_time = readable_datetime(float(message.get(\"ts\")), False, '%Y-%m-%d %H:%M:%S')\n\n # 4 get the text message\n text, pretext = None, None\n attachments = message.get(\"attachments\")\n if not attachments:\n text = message.get(\"text\")\n else:\n at = attachments[0] # only one in the list\n if at:\n pretext = at.get(\"pretext\")\n text = at.get(\"text\")\n\n file_permalink, file_name = None, None\n file_uploads = message.get(\"files\")\n if file_uploads:\n f = file_uploads[0] # only one in the list\n if f:\n file_permalink = \"File url {}\".format(f.get(\"permalink\"))\n file_name = u\"File name {}\".format(f.get(\"name\")) # name can be unicode\n\n # 4 use the jinja template to combine all the data in a string\n data = data_for_template(number, username, reply_count, msg_time, pretext, text, file_permalink, file_name,\n is_msg_parent)\n return data\n return None", "title": "" }, { "docid": "894b0acc0cf8ff3581cfc0b1bcf650c6", "score": "0.5821233", "text": "def extract_messages(self):\n for topic, msg, bag_time in self.bag.read_messages(topics=self.topics):\n if self.bag_time_start is None:\n self.bag_time_start = bag_time\n self.extract_pose_topics(topic, msg, bag_time)\n self.extract_imu_topics(topic, msg, bag_time)\n self.extract_twist_topics(topic, msg, bag_time)\n self.extract_motor_velocity_topics(topic, msg, bag_time)\n self.extract_waypoint_topics(topic, msg, bag_time)\n self.extract_wrench_topics(topic, msg, bag_time)\n self.bag_time_end = bag_time", "title": "" }, { "docid": "384df08864dea8134f550f625eab98df", "score": "0.5809198", "text": "def treatMessage(self, message):\n self.server.treatMessage(message)", "title": "" }, { "docid": "2f39ca9f7a0befbee78c1fd56dae4599", "score": "0.5804041", "text": "def handle_read(self):\n\n # receive a chunK of data with the max size chunk_size from our client.\n data = self.recv(self.chunk_size)\n\n if len(data) == 0:\n # this only happens when the connection is dropped\n self.handle_close()\n return\n\n self.data_to_read.append(data.decode(\"utf-8\"))\n\n messages = ''.join(self.data_to_read).split('\\n')\n\n self.data_to_read = []\n\n for mesg in messages:\n if len(mesg) < 2:\n continue\n if mesg[0] == '{' and mesg[-1] == '}':\n self.handle_json_message(mesg)\n else:\n self.data_to_read.append(mesg)", "title": "" }, { "docid": "9dff7d4ba7d26de0cd1a789193fce6b2", "score": "0.57587475", "text": "def handle_muc_message(self, message):\n body = message[\"body\"]\n\n self.run_handler(EVENT_MUC_MESSAGE, message, None)\n\n if not body or (body[0] != COMMAND_CHAR \\\n and not body.startswith(self.mention + \", \") \\\n and not body.startswith(self.mention + \": \") \\\n and not body.startswith(\"@\" + self.mention)):\n # None to handle\n return\n\n if body[0] == COMMAND_CHAR:\n command_n = body.split()[0][1:]\n arguments = body.split()[1:]\n else:\n command_n = body.split()[1]\n arguments = body.split()[2:]\n\n command = getattr(self, 
\"cmd_%s\" % command_n, None)\n message.command = command\n\n if command:\n self.log.info(\"muc command %s %r\" % (command_n, arguments))\n result = command(message, arguments)\n if result is not None:\n if isinstance(result, list):\n for r in result:\n self.reply(message, r)\n elif isinstance(result, basestring):\n self.reply(message, result)\n self.run_handler(EVENT_MUC_COMMAND, message, arguments)", "title": "" }, { "docid": "02952449f2721350350a48f05b5e5118", "score": "0.5758241", "text": "def process_message(self, message):\n extra = {\n \"message_id\": message.message_id,\n \"queue_name\": self.queue.name,\n \"job_name\": self.job_name,\n }\n logger.debug(\"Process %s.%s\", self.queue.name, self.job_name, extra=extra)\n\n try:\n content_type, job_kwargs, job_context = self.deserialize_message(message)\n extra[\"job_content_type\"] = content_type\n self.process(job_kwargs, job_context)\n except Exception:\n logger.exception(\n \"Error while processing %s.%s\",\n self.queue.name,\n self.job_name,\n extra=extra,\n )\n return False\n else:\n return True", "title": "" }, { "docid": "47c6fc44f584d92127d42ba98319398b", "score": "0.57576627", "text": "def _process_private_message(self, msg: types.Message):\n # New command received\n if msg.text and (msg.text.startswith('/') or msg.text in self._command_aliases):\n if msg.text.startswith('/'):\n cmd_name = msg.text[1:].split(' ')[0]\n if cmd_name in self._command_aliases:\n self.call_command(self._command_aliases[cmd_name], msg, 0)\n else:\n self.call_command(cmd_name, msg, 0)\n else:\n self.call_command(self._command_aliases[msg.text], msg, 0)\n\n # Simple message received\n else:\n # Try to restore current command from state\n if self.command_name:\n self.call_command(self.command_name, msg)\n\n # No current command, simply handle message\n else:\n self.handle_private_message(msg)", "title": "" }, { "docid": "32dfd13c29807bc9586d5b94f5a20cd0", "score": "0.5746532", "text": "def handle_message(self, msg, status):\n\n # Parse body into utf8\n body = ensure_unicode(msg.Body)\n\n # Debug\n logger.debug('[UserHandler] got: {}'.format(body))\n\n # If the separators are not specified, runs of consecutive\n # whitespace are regarded as a single separator\n args = body.split()\n\n # Empty msg?\n if not len(args):\n return False\n\n # Find command\n for name, cmd in self.commands.items():\n if name == args[0]:\n cmd(msg, status, args)\n return True\n\n return False", "title": "" }, { "docid": "b94340bee2cef9819ac8ad59eead7258", "score": "0.57450736", "text": "def process_request(self, msg_body):\n print(\"Processing request: {}\".format(msg_body))\n\n if \"command_name\" not in msg_body:\n raise ValueError(\"No command found in message body\")\n command_name = msg_body[\"command_name\"]\n if \"args\" not in msg_body:\n raise ValueError(\"No args found in message body\")\n args = msg_body[\"args\"]\n\n if command_name in self.callbacks:\n self.callbacks[command_name](**args)\n else:\n raise ValueError(\"Command {} not recognised.\".format(command_name))", "title": "" }, { "docid": "cad11f13007f58d7b342eb16ed4b3a52", "score": "0.57299674", "text": "def handleRequest(self, s, request, client):\n try:\n logging.info(\"HANDLING message from %s: %r\" %\n (client, repr(request)))\n\n try:\n req = json.loads(request)\n except:\n logging.exception(\"Invalid message from client\")\n return\n\n if not isinstance(req, dict):\n log(logging.ERROR, \"Invalid message format from client\")\n return\n\n if 'type' not in req:\n log(logging.ERROR, \"Message has no TYPE 
field\")\n return\n\n if req['type'] in self.messageTypes:\n self.messageTypes[req['type']](req, client)\n else:\n log(logging.ERROR, \"Invalid message type: \" +\n str(req['type']) + \" Should be one of: \" + str(self.messageTypes.keys()))\n client.sendResult({\"error\": \"unknown request\"})\n\n except Exception as e:\n logging.exception(\"Could not handle request\")", "title": "" }, { "docid": "d742036d6bf2f83da86232bb8ec24db8", "score": "0.57243276", "text": "def process_message(self, message):\n try:\n self.logger.debug(\n \"Received message %s with id %r.\", message, message.message_id\n )\n if not message.failed:\n self.wakeup(*message.args, **message.kwargs)\n except SkipMessage:\n self.logger.warning(\"Message %s was skipped.\", message)\n except BaseException as e:\n message.stuff_exception(e)\n finally:\n self.consumers[message.queue_name].post_process_message(message)\n self.work_queue.task_done()\n message.clear_exception()", "title": "" }, { "docid": "312d94d72e9747c22215f3689f63a6e4", "score": "0.57076514", "text": "def processData(self, data):\n container = proto.Container()\n container.ParseFromString(data)\n print \"Recieved Data:\", str(container)[0:100]\n ## Server disconnect message ##\n if container.HasField('disconnect'):\n messenger.send('disconnected', [container.disconnect])\n self.running = False\n self.s.close()\n # return because nothing else matters!\n return\n \n if container.HasField(\"chat\"):\n if container.chat.to.startswith(\"#\"):\n # Chat room\n print container.chat.to + \" <\" + container.chat.sender + \"> \" + container.chat.message\n else:\n # Direct PM\n print \"<\" + container.sender + \"> \" + container.message\n # Chunk Region City management #\n if container.HasField(\"newCityResponse\"):\n messenger.send(\"newCityResponse\", [container.newCityResponse])\n if container.HasField(\"newCity\"):\n messenger.send(\"newCity\", [container.newCity])\n elif container.HasField(\"unfoundCity\"):\n messenger.send(\"unfoundCity\", [container.unfoundCity])\n elif container.HasField(\"enterCity\"):\n messenger.send(\"enterCity\", [container.enterCity])\n # End Chunk Region City management #\n \n ## THE POSITION OF THIS IS VERY IMPORTANT ##\n if len(container.updatedTiles):\n messenger.send(\"updatedTiles\", [container.updatedTiles])\n ## ##\n \n if container.HasField(\"serverState\"):\n if container.serverState is 0:\n # Nothing running?! Lets get us some maps!\n container = proto.Container()\n container.requestMaps = 1\n # TODO: Notice of incoming files, or loading bar, or something\n elif container.serverState is 1:\n # A map is loaded so we will just request the game state.\n container = proto.Container()\n container.requestGameState = 0\n self.send(container)\n # Because this is a repeated field we need to check for length as it will always be present\n elif len(container.maps):\n maps = {}\n import base64\n for map in container.maps:\n maps[map.name] = (base64.b64decode(map.heightmap))\n messenger.send(\"onGetMaps\", [maps])\n elif container.HasField(\"loginResponse\"):\n if container.loginResponse.type is 1:\n # Awesome, Server returns an int with our user status. 
We will use this to set up the UI for extra goodies.\n messenger.send(\"setSelfAccess\", [container.loginResponse.usertype, container.loginResponse.username])\n #now we send a request to the server asking for the game state\n container = proto.Container()\n container.requestServerState = True\n self.send(container)\n else:\n # Got to think of an error message process for here\n messenger.send(\"loginError\", container.loginResponse.message)\n elif container.HasField(\"gameState\"):\n messenger.send(\"loadRegion\", [container.gameState])", "title": "" }, { "docid": "be83f3bfe6a4702ad71260dfbcc665d3", "score": "0.5702735", "text": "def _process_packet(self, envelope):\n\t\tif self.debug_log:\n\t\t\tprint('--------------------------------------')\n\t\t\tprint('Received')\n\t\t\tprint('========')\n\t\t\tprint(envelope.msg)\n\n\t\tfor receiver in envelope.receiver:\n\t\t\tif receiver.name in self._rx_queues:\n\t\t\t\tself._rx_queues[receiver.name].put(envelope.msg)\n\t\t\telse:\n\t\t\t\tprint('\\nERROR: could not deliver packet to `{}`'.format(receiver.name))\n\n\t\tif self.debug_log:\n\t\t\tprint('--------------------------------------')", "title": "" }, { "docid": "89ebf8d479c0faa548ed25b4ee9bf262", "score": "0.5702389", "text": "def data_received(self, data):\n super().data_received(data)\n lines = data.decode().split(\"\\r\\n\")\n for line in filter(len, lines):\n try:\n message = irc.Message.parse(str(line))\n except ValueError as e:\n self.error(\"%s\", e)\n else:\n self._handle_irc_message(message)", "title": "" }, { "docid": "cf1b8467faa14d70eab9261d1929874d", "score": "0.56925976", "text": "def _messengerThreadMain(self):\n while True:\n message = self.queue.get()\n if not self.isActive:\n return\n self.handle(message[0], message[1], message[2])", "title": "" }, { "docid": "9c928bb4aa8efe0d60cd3ba648c2a952", "score": "0.5683822", "text": "def lineReceived(self, data):\n\n logging.debug(\"Recived a message %s\", data)\n\n try:\n payload = json.loads(data)\n except ValueError:\n self.send_error(\"Malformed message: Could not decode JSON\")\n return\n\n if 'message_type' not in payload:\n self.send_error(\"Malformed message: missing message_type\")\n return\n\n message_type = payload['message_type']\n try:\n func = getattr(self, 'handle_' + message_type)\n except AttributeError:\n self.send_error(\n \"Invalid message type: %s\" %\n payload['message_type'])\n return\n func(payload) # Run the actual command", "title": "" }, { "docid": "c8f307428ae5df162b701c9d10b97264", "score": "0.56823874", "text": "def handle(self, message):", "title": "" }, { "docid": "e3a2c49bd396fb6eda6cf27b93fb8eb8", "score": "0.5675562", "text": "def _check_for_messages(self):\n LOG.entry_often('Client._check_for_messages', self._id)\n if self.get_state() != STARTED or not self._subscriptions:\n LOG.exit_often('Client._check_for_messages', self._id, None)\n return\n try:\n messages = self._messenger.receive(self._sock)\n if messages:\n LOG.debug(\n self._id,\n 'received {0} messages'.format(len(messages)))\n for i, message in enumerate(messages):\n LOG.debug(\n self._id,\n 'processing message {0}'.format(i))\n self._process_message(message)\n if i < (len(messages) - 1):\n # Unless this is the last pass around the loop, call\n # pop() so that Messenger has a chance to respond to\n # any heartbeat requests that may have arrived from the\n # server.\n self._messenger.pop(self._sock, True)\n except Exception as exc:\n LOG.error('Client._check_for_messages', self._id, exc)\n\n def next_tick(exc):\n 
LOG.error('Client._check_for_messages', self._id, exc)\n if self._on_state_changed:\n self._on_state_changed(self, ERROR, exc)\n if _should_reconnect(exc):\n self._reconnect()\n timer = threading.Timer(0.2, next_tick, [exc])\n timer.start()\n\n LOG.exit_often('Client._check_for_messages', self._id, None)", "title": "" }, { "docid": "ba04e0aceb57b9290fad9af814060d6f", "score": "0.5675315", "text": "def check_messages(self):\n self.lgr.info(\"Agent started\")\n while True:\n msg = self.consumer.poll(1.0)\n if msg is None:\n self.lgr.debug(\"Agent don't receive message\")\n continue\n if msg.error():\n self.lgr.warn(\"Consumer error: {}\".format(msg.error()))\n continue\n message = json.loads(msg.value().decode('utf-8'))\n self.lgr.info(\"Agent received Message\")\n self.lgr.info(\"\\n\" + json.dumps(message, indent=4))\n message_answer = self.analyze_message(message)\n self.lgr.info(\"Agent prepeared the answer on the message\")\n self.lgr.info(\"\\n\" + json.dumps(message_answer, indent=4))\n self.return_to_server(message_answer)", "title": "" }, { "docid": "0d3d9b5e202d16f5af75fad19bfa58f1", "score": "0.5673789", "text": "def messages(self, msg):\n for sock in self.READ_SOCKETS: # incoming message from remote server\n if sock == self.MASTER_SOCK:\n data = sock.recv(4096)\n if not data:\n print('\\nDisconnected from chat server')\n sys.exit()\n else: # print data\n print(data.decode(), end=\"\")\n else: # user entered a message\n self.MASTER_SOCK.sendall(msg)", "title": "" }, { "docid": "970d92a1e597580ec27a48d9bdc3c78f", "score": "0.5670051", "text": "def handle(self):\n self.log.debug('Handling message \"%s\": %s', self.cmdobj.cmd, str(self.cmdobj)[:64])\n\n if self.app.shuttingdown:\n raise Exception('The server is shutting down.')\n\n cmdname = self.cmdobj.cmd\n connid = self.connid\n twwcid = self.twwcid\n\n if connid == 0:\n # A message not from any player!\n if twwcid == 0:\n # Internal message, from tworld itself.\n stream = None\n else:\n # This is from tweb, not relayed from a player.\n # (This is the rare case where we use twwcid; we have no\n # other path back.)\n stream = self.app.webconns.get(twwcid)\n\n try:\n if twwcid and not stream:\n raise ErrorMessageException('Server message from completely unrecognized stream.')\n \n cmd = self.app.all_commands.get(cmdname, None)\n if not cmd:\n raise ErrorMessageException('Unknown server command: \"%s\"' % (cmdname,))\n \n if not cmd.isserver:\n raise ErrorMessageException('Command must be invoked by a player: \"%s\"' % (cmdname,))\n\n if not cmd.noneedmongo and not self.app.mongodb:\n # Guess the database access is not going to work.\n raise ErrorMessageException('Tworld has lost contact with the database.')\n\n if cmd.doeswrite:\n # May cause display changes.\n self.set_writable()\n \n res = yield cmd.func(self.app, self, self.cmdobj, stream)\n if res is not None:\n self.log.info('Command \"%s\" result: %s', cmdname, res)\n \n except ErrorMessageException as ex:\n self.log.warning('Error message running \"%s\": %s', cmdname, str(ex))\n except MessageException as ex:\n # MessageException is usually not worth logging, but for\n # a server command, there's nobody else listening.\n self.log.info('Message running \"%s\": %s', cmdname, str(ex))\n\n # End of connid==0 case.\n return \n\n conn = self.app.playconns.get(connid)\n\n # Command from a player (via conn). 
A MessageException here passes\n # an error back to the player.\n\n try:\n cmd = self.app.all_commands.get(cmdname, None)\n if not cmd:\n raise ErrorMessageException('Unknown player command: \"%s\"' % (cmdname,))\n\n # Check various limitations on the command.\n\n if cmd.isserver:\n raise ErrorMessageException('Command may not be invoked by a player: \"%s\"' % (cmdname,))\n\n if cmd.restrict == 'admin':\n player = yield motor.Op(self.app.mongodb.players.find_one,\n {'_id':conn.uid},\n {'admin':1})\n if not (player and player.get('admin', False)):\n raise ErrorMessageException('Command may only be invoked by an administrator: \"%s\"' % (cmdname,))\n\n if cmd.restrict == 'creator':\n # Player must be the creator of the world he is in.\n ### And it must be an unstable version.\n # (Or an admin, anywhere.)\n player = yield motor.Op(self.app.mongodb.players.find_one,\n {'_id':conn.uid},\n {'admin':1, 'build':1})\n if not player:\n raise ErrorMessageException('Player not found!')\n if (player.get('admin', False)):\n # Admins always have creator rights.\n pass\n elif (not player.get('build', False)):\n raise ErrorMessageException('Command requires build permission: \"%s\"' % (cmdname,))\n else:\n playstate = yield motor.Op(self.app.mongodb.playstate.find_one,\n {'_id':conn.uid},\n {'iid':1})\n instance = yield motor.Op(self.app.mongodb.instances.find_one,\n {'_id':playstate['iid']})\n world = yield motor.Op(self.app.mongodb.worlds.find_one,\n {'_id':instance['wid']})\n if world.get('creator', None) != conn.uid:\n raise ErrorMessageException('Command may only be invoked by this world\\'s creator: \"%s\"' % (cmdname,))\n\n if not conn:\n # Newly-established connection. Only 'playeropen' will be\n # accepted. (Another twwcid case; we'll have to sneak the\n # stream in through the command object.)\n # (It's also possible that the connection closed since we\n # queued this, in which case we still reject.)\n if not cmd.preconnection:\n raise ErrorMessageException('Tworld has not yet registered this connection.')\n assert cmd.name=='playeropen', 'Command not playeropen should have already been rejected'\n stream = self.app.webconns.get(twwcid)\n if not stream:\n raise ErrorMessageException('Message from completely unrecognized stream')\n self.cmdobj._connid = connid\n self.cmdobj._stream = stream\n\n if not cmd.noneedmongo and not self.app.mongodb:\n # Guess the database access is not going to work.\n raise ErrorMessageException('Tworld has lost contact with the database.')\n\n if cmd.doeswrite:\n # May cause display changes.\n self.set_writable()\n \n res = yield cmd.func(self.app, self, self.cmdobj, conn)\n if res is not None:\n self.log.info('Command \"%s\" result: %s', cmdname, res)\n\n except ErrorMessageException as ex:\n # An ErrorMessageException is worth logging and sending back\n # to the player, but not splatting out a stack trace.\n self.log.warning('Error message running \"%s\": %s', cmdname, str(ex))\n try:\n # This is slightly hairy, because various error paths can\n # arrive here with no conn or no connid.\n if conn:\n conn.write({'cmd':'error', 'text':str(ex)})\n else:\n # connid may be zero or nonzero, really\n stream = self.app.webconns.get(twwcid)\n stream.write(wcproto.message(connid, {'cmd':'error', 'text':str(ex)}))\n except Exception as ex:\n pass\n\n except MessageException as ex:\n # A MessageException is not worth logging.\n try:\n # This is slightly hairy, because various error paths can\n # arrive here with no conn or no connid.\n if conn:\n conn.write({'cmd':'message', 
'text':str(ex)})\n else:\n # connid may be zero or nonzero, really\n stream = self.app.webconns.get(twwcid)\n stream.write(wcproto.message(connid, {'cmd':'message', 'text':str(ex)}))\n except Exception as ex:\n pass", "title": "" }, { "docid": "203ee9a72ec663ea04b5c573726679fa", "score": "0.56674504", "text": "def receive_message(self, _message, data: dict):\n if data[MESSAGE_TYPE] == TYPE_MEDIA_STATUS:\n self._process_media_status(data)\n return True\n if data[MESSAGE_TYPE] == TYPE_LOAD_FAILED:\n self._process_load_failed(data)\n\n return True\n\n return False", "title": "" }, { "docid": "69cd97e355d503b14a43c754dbd9822c", "score": "0.56622636", "text": "async def msg_sent_handler(self, payload):\n log.debug('async msg_sent_handler function started')\n stream_type = payload['payload']['messageSent']['message']['stream']['streamType']\n message_sent_data = payload['payload']['messageSent']['message']\n if str(stream_type) == 'ROOM':\n for listener in self.room_listeners:\n await listener.on_room_msg(message_sent_data)\n else:\n for listener in self.im_listeners:\n await listener.on_im_message(message_sent_data)", "title": "" }, { "docid": "ee68762621f57aba65607985035011a0", "score": "0.5651365", "text": "def receive_and_filter_message(self, readable_socket):\n\n (msg_type, msg_text) = Message.receive_msg(msg, readable_socket)\n Server.write_to_logs(self, msg_type, msg_text, readable_socket)\n\n if msg_type == 0: # NORMAL\n Server.ao_normal_msg(self, msg_text, readable_socket)\n pass\n if msg_type == 1: # JOIN\n Server.ao_join_msg(self, msg_text, readable_socket)\n pass\n if msg_type == 2: # USER\n Server.ao_user_msg(self, msg_text, readable_socket)\n pass\n if msg_type == 3: # PASS\n Server.ao_pass_msg(self, msg_text, readable_socket)\n pass\n if msg_type == 4: # DIRECT\n Server.ao_direct_msg(self, msg_text, readable_socket)\n pass\n if msg_type == 5: # COMMAND\n Server.ao_command_msg(self, msg_text, readable_socket, msg_type)\n pass\n if msg_type == 6: # SERVER\n Server.ao_server_msg(self, msg_text, readable_socket)\n pass\n if msg_type == 7: # TEMP\n Server.ao_temp_msg(self, msg_text, readable_socket)\n pass", "title": "" }, { "docid": "ae593705fad65bdafcefe0fbb5393530", "score": "0.5644399", "text": "def _message_receive_handler(self, data: dict):\n log.debug(\"Saw an event: %s\", pprint.pformat(data))\n\n event = data.get(\"event\", {})\n message = event.get(\"message\", {})\n msg_type = message.get(\"message_type\", \"\")\n if msg_type != \"text\":\n log.warning(\"only support 'text' msg_type from now on, got:{msg_type}\")\n return\n \n text = self._get_text_without_mentions(message)\n chat_type = message.get(\"chat_type\", \"\").strip()\n sender = event.get(\"sender\", {})\n sender_id = sender.get(\"sender_id\", {})\n\n msg = Message(text, extras={\"lark_event\": data})\n if chat_type == 'p2p':\n msg.frm = LarkPerson(self.lc, sender_id.get(\"open_id\"))\n msg.to = self.bot_identifier\n elif chat_type == \"group\":\n msg.frm = LarkRoomOccupant(self.lc, sender_id.get(\"open_id\"), message.get(\"chat_id\"), self)\n msg.to = LarkRoom(self.lc, message.get(\"chat_id\"), bot=self)\n else:\n log.error(\n f\"unknown chat_type:{chat_type} not in ['p2p', 'group']\")\n\n self.callback_message(msg)", "title": "" }, { "docid": "7b1450cf575ca707a6af37d762598847", "score": "0.5639521", "text": "def analyseMessage(self, address):\n self.fromAddress = address\n listAttr = ()\n\n\n # TODO:\n # A tid for every connection\n # Check tid is one we sent and haven't had a reply to yet\n if 
self._pending.has_key(self.tid):\n #del self._pending[self.tid]\n pass\n elif self.mt == 0x1111: # Is a connection request\n # Add the tid to table (in \"connection request\" case)\n pass\n else:\n logging.error(\"error, unknown transaction ID %s, have %r\" \\\n % (self.tid, self._pending.keys()))\n return\n \n if self.mt == 0x1101:\n # -------------------------------------------------------------\n # Lookup Response\n logging.info(\"got punch response from %s\"%repr(address))\n \n dummy,family,port,addr = struct.unpack( \\\n '!ccH4s', self.avtypeList[\"PUBLIC-ADDRESSE\"])\n publicAddress = (socket.inet_ntoa(addr), port)\n dummy,family,port,addr = struct.unpack( \\\n '!ccH4s', self.avtypeList[\"PRIVATE-ADDRESSE\"])\n privateAddress = (socket.inet_ntoa(addr), port)\n # TODO: read NAT type\n # If Nat type is sym: wait for message from peer\n\n # Puts peer in the active connection list and try to contact it\n self.activeConnection = self.activeConnection + (publicAddress,)\n self.activeConnection = self.activeConnection + (privateAddress,)\n self.responseType = \"Connection to peer\" \n\n if self.protocol == 'TCP':\n self.reactor.listenTCP(self.port, self)\n print 'Listen on port:', self.port\n reactor.connectTCP(publicAddress[0], publicAddress[1], self)\n print 'Connect with:', publicAddress\n self.TCPsessionStarted = 1\n \n # Msg to the peer's public address\n self.sendMessage(publicAddress)\n # Msg to the peer's public address\n self.sendMessage(privateAddress) \n \n elif self.mt == 0x1003:\n # -------------------------------------------------------------\n # Registration Response\n dummy,family,port,addr = struct.unpack( \\\n '!ccH4s', self.avtypeList[\"PUBLIC-ADDRESSE\"]) \n self.publicAddr = (socket.inet_ntoa(addr), port)\n self.registrationMade()\n \n elif self.mt == 0x1111:\n # -------------------------------------------------------------\n # Connection Request\n print \"Connection Request!!!\"\n \n self.responseType = \"Connection to peer\"\n \n dummy,family,port,addr = struct.unpack( \\\n '!ccH4s', self.avtypeList[\"REQUESTOR-PUBLIC-ADDRESSE\"])\n publicAddress = (socket.inet_ntoa(addr), port)\n self.activeConnection = self.activeConnection + (publicAddress,)\n \n # Add tid: it's a new connection\n self._pending[self.tid] = (time.time(), publicAddress)\n\n # If the other peer client is bihind a NAT too\n if 'REQUESTOR-PRIVATE-IP' in self.avtypeList:\n dummy,family,port,addr = struct.unpack( \\\n '!ccH4s', self.avtypeList[\"REQUESTOR-PRIVATE-ADDRESSE\"])\n privateAddress = (socket.inet_ntoa(addr), port)\n self.activeConnection = self.activeConnection + (privateAddress,)\n # Send msg to the peer's private address\n #self.sendMessage(privateAddress)\n\n \n if self.protocol == 'TCP':\n self.reactor.listenTCP(self.port, self)\n print 'Listen on port:', self.port\n reactor.connectTCP(publicAddress[0], publicAddress[1], self)\n print 'Connect with:', publicAddress\n self.TCPsessionStarted = 1\n\n # Send msg to the peer's public address\n self.sendMessage(publicAddress)\n \n elif self.mt == 0x1102:\n # -------------------------------------------------------------\n # Connection to peer\n self.responseType = \"Connection to peer\"\n \n if self.fromAddress in self.activeConnection:\n # The connection is established\n self.connectionMade()\n else:\n # Send msg to the peer's address\n self.sendMessage(self.fromAddress)\n \n \n elif self.mt == 0x1112:\n # -------------------------------------------------------------\n # Error Response\n \n logging.error(\"STUN got an error response:\")\n # 
Extract the class and number\n error, phrase = self.getErrorCode()\n if error == 420:\n _listUnkAttr = self.getListUnkAttr()\n logging.error((error, phrase, _listUnkAttr))\n else:\n logging.error((error, phrase)) \n else:\n logging.error(\"STUN got unknown message\")", "title": "" }, { "docid": "d3fa837b836e12776df865f8787e9864", "score": "0.56381583", "text": "def _dispatch_message(self, message):\n if message.message_type in [JSONRPCMessageType.ResponseSuccess, JSONRPCMessageType.ResponseError]:\n # Responses need to be routed to the handler that requested them\n # TODO: Route to the handler or send error message\n return\n\n # Figure out which handler will execute the request/notification\n if message.message_type is JSONRPCMessageType.Request:\n if self._logger is not None:\n self._logger.info('Received request id=%s method=%s', message.message_id, message.message_method)\n handler = self._request_handlers.get(message.message_method)\n request_context = RequestContext(message, self._output_queue)\n\n # Make sure we got a handler for the request\n if handler is None:\n # TODO: Localize?\n request_context.send_error(f'Requested method is unsupported: {message.message_method}')\n if self._logger is not None:\n self._logger.warn('Requested method is unsupported: %s', message.message_method)\n return\n\n # Call the handler with a request context and the deserialized parameter object\n if handler.class_ is None:\n # Don't attempt to do complex deserialization\n deserialized_object = message.message_params\n else:\n # Use the complex deserializer\n deserialized_object = handler.class_.from_dict(message.message_params)\n try:\n handler.handler(request_context, deserialized_object)\n except Exception as e:\n error_message = f'Unhandled exception while handling request method {message.message_method}: \"{e}\"' # TODO: Localize\n if self._logger is not None:\n self._logger.exception(error_message)\n request_context.send_error(error_message, code=-32603)\n elif message.message_type is JSONRPCMessageType.Notification:\n if self._logger is not None:\n self._logger.info('Received notification method=%s', message.message_method)\n handler = self._notification_handlers.get(message.message_method)\n\n if handler is None:\n # Ignore the notification\n if self._logger is not None:\n self._logger.warn('Notification method %s is unsupported', message.message_method)\n return\n\n # Call the handler with a notification context\n notification_context = NotificationContext(self._output_queue)\n deserialized_object = None\n if handler.class_ is None:\n # Don't attempt to do complex deserialization\n deserialized_object = message.message_params\n else:\n # Use the complex deserializer\n deserialized_object = handler.class_.from_dict(message.message_params)\n try:\n handler.handler(notification_context, deserialized_object)\n except Exception:\n error_message = f'Unhandled exception while handling notification method {message.message_method}'\n if self._logger is not None:\n self._logger.exception(error_message)\n else:\n # If this happens we have a serious issue with the JSON RPC reader\n if self._logger is not None:\n self._logger.warn('Received unsupported message type %s', message.message_type)\n return", "title": "" }, { "docid": "94d5f5b2138601c20e04a2433de30c3a", "score": "0.5620065", "text": "def process_received_data(self, data):\n pass", "title": "" }, { "docid": "ba2773a8a48f360295f7ba72d4b54c74", "score": "0.5616038", "text": "def handle_message(self, msg):\n self.send(msg.get_bytes())", "title": "" }, 
{ "docid": "75cae41f07cc097ab77489837afdf551", "score": "0.56125623", "text": "def _handle_flow_stats(self, stats_msgs):\n stat_count = sum(len(flow_stats) for flow_stats in stats_msgs)\n if stat_count == 0:\n return\n\n self.logger.debug(\"Processing %s stats responses\", len(stats_msgs))\n # Aggregate flows into rule records\n current_usage = defaultdict(RuleRecord)\n for flow_stats in stats_msgs:\n self.logger.debug(\"Processing stats of %d flows\", len(flow_stats))\n for stat in flow_stats:\n if stat.table_id != self.tbl_num:\n # this update is not intended for policy\n return\n current_usage = self._usage_from_flow_stat(current_usage, stat)\n\n # Calculate the delta values from last stat update\n delta_usage = _delta_usage_maps(current_usage, self.last_usage)\n self.last_usage = current_usage\n\n # Append any records which we couldn't send to session manager earlier\n delta_usage = _merge_usage_maps(delta_usage, self.failed_usage)\n self.failed_usage = {}\n\n # Send report even if usage is empty. Sessiond uses empty reports to\n # recognize when flows have ended\n self._report_usage(delta_usage)", "title": "" } ]
67f8b86bba66c28bd5cf6cd3dda68006
Updates the unified dataset for a categorization project
[ { "docid": "b14552d1c0b382b727de1624594b2ed6", "score": "0.6976473", "text": "def update_unified_dataset(project: CategorizationProject) -> List[Operation]:\n return _run_custom(\n project,\n run_update_unified_dataset=True,\n run_apply_feedback=False,\n run_update_results=False,\n )", "title": "" } ]
[ { "docid": "224d3823e703cb387135d9d394b89d58", "score": "0.60381895", "text": "def _run_custom(\n project: CategorizationProject,\n *,\n run_update_unified_dataset: bool = False,\n run_apply_feedback: bool = False,\n run_update_results: bool = False,\n) -> List[Operation]:\n if ProjectType[project.type] != ProjectType.CATEGORIZATION:\n error_msg = f\"Cannot use as a categorization project. Project type: {project.type}\"\n LOGGER.error(error_msg)\n raise TypeError(error_msg)\n else:\n project = project.as_categorization()\n\n completed_operations = []\n if run_update_unified_dataset:\n LOGGER.info(\n f\"Updating the unified dataset for project {project.name} (id={project.resource_id}).\"\n )\n op = project.unified_dataset().refresh()\n operation.enforce_success(op)\n completed_operations.append(op)\n if run_apply_feedback:\n LOGGER.info(\n f\"Applying feedback to the categorization model for project {project.name} \"\n f\"(id={project.resource_id}).\"\n )\n op = project.model().train()\n operation.enforce_success(op)\n completed_operations.append(op)\n if run_update_results:\n LOGGER.info(\n f\"Updating categorization results for project {project.name} \"\n f\"(id={project.resource_id}).\"\n )\n op = project.model().predict()\n operation.enforce_success(op)\n completed_operations.append(op)\n\n return completed_operations", "title": "" }, { "docid": "e0f3eb98fb0419e3522f57becf5df909", "score": "0.5904782", "text": "def update(self, dataset: Dataset) -> None:\n raise NotImplementedError", "title": "" }, { "docid": "65eb5023a6fd6650f09458c8ea7e4926", "score": "0.5813415", "text": "def update(self, category:int, labeled_datapoint: Dict[str, Any]) -> None:\n raise NotImplementedError", "title": "" }, { "docid": "5ad4049eee5fc3935871206172c67d2a", "score": "0.5441576", "text": "def update_classify_tab(self):\r\n category = self.selected_category()\r\n subcategory = self.selected_subcategory()\r\n unit = self.selected_unit()\r\n default_classes = unit['classes']\r\n field = self.selected_field()\r\n field_index = self.layer.dataProvider().fields().indexFromName(\r\n self.selected_field())\r\n field_type = self.layer.dataProvider().fields()[field_index].type()\r\n self.lblClassify.setText(classify_question %\r\n (subcategory['name'], category['name'],\r\n unit['name'], field.upper()))\r\n # Assign unique values to classes\r\n unassigned_values = list()\r\n assigned_values = dict()\r\n for default_class in default_classes:\r\n assigned_values[default_class['name']] = list()\r\n for value in self.layer.uniqueValues(field_index):\r\n value_as_string = value and unicode(value) or 'NULL'\r\n assigned = False\r\n for default_class in default_classes:\r\n if (field_type > 9\r\n and value_as_string in default_class['string_defaults']) \\\r\n or (field_type < 10\r\n and (default_class['numeric_default_min'] <=\r\n value < default_class[\r\n 'numeric_default_max'])):\r\n assigned_values[default_class['name']] += [value_as_string]\r\n assigned = True\r\n if not assigned:\r\n # add to unassigned values list otherwise\r\n unassigned_values += [value_as_string]\r\n self.populate_classified_values(\r\n unassigned_values, assigned_values, default_classes)", "title": "" }, { "docid": "a1976166fba9d2772c3088dda14937b7", "score": "0.53576636", "text": "def update(attr, old, new):\n hists_to_plot = [selection.labels[i] for i in selection.active]\n new_src = make_dataset(hists_to_plot,\n x_min = range_select.value[0],\n x_max = range_select.value[1],\n n_bins = nbin_select.value)\n cds.data.update(new_src.data)", 
"title": "" }, { "docid": "f094fde11c61f07450896b7c440e0904", "score": "0.534893", "text": "def dataset(ctx, dataset):\n ctx.obj['DATASET'] = dataset", "title": "" }, { "docid": "900ded97bb95d92e7f3fc04bfd2108db", "score": "0.52719396", "text": "def update_service_category(self, service_category_name=None, data=None):", "title": "" }, { "docid": "1a12945836e312a4025270390e722403", "score": "0.52345085", "text": "def change_dataset_selection(request):\n\n if not request.method == \"POST\":\n return HttpResponseForbidden(\"Change dataset selection method must be POST\")\n\n dataset_prefix = 'dataset_'\n\n if request.user.is_authenticated:\n selected_dataset_acronyms = []\n for attribute in request.POST:\n if attribute[:len(dataset_prefix)] == dataset_prefix:\n dataset_name = attribute[len(dataset_prefix):]\n selected_dataset_acronyms.append(dataset_name)\n if selected_dataset_acronyms:\n # check that the selected datasets exist\n for dataset_name in selected_dataset_acronyms:\n try:\n dataset = Dataset.objects.get(acronym=dataset_name)\n except ObjectDoesNotExist:\n print('Exception updating selected datasets, dataset acronym does not exist: ', dataset_name)\n return HttpResponseRedirect(reverse('admin_dataset_select'))\n\n user_profile = UserProfile.objects.get(user=request.user)\n user_profile.selected_datasets.clear()\n for dataset_name in selected_dataset_acronyms:\n try:\n dataset = Dataset.objects.get(acronym=dataset_name)\n user_profile.selected_datasets.add(dataset)\n except (ObjectDoesNotExist, TransactionManagementError, DatabaseError, IntegrityError):\n print('exception to updating selected datasets')\n pass\n user_profile.save()\n else:\n # no datasets selected\n user_profile = UserProfile.objects.get(user=request.user)\n user_profile.selected_datasets.clear()\n user_profile.save()\n else:\n # clear old selection\n selected_dataset_acronyms = []\n for attribute in request.POST:\n if attribute[:len(dataset_prefix)] == dataset_prefix:\n dataset_name = attribute[len(dataset_prefix):]\n selected_dataset_acronyms.append(dataset_name)\n new_selection = []\n successful = True\n for dataset_name in selected_dataset_acronyms:\n try:\n dataset = Dataset.objects.get(acronym=dataset_name)\n new_selection.append(dataset.acronym)\n except ObjectDoesNotExist:\n print('exception to updating selected datasets anonymous user')\n successful = False\n pass\n if successful:\n request.session['selected_datasets'] = new_selection\n # erase previous search results session variable since the dataset selection has changed\n request.session['search_results'] = []\n request.session.modified = True\n\n # check whether the last used dataset is still in the selected datasets\n if 'last_used_dataset' in request.session.keys():\n if selected_dataset_acronyms and request.session['last_used_dataset'] not in selected_dataset_acronyms:\n request.session['last_used_dataset'] = selected_dataset_acronyms[0]\n request.session.modified = True\n else:\n # set the last_used_dataset?\n pass\n return redirect(settings.PREFIX_URL + '/datasets/select')", "title": "" }, { "docid": "548e81ee1f73a1358b1f5667451bda1e", "score": "0.52059406", "text": "def set(self, dataset_version_id, name):\n\n dataset_tag = models.DatasetTag(name=name)\n\n repository = self.build_repository(repositories.SetDatasetTag)\n return repository.update(dataset_version_id, dataset_tag)", "title": "" }, { "docid": "b4e27480d8d170c800fb05fa292b015f", "score": "0.5186465", "text": "def update_categories(desired_name2id: dict, coco_dict: dict) -> dict:\n # so 
that original variable doesn't get affected\n    coco_source = copy.deepcopy(coco_dict)\n\n    # init target coco dict\n    coco_target = {\"images\": [], \"annotations\": [], \"categories\": []}\n\n    # init vars\n    currentid2desiredid_mapping = {}\n    # create category id mapping (currentid2desiredid_mapping)\n    for category in coco_source[\"categories\"]:\n        current_category_id = category[\"id\"]\n        current_category_name = category[\"name\"]\n        if current_category_name in desired_name2id.keys():\n            currentid2desiredid_mapping[current_category_id] = desired_name2id[\n                current_category_name\n            ]\n        else:\n            # ignore categories that are not included in desired_name2id\n            currentid2desiredid_mapping[current_category_id] = -1\n\n    # update annotations\n    for annotation in coco_source[\"annotations\"]:\n        current_category_id = annotation[\"category_id\"]\n        desired_category_id = currentid2desiredid_mapping[current_category_id]\n        # append annotations with category id present in desired_name2id\n        if desired_category_id != -1:\n            # update category id\n            annotation[\"category_id\"] = desired_category_id\n            # append updated annotation to target coco dict\n            coco_target[\"annotations\"].append(annotation)\n\n    # create desired categories\n    categories = []\n    for name in desired_name2id.keys():\n        category = {}\n        category[\"name\"] = category[\"supercategory\"] = name\n        category[\"id\"] = desired_name2id[name]\n        categories.append(category)\n\n    # update categories\n    coco_target[\"categories\"] = categories\n\n    # update images\n    coco_target[\"images\"] = coco_source[\"images\"]\n\n    return coco_target", "title": "" }, { "docid": "3afd4ba4152ee1576aa60abcea7dc2b8", "score": "0.5137731", "text": "def load_dataset(self, dataset_dir, subset):\n        processed_path = os.path.join(dataset_dir, \"processedBuildingLabels/\")\n        annotation_path = os.path.join(processed_path, \"vectordata/summarydata/AOI_1_RIO_polygons_solution_3band.csv\")\n        image_dir = os.path.join(processed_path, \"3band\")\n        print(\"Annotation Path \", annotation_path)\n        print(\"Image Dir \", image_dir)\n        assert os.path.exists(annotation_path) and os.path.exists(image_dir)\n\n        # Register building class.\n        self.add_class(\"spacenet-rio\", 1, \"building\")\n\n        # Load building annotations as DataFrame, dropping empty polygons.\n        df = pd.read_csv(annotation_path, na_values=\"-1\").dropna()\n\n        # No standard train/val split, so random 10% chosen as validation set.\n        # image_ids = df[\"ImageId\"].unique()\n        # VAL_IMAGE_IDS = np.random.choice(image_ids, len(image_ids) // 10,\n        #                                  replace=False)\n        # print(list(VAL_IMAGE_IDS))\n\n        assert subset in [\"train\", \"val\"]\n        if subset == \"val\":\n            image_ids = VAL_IMAGE_IDS\n        else:\n            # Get image ids from directory names\n            image_ids = [fn[len('3band_'):-len('.tif')] for fn in os.listdir(image_dir)]\n            if subset == \"train\":\n                image_ids = list(set(image_ids) - set(VAL_IMAGE_IDS))\n\n        # Add images, calculating stats.\n        rgb_means = []\n        counts = []\n        widths = []\n        heights = []\n        for image_id, group in df.groupby([\"ImageId\"]):\n            if image_id in image_ids:\n                path = os.path.join(image_dir, \"3band_{}.tif\".format(image_id))\n                image = plt.imread(path)\n                height, width = image.shape[:2]\n                rgb_mean = np.mean(image.reshape((-1, 3)), axis=0)\n                rgb_means.append(rgb_mean)\n                polygons = [loads(wkt) for wkt in group['PolygonWKT_Pix']]\n                counts.append(len(polygons))\n                bounds = [polygon.bounds for polygon in polygons]\n                x_diffs = [bound[2] - bound[0] for bound in bounds]\n                y_diffs = [bound[3] - bound[1] for bound in bounds]\n                widths.extend(x_diffs)\n                
heights.extend(y_diffs)\n self.add_image(\n \"spacenet-rio\", image_id=image_id, path=path,\n height=height, width=width, polygons=polygons)\n print(\"RGB mean: {}\".format(np.mean(rgb_means, axis=0)))\n print(\"Building Counts:\")\n print(pd.Series(counts).describe())\n print(\"Building Widths (m):\")\n print(pd.Series(np.array(widths)/2).describe())\n print(\"Building Heights (m):\")\n print(pd.Series(np.array(heights)/2).describe())", "title": "" }, { "docid": "f5b264ec2fdfbf8ff2d7ab84d8c288cd", "score": "0.5136863", "text": "def update(self):\n if self.id:\n Category.logger.info(\"Update a category: {%s}\", self.id)\n try:\n document = self.database[self.id]\n except KeyError:\n document = None\n if document:\n document.update(self.serialize())\n document.save()", "title": "" }, { "docid": "f607cd796393b5f23be1fb02955eeb09", "score": "0.51346374", "text": "def update_category(id, new_cat):\n with sqlite3.connect(\"./rare.db\") as conn:\n db_cursor = conn.cursor()\n db_cursor.execute(\"\"\"\n UPDATE Categories\n SET\n label = ?\n WHERE id = ?\n \"\"\", (new_cat[\"label\"], id, ))\n rows_affected = db_cursor.rowcount\n if rows_affected == 0:\n return False\n else:\n return True", "title": "" }, { "docid": "d9e63ec004febb5666a45c1821578435", "score": "0.5118361", "text": "def update(self, data):\n if not isinstance(data, list):\n data = [data]\n\n for d in data:\n # this might be some other ids to, check it #TODO\n cls_id = d.pop(\"ChargerId\", None)\n if cls_id is not None:\n klass = self.map.get(cls_id)\n if klass:\n klass.set_attributes(d)\n\n self.is_built = True", "title": "" }, { "docid": "49a15cc4f14ed0064ea3003fac9aa20d", "score": "0.50850767", "text": "def update_data(labeled,collection):\n labeled_dict = labeled.to_dict(orient='records')\n\n db_dict = [{'_id':{u'id':x['id'],u'site':x['site']},'label':{'old_rel_score': x['old_scores'], 'rel_score': x['rel_scores']}, \\\n 'artifacts':x['item_predictions']} \\\n for x in labeled_dict]\n\n for f in db_dict:\n collection.update_one({'_id.id':str(f['_id']['id']).decode('utf-8'),'_id.site':str(f['_id']['site']).decode(\"utf-8\")},\\\n {'$set':{'label':f['label'],'artifacts':f['artifacts']}})", "title": "" }, { "docid": "abc294a29f41377b7441ca63fe6affb6", "score": "0.5082084", "text": "def updateCategory(self):\n\t\tself.tableWidget_category.cellChanged.disconnect()\n\t\tself.tableWidget_category.setRowCount(0)\n\t\tlistcat = []\n\t\t#listcolumns = ['Style', 'Mode', 'Folder', 'Family']\n\t\tlistcolumns = list(self.Json_params.getContentMember('categories', self.comboBox_cate.currentText())['folder001'].keys())\n\t\trow = 0\n\t\tfor col in listcolumns:\n\t\t\tlistcolumns[row] = listcolumns[row].title()\n\t\t\trow += 1\n\t\tself.content_category =\tself.Json_params.getContentMember('categories', self.comboBox_cate.currentText())\n\t\tfor key, value in self.content_category.items():\n\t\t\tfor key in listcolumns:\n\t\t\t\tlistcat.append(value[key.lower()])\n\t\tself.updateTable(self.tableWidget_category, listcat, listcolumns, False, True)\n\t\tself.tableWidget_category.cellChanged.connect(self.changeCategory)", "title": "" }, { "docid": "4748ac70f05d4cf0620bdb4a52f0f352", "score": "0.5082081", "text": "def update_classifier(self, inst):\n if self.is_updateable:\n javabridge.call(self.jobject, \"updateClassifier\", \"(Lweka/core/Instance;)V\", inst.jobject)\n else:\n logger.critical(wutils.get_classname(self.jobject) + \" is not updateable!\")", "title": "" }, { "docid": "75249eaed37ae00a3a3d245a817066a5", "score": "0.50729185", "text": 
"def on_category_data_added(self, category, added_data: pd.DataFrame):\n self.compute()", "title": "" }, { "docid": "53d54c36b716d286f657a5c294709d53", "score": "0.5065361", "text": "def updateDatasetType(self, **kwargs):\n validParameters = ['dataset', 'dataset_access_type']\n\n requiredParameters = {'forced': validParameters}\n\n checkInputParameter(method=\"updateDatasetType\", parameters=list(kwargs.keys()), validParameters=validParameters,\n requiredParameters=requiredParameters)\n\n return self.__callServer(\"datasets\", params=kwargs, callmethod='PUT')", "title": "" }, { "docid": "24f069ebc802f6d8ab0d807d7c8eb018", "score": "0.50602067", "text": "def update_category(self):\n cat_name = self.source.category.name if self.source else ''\n self.categoryLine.setText(cat_name)\n self.categoryLine.home(False)", "title": "" }, { "docid": "18f1428e8f674a8f2ff85b94aafab971", "score": "0.50575787", "text": "def update_category_tab(self):\r\n self.lstCategories.clear()\r\n self.lstSubcategories.clear()\r\n self.lstUnits.clear()\r\n self.lblDescribeCategory.setText('')\r\n self.lblIconCategory.setPixmap(QPixmap())\r\n self.lblSelectCategory.setText(\r\n category_question % self.layer.name())\r\n categories = IFM().categories_for_layer(\r\n self.layer_type, self.data_type)\r\n if self.data_type == 'polygon':\r\n categories += ['aggregation']\r\n if self.data_type == 'point':\r\n categories = ['hazard']\r\n for category in categories:\r\n if type(category) != dict:\r\n # pylint: disable=W0612\r\n # noinspection PyUnresolvedReferences\r\n category = eval('metadata.%s_definition' % category)\r\n # pylint: enable=W0612\r\n item = QListWidgetItem(category['name'], self.lstCategories)\r\n item.setData(QtCore.Qt.UserRole, unicode(category))\r\n self.lstCategories.addItem(item)", "title": "" }, { "docid": "26f16df6175b403d0a918af01d45474c", "score": "0.49941513", "text": "def changeDatasetView(self):\n\t\tval = int(self.sldDataset.value())\t\t\n\n\t\t# Change to the new numpy image:\n\t\tif self.__data.ndim == 4:\n\t\t\trep = int(self.sldRepetition.value())\n\t\t\tself.imagePanel.changeImage(self.__data[:,:,val,rep]) \n\t\t\t#self.sldRepetition.setValue(round(self.__data.shape[3]/2))\n\t\telse:\n\t\t\tself.imagePanel.changeImage(self.__data[:,:,val])\n\n\t\t# Set the index:\n\t\tself.indexLabel.setText(str(val + 1) + \"/\" + str(round(self.__data.shape[2])))", "title": "" }, { "docid": "523cb0ca1f9c6dca0a32105a5876c913", "score": "0.49826214", "text": "def update(self):\n self.info.display_dataset()\n self.overview.update()\n self.labels.update(labels=self.info.dataset.header['chan_name'])\n self.channels.update()\n\n try:\n self.info.markers = self.info.dataset.read_markers()\n except FileNotFoundError:\n lg.info('No notes/markers present in the header of the file')\n else:\n self.notes.update_dataset_marker()", "title": "" }, { "docid": "5e847842c3bee9bafad8bcabc01ce0bc", "score": "0.49657935", "text": "def update_dataset(self,\n name: str,\n dataset: DatasetClient,\n use_deltas: bool = True\n ) -> DatasetClient:\n name = name.lower()\n if name not in self.artifacts:\n raise ValueError('dataset \\'{}\\' doesn\\'t already exist'.format(name))\n if use_deltas:\n response = self.vizier_request(\"vizual_script\",\n output=name,\n name=dataset.existing_name,\n identifier=dataset.identifier,\n has_response=True,\n script=dataset.history\n )\n else:\n response = self.vizier_request(\"save_dataset\",\n name=name,\n has_response=True,\n dataset=dataset.to_json()\n )\n assert(response is not None)\n 
dataset.identifier = response[\"artifactId\"]\n self.datasets[name] = dataset\n self.artifacts[name] = Artifact(name=name,\n artifact_type=ARTIFACT_TYPE_DATASET,\n mime_type=MIME_TYPE_DATASET,\n artifact_id=response[\"artifactId\"]\n )\n return dataset", "title": "" }, { "docid": "91662a078cbc6b8cc1e75d0e75459df5", "score": "0.4958071", "text": "def set(category_path, keywords):", "title": "" }, { "docid": "24acfa963e4445bd332dcfdcde97246b", "score": "0.49446714", "text": "def _update_info(self, loaded=False, **kwargs):\n # Update data sizes\n self.unique_classes = np.unique(self.outcomes[:,0])\n self.num_datapoints = len(self.outcomes)\n # self.num_outcomes = len(self.unique_classes)\n # Update dataset statistics\n all_param = self.param_original.reshape(self.num_datapoints, -1)\n all_param = all_param[:, np.invert(np.isinf(all_param[0,:]))]\n self.param_stats['mu'] = np.mean(all_param, axis=0)\n self.param_stats['std'] = np.std(all_param, axis=0)\n self.param_stats['cov'] = np.cov(all_param, rowvar=False)\n # self.param_stats['mu'] = np.zeros(self.param_original.shape[1:])\n # self.param_stats['std'] = np.ones(self.param_original.shape[1:])\n # self.param_stats['cov'] = np.eye(np.prod(\n # self.param_original.shape[1:]))\n # Refresh datapoint-cluster information (prepare for reclustering)\n # CLUSTER STATISTICS make as a method\n if not self.skip_clust:\n if self.cluster_object.datapoint_losses is not None:\n new_data_zeros = np.zeros(self.outcomes.shape[0] - \\\n self.cluster_object.datapoint_losses.shape[0])\n self.cluster_object.datapoint_losses = \\\n np.append(self.datapoint_losses, new_data_zeros)\n self.cluster_object.has_loss = \\\n np.append(self.has_loss, new_data_zeros.astype(int))\n # reset datapoints to cluster\n self.cluster_object.datapoints_to_cluster = \\\n np.zeros_like(self.outcomes, int)", "title": "" }, { "docid": "73e89c695e13d4158d7728405d18255e", "score": "0.49417827", "text": "def test_dataset_update(client, runner, params):\n # Add dataset to project\n result = runner.invoke(\n cli, [\n 'dataset', 'add', '--create', 'remote', '--ref', 'v0.3.0', '-s',\n 'CHANGES.rst',\n 'https://github.com/SwissDataScienceCenter/renku-python.git'\n ],\n catch_exceptions=False\n )\n assert 0 == result.exit_code\n\n before = read_dataset_file_metadata(client, 'remote', 'CHANGES.rst')\n\n result = runner.invoke(\n cli, ['dataset', 'update'] + params, catch_exceptions=False\n )\n assert 0 == result.exit_code\n\n after = read_dataset_file_metadata(client, 'remote', 'CHANGES.rst')\n assert after._id == before._id\n assert after._label != before._label\n assert after.added == before.added\n assert after.url == before.url\n assert after.based_on._id == before.based_on._id\n assert after.based_on._label != before.based_on._label\n assert after.based_on.path == before.based_on.path\n assert after.based_on.based_on is None", "title": "" }, { "docid": "16c58d28000c4d30024931e857d555b8", "score": "0.4927639", "text": "def save_category(self):\n self.save()", "title": "" }, { "docid": "86420e1fbb41628053e661680a6609aa", "score": "0.49213928", "text": "def apply_feedback_and_update_results(project: CategorizationProject,) -> List[Operation]:\n return _run_custom(\n project,\n run_update_unified_dataset=False,\n run_apply_feedback=True,\n run_update_results=True,\n )", "title": "" }, { "docid": "f2c4047a7962841b0c4095866a2dfb80", "score": "0.4916006", "text": "def test_update_dataset():\n\n # we need \n catalog_url = os.environ.get('CKAN_BASE_URL', None)\n catalog_api_key = 
os.environ.get('CKAN_API_KEY', None)\n\n api_key_from_db = catalog_api_key == 'READ_FROM_DB'\n if api_key_from_db:\n sql_alchemy_url = os.environ.get('SQLALCHEMY_URL', None)\n api_key, error = helpers.read_ckan_api_key_from_db(sql_alchemy_url)\n if error is not None:\n raise Exception(error)\n\n os.environ['CKAN_API_KEY'] = api_key\n catalog_api_key = api_key\n logger.info('Read API KEY from database: {} ({})'.format(api_key, sql_alchemy_url))\n \n # use (and save with VCR) a source with just 4 datasets\n harvest_from = 'https://www.onhir.gov/data.json'\n\n cpa = CKANPortalAPI(base_url=catalog_url, api_key=catalog_api_key)\n\n organization = {\n 'name': 'test-organization',\n 'title': 'Test Organization',\n 'state': 'active'\n }\n res = cpa.create_organization(organization=organization)\n harvest_source = cpa.create_harvest_source(title=\"Test harvest source\",\n url=harvest_from,\n owner_org_id=organization['name'],\n source_type='datajson',\n notes='Test harvest source',\n frequency='WEEKLY',\n on_duplicated='SKIP')\n \n hsi = harvest_source['result']['id']\n\n destination = CKANHarvestDestination(catalog_url=catalog_url,\n api_key=catalog_api_key,\n organization_id=organization['name'],\n harvest_source_id=hsi)\n\n \n hdj = HarvestDataJSON(name='Test harvest',\n url=harvest_from,\n destination=destination)\n\n res = hdj.download()\n hdj.save_download_results(flow_results=res)\n res = hdj.compare()\n hdj.save_compare_results(flow_results=res)\n res = hdj.write_destination()\n hdj.save_write_results(flow_results=res)\n hdj.write_final_report()", "title": "" }, { "docid": "a8ac897176ca8b98ced1b0b65a2a6aed", "score": "0.49110413", "text": "def update_dataset(request, datasetid):\n\n if request.method == \"POST\":\n\n dataset = get_object_or_404(Dataset, id=datasetid)\n dataset.save() # This updates the lastUpdated field\n\n import guardian\n from django.contrib.auth.models import Group\n\n try:\n group_manager = Group.objects.get(name='Dataset_Manager')\n except:\n messages.add_message(request, messages.ERROR, _('No group Dataset_Manager found.'))\n return HttpResponseForbidden(\"Dataset Update Not Allowed\")\n\n groups_of_user = request.user.groups.all()\n if not group_manager in groups_of_user:\n messages.add_message(request, messages.ERROR,\n _('You must be in group Dataset Manager to modify dataset details.'))\n return HttpResponseForbidden(\"Dataset Update Not Allowed\")\n\n user_change_datasets = guardian.shortcuts.get_objects_for_user(request.user, 'change_dataset', Dataset, accept_global_perms=False)\n if not dataset in user_change_datasets:\n return HttpResponseForbidden(\"Dataset Update Not Allowed\")\n\n field = request.POST.get('id', '')\n value = request.POST.get('value', '')\n original_value = ''\n\n if field == 'description':\n original_value = getattr(dataset,field)\n setattr(dataset, field, value)\n dataset.save()\n return HttpResponse(str(original_value) + str('\\t') + str(value), {'content-type': 'text/plain'})\n elif field == 'copyright':\n original_value = getattr(dataset, field)\n setattr(dataset, field, value)\n dataset.save()\n return HttpResponse(str(original_value) + str('\\t') + str(value), {'content-type': 'text/plain'})\n elif field == 'reference':\n original_value = getattr(dataset, field)\n setattr(dataset, field, value)\n dataset.save()\n return HttpResponse(str(original_value) + str('\\t') + str(value), {'content-type': 'text/plain'})\n elif field == 'conditions_of_use':\n original_value = getattr(dataset, field)\n setattr(dataset, field, value)\n 
dataset.save()\n return HttpResponse(str(original_value) + str('\\t') + str(value), {'content-type': 'text/plain'})\n elif field == 'acronym':\n original_value = getattr(dataset, field)\n setattr(dataset, field, value)\n dataset.save()\n return HttpResponse(str(original_value) + str('\\t') + str(value), {'content-type': 'text/plain'})\n elif field == 'is_public':\n original_value = getattr(dataset, field)\n dataset.is_public = value == 'True'\n dataset.save()\n if dataset.is_public:\n newvalue = True\n else:\n newvalue = False\n return HttpResponse(str(original_value) + str('\\t') + str(newvalue), {'content-type': 'text/plain'})\n elif field == 'add_owner':\n update_owner(dataset, field, value)\n elif field == 'default_language':\n original_value = getattr(dataset, field)\n # variable original_value is used for feedback to the interface\n if original_value:\n original_value = original_value.name\n else:\n original_value = '-'\n if value == '-':\n # this option is not offered by the interface, value must be one of the translation languages (not empty '-')\n # this code is here if we want to user to be able to \"unset\" the default language in the interface\n setattr(dataset, field, None)\n dataset.save()\n else:\n try:\n new_default_language = Language.objects.get(name=value)\n setattr(dataset, field, new_default_language)\n dataset.save()\n except:\n value = original_value\n return HttpResponse(str(original_value) + str('\\t') + str(value), {'content-type': 'text/plain'})\n else:\n\n if not field in Dataset.get_field_names():\n return HttpResponseBadRequest(\"Unknown field\", {'content-type': 'text/plain'})\n\n # unknown if we need this code yet for the above fields\n whitespace = tuple(' \\n\\r\\t')\n if value.startswith(whitespace) or value.endswith(whitespace):\n value = value.strip()\n original_value = getattr(dataset,field)\n\n #This is because you cannot concat none to a string in py3\n if original_value == None:\n original_value = ''\n\n # The machine_value (value) representation is also returned to accommodate Hyperlinks to Handshapes in gloss_edit.js\n return HttpResponse(str(original_value) + str('\\t') + str(value), {'content-type': 'text/plain'})\n\n else:\n print('update dataset is not POST')\n return HttpResponseForbidden(\"Dataset Update Not Allowed\")", "title": "" }, { "docid": "466a73b32e6084c204dceedf9466e3f2", "score": "0.4907756", "text": "def setDataset(self, dataset): \n self.dataset = dataset", "title": "" }, { "docid": "5edf4407029a22d06c22a9fa9f3b0275", "score": "0.49038118", "text": "def remap_classes(dataset, class_map):\n class_new_names = list(set(class_map.values()))\n class_new_names.sort() # NOTE sort() is a NoneType return method, it sorts the list without outputting new vars\n class_originals = copy.deepcopy(dataset['categories'])\n dataset['categories'] = [] # removing all dependencies\n class_ids_map = {} # map from old id to new id\n\n # Check whether the category has background or not, assign index 0. Useful for panoptic segmentation.\n has_background = False\n if 'Background' in class_new_names:\n # Check whether the backgroun category has index zero.\n if class_new_names.index('Background') != 0:\n class_new_names.remove('Background')\n class_new_names.insert(0, 'Background')\n has_background = True\n\n # Catching duplicates - TACO had duplicates for id 4040 and 309. 
Re-id'd\n id_ann_all = []\n id_ann_repeated = []\n for index_old, ann_old in enumerate(dataset['annotations']):\n if ann_old['id'] in id_ann_all:\n # if found a duplicate, re-id at the end\n id_ann_repeated.append(ann_old['id'])\n ann_old['id'] = len(dataset['annotations'])+len(id_ann_repeated)-1\n else:\n id_ann_all.append(ann_old['id'])\n print(f'Found {len(id_ann_repeated)} annotations repeated.'\n f'\\nPlease double check input file, annotation id(s) {id_ann_repeated} are duplicated!\\n')\n\n # Replace categories, iterating through every class name\n for id_new, class_new_name in enumerate(class_new_names):\n # Make sure id:0 is reserved for background\n id_rectified = id_new\n if not has_background:\n id_rectified += 1\n\n # Creating new category dictionary, using new category ID and the new class name\n category = {\n 'supercategory': '',\n 'id': id_rectified, # Background has id=0\n 'name': class_new_name,\n }\n dataset['categories'].append(category) # assigning new categories\n\n # Map class names\n for class_original in class_originals:\n # If the new class exists in the value of the class map dict, create new class id\n if class_map[class_original['name']] == class_new_name:\n class_ids_map[class_original['id']] = id_rectified\n\n # Update annotations category id tag\n for ann in dataset['annotations']:\n ann['category_id'] = class_ids_map[ann['category_id']]\n\n # Saving the newly created file as a JSON file\n num_classes = str(len(class_new_names))\n ann_out_path = './data' + '/' + 'ann_'+ 'map_to_' + num_classes +'.json'\n with open(ann_out_path, 'w+') as f:\n f.write(json.dumps(dataset))\n\n # return path to new file, for loading somewhere else.\n return str(os.path.abspath(ann_out_path))", "title": "" }, { "docid": "f9b1e0040cf2e8d28308bc8a100184c2", "score": "0.49032316", "text": "def save(self, *args, **kwargs):\n self._select_correct_category()\n super().save(*args, **kwargs)", "title": "" }, { "docid": "f5a1535abb03709dd195a120b9a54505", "score": "0.49024558", "text": "def setup_dataset(self):\n raise NotImplementedError()", "title": "" }, { "docid": "f5a1535abb03709dd195a120b9a54505", "score": "0.49024558", "text": "def setup_dataset(self):\n raise NotImplementedError()", "title": "" }, { "docid": "513027be4741a6f9c4a69ad4ca231b48", "score": "0.48991293", "text": "def _process_dataset(name, directory, csv_file, num_shards, label_kind):\n\n\tfilenames = glob.glob(os.path.join(directory, '*.png'))\n\tfilebasenames = []\n\tfor filename in filenames:\n\t\tfilebasenames.append(os.path.basename(filename))\n\t\n\t# load the medical records dataframe, get the basenames, and add it as new column\n\tdf = pd.read_csv(csv_file, delimiter=';', dtype={'sourcefile':str})\n\tbasenames = []\n\tsourcefiles = []\n\tfor sourcefile in df['sourcefile']:\n\t\tsourceFileSplit = sourcefile.split('\\\\')\n\t\tsourceFileAdjusted = '/'.join(sourceFileSplit)\n\t\tsourcefiles.append(sourceFileAdjusted)\n\t\tbasename = os.path.basename(sourceFileAdjusted)\n\t\tbasename = basename[:-3]+'png'\n\t\tbasenames.append(basename)\n\tbasenames = pd.Series(basenames)\n\tdf['basename'] = basenames\n\tsourcefiles = pd.Series(sourcefiles)\n\tdf['sourcefile'] = sourcefiles\n\t\n\t# find the indices of images in the dataframe and shrink and reorder the dataframe using the indices\n\tinds = []\n\tfor filebasename in filebasenames:\n\t\tinds.append(basenames[basenames==filebasename].index[0])\n\tmedical_dicts = df.iloc[inds,:]\n\t\n\t# replace indices with the new order\n\tmedical_dicts.index = 
range(medical_dicts.shape[0])\n\n\t# fill NaN with -9999; be careful if -9999 might actually occur in the data\n\tmedical_dicts = medical_dicts.fillna(FLAGS.nan_value)\n\n\t# generate labels based on its kind (risk/nonreliability/etc)\n\tif label_kind == 'risk':\n\t\tlabels = list(medical_dicts['outcome_risk'])\n\telif label_kind == 'nonreliability':\n\t\tlabels = list(medical_dicts['outcome_nonreliability'])\n\telse:\n\t\traise ValueError('could not understand the label_kind:' + label_kind)\n\t\n\tif FLAGS.class_only != -1:\n\t\tinds = [i for i,x in enumerate(labels) if x == FLAGS.class_only]\n\t\tlabels = [labels[i] for i in inds]\n\t\tfilenames = [filenames[i] for i in inds]\n\t\tmedical_dicts = medical_dicts.iloc[inds, :]\n\t\tmedical_dicts.index = range(medical_dicts.shape[0])\n\n\t_process_image_files(name, filenames, labels, medical_dicts, num_shards)", "title": "" }, { "docid": "f89c2acffaa10f340245ae35cde4d216", "score": "0.48895854", "text": "def UpdateCluster(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "title": "" }, { "docid": "8d37816128c0ce7865d767d76e431ef2", "score": "0.48800167", "text": "def update_dataset_entries(lookoutvision_client, project_name, dataset_type, updates_file):\n\n try:\n status = \"\"\n status_message = \"\"\n manifest_file = \"\"\n\n # Update dataset entries.\n logger.info(f\"Updating {dataset_type} dataset for project {project_name} \"\n f\"with entries from {updates_file}.\")\n\n with open(updates_file) as f:\n manifest_file = f.read()\n\n lookoutvision_client.update_dataset_entries(\n ProjectName=project_name,\n DatasetType=dataset_type,\n Changes=manifest_file,\n )\n\n finished = False\n \n while not finished:\n\n dataset = lookoutvision_client.describe_dataset(ProjectName=project_name,\n DatasetType=dataset_type)\n\n status = dataset['DatasetDescription']['Status']\n status_message = dataset['DatasetDescription']['StatusMessage']\n\n if status == \"UPDATE_IN_PROGRESS\":\n logger.info(\n (f\"Updating {dataset_type} dataset for project {project_name}.\"))\n time.sleep(5)\n continue\n\n if status == \"UPDATE_FAILED_ROLLBACK_IN_PROGRESS\":\n logger.info(\n (f\"Update failed, rolling back {dataset_type} dataset for project {project_name}.\"))\n time.sleep(5)\n continue\n\n if status == \"UPDATE_COMPLETE\":\n logger.info(\n f\"Dataset updated: {status} : {status_message} : {dataset_type} dataset for project {project_name}.\")\n finished = True\n continue\n\n if status == \"UPDATE_FAILED_ROLLBACK_COMPLETE\":\n logger.info(\n f\"Rollback completed after update failure: {status} : {status_message} : {dataset_type} dataset for project {project_name}.\")\n finished = True\n continue\n\n logger.exception(\n f\"Failed. Unexpected state for dataset update: {status} : {status_message} : \"\n f\"{dataset_type} dataset for project {project_name}.\")\n raise Exception(\n f\"Failed. 
Unexpected state for dataset update: {status} : \"\n f\"{status_message} : {dataset_type} dataset for project {project_name}.\")\n\n logger.info(f\"Added entries to dataset.\")\n\n return status, status_message\n\n except ClientError as err:\n logger.exception(\n f\"Couldn't update dataset: {err.response['Error']['Message']}\")\n raise", "title": "" }, { "docid": "d5b579f085f86db3218f1d0854e68276", "score": "0.48738012", "text": "def create_critical_dataset(train, k=10, scaling=False, col_target=-1, verbose=1): \n # from data_processor import scale, split_by_class\n\n N = len(train)\n T = np.array(train)\n if scaling: # put feature values on the same scale \n X = T[:, :N-1] # assuming that the class label is at the last column\n # np.delete(T, col_target, axis=1) # to remove class label at an arbitrary column\n\n y = T[:, -1][:, None] # column vector format\n X = scale(X, scaler='standardize')\n T = np.hstack([X, y])\n # print(\"(create_critical_dataset) T:\\n{}\\ndim(train): {}, dim(T): {}\".format(T[:5], np.array(train).shape, T.shape))\n\n # separate minority and majority classes\n Tmin, Tmaj = split_by_class(T) # rescaled data\n\n majIDs = set()\n test_cases = np.random.randint(0, len(Tmin), 1) # testing\n\n # for each minority class instance, assign kNNs from majority class \n for i, row in enumerate(Tmin): \n neighbors = get_neighbors(Tmaj, row, n_neighbors=k, \n verify=(verbose > 1) and (i in test_cases))\n majIDs.update(neighbors)\n\n Tmin, Tmaj = split_by_class(train) # use the original majority class \n Tc = np.array(Tmaj)[list(majIDs)] # select the kNN rows from the original Tmaj\n Tc = np.vstack([Tmin, Tc]) # add the minority class data back\n np.random.shuffle(Tc)\n\n if verbose: print(\"(critical_dataset) kNN-identified n={} unique instances in Tmaj => size(Tc)={}\".format(len(majIDs), Tc.shape[0]))\n # print(\"(create_critical_dataset) Tc:\\n{}\\n\".format(Tc[:5]))\n if isinstance(train, list): \n Tc = Tc.tolist() \n return Tc", "title": "" }, { "docid": "970b5516588eb571e96d6e3dc57bcb9c", "score": "0.48717517", "text": "def test_update_chosen_category_dataframe(bk_category, bk_categories_simple, category, expected_sum):\n\n categories = bk_categories_simple\n categories.remove(category)\n\n bk_category.chosen_category = category\n bk_category._Category__update_chosen_category_dataframe()\n\n assert isclose(bk_category.chosen_category_df[bk_category.price].sum(), expected_sum)\n\n for cat in categories:\n assert cat not in unique_values_from_column(bk_category.chosen_category_df, bk_category.category)", "title": "" }, { "docid": "912d9446fff8122ec1e26476a707affc", "score": "0.48687604", "text": "def updateCategory(features, category, args):\n xml = etree.parse(category, parser = etree.XMLParser(remove_comments=False))\n for feature in xml.findall('feature'):\n feature.attrib['url'] = 'features/%s_%s.qualifier.jar' % (feature.attrib['id'], args.version)\n feature.attrib['version'] = '%s.qualifier' % args.version\n print \"Updated version of %s feature\" % feature.attrib['id']\n \n writeXML(xml, category)", "title": "" }, { "docid": "c2845734b2a76e951ceedc598abe64fb", "score": "0.48619065", "text": "def transformCategoricalData(dataset_x, dataset_y):\r\n print(\"Transforming categorical values to numeric...\")\r\n no_of_attributes = dataset_x.shape[1]\r\n le = preprocessing.LabelEncoder()\r\n\r\n for col in range(no_of_attributes):\r\n column_values = dataset_x[:,col]\r\n column_values_t = le.fit_transform(column_values)\r\n # print(\"###col {0} unique values:{1} \".format(col, 
np.unique(column_values_t)))\r\n dataset_x[:,col] = column_values_t\r\n # print(dataset_x)\r\n dataset_x = encodeCategoricalFeatures(dataset_x)\r\n\r\n dataset_y = le.fit_transform(dataset_y)\r\n # print(dataset_y)\r\n\r\n return (dataset_x, dataset_y)", "title": "" }, { "docid": "9b4dd4441920afae5efeeb233785081e", "score": "0.4859133", "text": "def update(self, request, pk=None):\n \n resolutionuser = ResolutionUser.objects.get(user=request.auth.user)\n\n resolution = Resolution.objects.get(pk=pk)\n resolution.title = request.data[\"title\"]\n resolution.publication_date = request.data[\"publication_date\"]\n resolution.content = request.data[\"content\"]\n resolution.completed = request.data[\"completed\"]\n resolution.user = resolutionuser\n\n category = Category.objects.get(pk=request.data[\"category_id\"])\n resolution.category = category\n resolution.save()\n\n return Response({}, status=status.HTTP_204_NO_CONTENT)", "title": "" }, { "docid": "4352a6e6dd51cd856ecc2c97a1cb5b10", "score": "0.48586422", "text": "def build(self, c: ConfigurationIce):\n path_sar = './Dataset/Labelled_SAR_Data/'\n path_package = './Dataset/Packaged_Dataset/'\n ds_code = c.name.split('_')[0]\n ds_name = ds_code + '.pickle'\n path_ds = os.path.join(path_package, ds_name)\n\n if os.path.isfile(path_ds):\n print(f'Loading dataset found at {path_ds}')\n t0 = time()\n self.load(path_ds)\n t1 = time()\n print(f'Time to load: {(t1 - t0) / 60} minutes.')\n else:\n print(f'No dataset found at {path_ds}\\n'\n f'Building dataset.')\n path_f = check_input_path(path_sar, ds_code)\n with open(path_f, 'rb') as f:\n # dict containing dataset (ds) of paths and labels (pl)\n ds_pl = pickle.load(f)\n\n # Group crops based on their parent directory (source image) for\n # more efficient opening of auxiliary files in gather() such as\n # product.xml, lutBeta.xml, and lutSigma.xml for example.\n sar_paths = ds_pl['sar_paths']\n dirs = np.char.rsplit(sar_paths[:, 0], sep='/')\n dirs = np.vstack([np.array(row) for row in dirs])\n img_dirs = dirs[:, 4]\n _, n_samples_per_dir = np.unique(img_dirs, return_counts=True)\n groups_idx = np.cumsum(n_samples_per_dir)[:-1]\n groups = np.split(sar_paths, groups_idx)\n self.gather(groups, c)\n self.y = ds_pl['labels']\n\n # Reduce Water Class Dataset. 
(Balanced dataset preferred)\n # unique_labels = np.unique(np.squeeze(self.y), return_counts=True)\n # n_water = unique_labels[1][0]\n # n_max_ice = np.max(unique_labels[1][1:])\n # if n_water > (1.5 * n_max_ice):\n # water_indices = np.where(self.y == c.code_to_label['OW'])[0]\n # ice_indices = np.where(self.y != c.code_to_label['OW'])[0]\n # np.random.seed(0)\n # water_keep = np.random.choice(\n # water_indices, n_max_ice, replace=False)\n # keep_indices = np.sort(np.append(water_keep, ice_indices))\n # self.x = self.x[keep_indices]\n # self.y = self.y[keep_indices]\n # self.t = self.t[keep_indices]\n\n # Balance Dataset\n unique_labels = np.unique(np.squeeze(self.y), return_counts=True)\n n_min_class = np.min(unique_labels[1])\n for i, class_type in enumerate(unique_labels[0]):\n if unique_labels[1][i] != n_min_class:\n class_indices = np.where(self.y == class_type)[0]\n other_indices = np.where(self.y != class_type)[0]\n np.random.seed(0)\n class_keep = np.random.choice(\n class_indices, n_min_class, replace=False)\n keep_indices = np.sort(np.append(class_keep, other_indices))\n self.x = self.x[keep_indices]\n self.y = self.y[keep_indices]\n self.t = self.t[keep_indices]\n\n print(f'Saving Dataset in {path_ds}')\n self.save(path_ds)\n t0 = time()\n self.shuffle()\n self.split()\n self.normalize()\n t1 = time()\n print(f'Time to shuffle, split and normalize: '\n f'{(t1 - t0) / 60} minutes.')", "title": "" }, { "docid": "c67a6fe2c98c2a2a0b1a68466f385777", "score": "0.4854025", "text": "def _encode_categories(self):\n\n logging.info(f'#{self._index()} - Encoding categorical columns...')\n\n def encode(data):\n # encode Sex column\n data['Sex'] = data['Sex'] == 'male'\n\n # encode Name column\n name_cols = data['Name'].apply(\n lambda x: pd.Series(\n [str(x).split(\",\")[0], str(x).split(\", \")[1].split(\".\")[0]], index=['Family name', 'Title']\n )\n )\n data = data.join(name_cols)\n\n # identify Titles with same meaning\n data['Title'].replace({'Mlle': 'Miss', 'Ms': 'Miss', 'Mme': 'Mrs'}, inplace=True)\n\n # group rare Titles\n title_names = (data['Title'].value_counts() < 10)\n data['Title'] = data['Title'].apply(lambda x: 'Misc' if title_names.loc[x] else x)\n\n # create Family size and Alone column from SibSp, Parch cols\n data['Family size'] = data['SibSp'] + data['Parch'] + 1\n data['Alone'] = data['Family size'] == 1\n\n # make 5 equal size groups from Fares\n data['Fare'] = pd.qcut(data['Fare'], 5, labels=False)\n\n # make 5 groups from Ages\n data['Age'] = pd.cut(data['Age'], 5, labels=False)\n\n # rename columns and delete unnecessary features\n data = data.rename(columns={'Sex': 'Male', 'Fare': 'FareBins', 'Age': 'AgeBins'})\n data.drop(['Name', 'SibSp', 'Parch'], axis=1, inplace=True)\n\n return data\n\n self.X = encode(self.X)\n self.X_test = encode(self.X_test)\n\n for col in self.X.columns:\n if self.X[col].dtype != 'float64':\n table = self.X.join(self.y)[[col, 'Survived']].groupby(col, as_index=False).mean()\n table['Survived'] = (table['Survived'] * 100).map('{:.2f} %'.format)\n logging.info(f'Survival ratio by: {col}\\n{table}\\n{\"-\" * 10}\\n')\n\n one_hot_encoder = OneHotEncoder(use_cat_names=True)\n one_hot_columns = one_hot_encoder.fit_transform(self.X[['Title', 'Embarked']])\n one_hot_columns_test = one_hot_encoder.transform(self.X_test[['Title', 'Embarked']])\n self.X = self.X.join(one_hot_columns)\n self.X_test = self.X_test.join(one_hot_columns_test)\n\n self.X.drop(['Family name', 'Title', 'Embarked'], axis=1, inplace=True)\n self.X_test.drop(['Family name', 
'Title', 'Embarked'], axis=1, inplace=True)\n\n logging.info(f'#{self._step_index} - DONE!')", "title": "" }, { "docid": "d1338b928d2e28e16cf22232db265655", "score": "0.4852019", "text": "def main(project_id=None, datasets=None, properties=None):\n # Creating Big Query Client\n bq_client = bigquery.Client(project=project_id)\n # Updating datasets\n dataset_updation_flag = update_datasets(\n bq_client=bq_client,\n project_id=project_id,\n datasets=datasets,\n properties=properties,\n )\n print(\n \"Dataset(s) updation success criteria is {}.\\nHelp: 0-SUCCESS, 1-FAIL\".format(\n dataset_updation_flag\n )\n )", "title": "" }, { "docid": "7fb541af4cbca0e86e503767261b2401", "score": "0.4847025", "text": "def projectDataset(self, sourceDataset, outpuDataset, outputSpatialRef, transform=None):\n arcpy.Project_management(sourceDataset, outpuDataset, outputSpatialRef, transform)\n arcpy.RepairGeometry_management(outpuDataset)\n arcpy.AddMessage('\\n Re-projected to ' + outpuDataset)", "title": "" }, { "docid": "ee7175b24ba39dc412b8dd3130d629ad", "score": "0.48452127", "text": "def _update_temp_dataset(self, temp_dataset: dict):\n self.temp_dataset = temp_dataset", "title": "" }, { "docid": "7291f8f60dcf30c5fd27a465f36a8ccf", "score": "0.48451146", "text": "def change_category(self):\n self.cm.navigate_to()\n if not self.cm.change_category(\"TestCategory1\", \"TestCategoryEdit\"):\n tc_fail(\"Failed during category changing...\")\n mws.recover()", "title": "" }, { "docid": "196a4c4e3388044219076375f0ae4397", "score": "0.48450607", "text": "def rename_dataset(self, name: str, new_name: str) -> None:\n name = name.lower()\n new_name = new_name.lower()\n if name not in self.artifacts:\n raise ValueError('dataset \\'{}\\' does not exist'.format(name))\n if new_name in self.artifacts:\n raise ValueError('dataset \\'{}\\' exists'.format(new_name.lower()))\n if not is_valid_name(new_name):\n raise ValueError('invalid dataset name \\'{}\\''.format(new_name))\n\n self.artifacts[new_name] = self.artifacts[name]\n del self.artifacts[name]\n if name in self.datasets:\n self.datasets[name].existing_name = new_name\n self.datasets[new_name] = self.datasets[name]\n del self.datasets[name]\n if name in self.py_objects:\n self.py_objects[new_name] = self.py_objects[name]\n del self.py_objects[name]", "title": "" }, { "docid": "1ba70374df00347ca76a3a294df238c4", "score": "0.48441643", "text": "def _copy_categories(self, other, include_index=True):\n for name, col, other_col in zip(\n self._column_names, self._columns, other._columns\n ):\n if is_categorical_dtype(other_col) and not is_categorical_dtype(\n col\n ):\n self._data[name] = build_categorical_column(\n categories=other_col.categories,\n codes=col,\n mask=col.mask,\n ordered=other_col.ordered,\n )\n if include_index:\n if self._index is not None:\n self._index._copy_categories(other._index)\n return self", "title": "" }, { "docid": "7d6fb86df67d7eeae4e99bfea0ab5349", "score": "0.48437545", "text": "def replace_dc_county_with_state_data(\n dataset_in: timeseries.MultiRegionDataset,\n) -> timeseries.MultiRegionDataset:\n dc_state_region = pipeline.Region.from_fips(DC_STATE_FIPS)\n dc_county_region = pipeline.Region.from_fips(DC_COUNTY_FIPS)\n\n dc_map = {dc_state_region: dc_county_region}\n\n # aggregate_regions only copies number columns. 
Extract them and re-add to the aggregated\n # dataset.\n dataset_with_dc_county, dataset_without_dc_county = dataset_in.partition_by_region(\n [dc_county_region]\n )\n static_excluding_numbers = dataset_with_dc_county.static.select_dtypes(exclude=\"number\")\n dc_county_dataset = region_aggregation.aggregate_regions(dataset_in, dc_map).add_static_values(\n static_excluding_numbers.reset_index()\n )\n\n return dataset_without_dc_county.append_regions(dc_county_dataset)", "title": "" }, { "docid": "f95ff73894c184dc4989e0d8359eea06", "score": "0.48437393", "text": "def update(self,features):\n raise NotImplementedError", "title": "" }, { "docid": "287c01a10e0d17e1cab5e99e7b8518f5", "score": "0.48385525", "text": "def select_categories(self):\n self.table_repository.load_by_date(self.start_date, self.end_date)\n\n self.table.data['category_code'] = self.table.data.apply(self._select_category, axis=1)\n self.table.data['category'] = self.table.data.apply(self._convert_category_code, axis=1)\n\n self.table_repository.update_categories()", "title": "" }, { "docid": "315dd1255f21dcb80630fa1a33a535a5", "score": "0.4837174", "text": "def setup_class(cls):\n cod.get_data_jhu(data_type=\"all\", region=\"global\", update=True)\n cod.get_data_jhu(data_type=\"all\", region=\"us\", update=True)\n\n cod.get_data_nyt(data_type=\"all\", counties=False, update=True)\n cod.get_data_nyt(data_type=\"all\", counties=True, update=True)", "title": "" }, { "docid": "7b2fd629ed3f215fae2a897ec77a0a89", "score": "0.48358727", "text": "def _update(self, dataset):\n\n if self.obs_normalizer:\n self._update_obs_normalizer(dataset)\n self._update_policy(dataset)\n self._update_vf(dataset)", "title": "" }, { "docid": "e5bbb5c3574615409607c66918d0f72d", "score": "0.48305824", "text": "def _update_meta_data(self, package, meta_data):\n\n for key in meta_data.keys():\n package.set_property(key, meta_data[key], category='simcore')\n\n package.update()", "title": "" }, { "docid": "f404d04cab1d3c39f43f8449fc32a78b", "score": "0.4824943", "text": "def update_category(self, category: str) -> None:\n self.category = category", "title": "" }, { "docid": "01ae13e8812f69b90c5ccc914b95e699", "score": "0.48090515", "text": "def test_label_metadata_categorical(self):\n \n data=[[\"# samples\",\"sample1\",\"sample2\"],[\"feature1\",\"1\",\"2.2\"],[\"feature2\",\"A\",\"B\"]]\n labels, new_data=utilities.label_metadata(data, categorical=[\"feature1\"]) \n \n expected_data=[[\"# samples\",\"sample1\",\"sample2\"],[\"feature1\",\"1\",\"2.2\"],[\"feature2\",\"A\",\"B\"]]\n expected_labels={\"feature1\":\"cat\",\"feature2\":\"cat\"}\n \n self.assertEqual(new_data,expected_data)\n self.assertEqual(labels, expected_labels)", "title": "" }, { "docid": "1f3de3ec50f1440a17cad5eeadb4ebd0", "score": "0.479528", "text": "def main(input_file, output_file):\n logger = logging.getLogger(__name__)\n logger.info('transforming dataset to categorized table')\n df = pd.read_csv(input_file)\n # put the records by category to help with downstream processing\n study_ids = []\n fnames = []\n disk_sizes = []\n obj_sizes = []\n categories = []\n\n for _, row in df.iterrows():\n study_ids.append(row.studyid)\n fnames.append(row.fname)\n disk_sizes.append(row.disk_size)\n obj_sizes.append(row.df_size)\n categories.append('DataFrame')\n study_ids.append(row.studyid)\n fnames.append(row.fname)\n disk_sizes.append(row.disk_size)\n obj_sizes.append(row.isa_size)\n categories.append('ISA')\n\n df_by_cat = pd.DataFrame({\n 'study_id': study_ids,\n 'fname': fnames,\n 
'disk_size': disk_sizes,\n 'size': obj_sizes,\n 'log_size': np.log(obj_sizes),\n 'category': categories\n })\n\n df_sorted = df_by_cat.sort_values(by='category')\n df_sorted.to_csv(output_file, index=None)", "title": "" }, { "docid": "7c7e29fc1599926a9951f6f9ed421b65", "score": "0.47771797", "text": "def do_data_augmentation(self, input_all_dataset: Dataset) -> Dataset:\n try:\n # initialize\n from datamart_isi import entries\n isi_datamart_url = \"http://dsbox02.isi.edu:9001/blazegraph/namespace/datamart3/sparql\"\n datamart_unit = entries.Datamart(connection_url=isi_datamart_url)\n from common_primitives.datamart_augment import Hyperparams as hyper_augment, DataMartAugmentPrimitive\n hyper_augment_default = hyper_augment.defaults()\n hyper_augment_default = hyper_augment_default.replace({\"system_identifier\":\"ISI\"})\n\n # run wikifier first\n augment_times = 0\n\n if self.all_dataset.metadata.query(())['id'].startswith(\"DA_medical_malpractice\"):\n # # this special change only for running for DA_medical dataset, so that we can also use this column as a join candidate\n # # also, due to the reason that both supplied data and searched results are very large, skip wikidata part\n augment_res = self.all_dataset\n # meta = {\n # \"name\": \"SEQNO\",\n # \"structural_type\": str,\n # \"semantic_types\": [\n # \"http://schema.org/Text\",\n # \"http://schema.org/DateTime\",\n # \"https://metadata.datadrivendiscovery.org/types/UniqueKey\"\n # ],\n # \"description\": \"Record Number. SEQNO is a unique number assigned to each record. The assigned numbers are not necessarily continuous or sequential.\"\n # }\n # augment_res.metadata = augment_res.metadata.update(selector=('learningData', ALL_ELEMENTS, 1), metadata = meta)\n search_unit = datamart_unit.search_with_data(query=None, supplied_data=augment_res, need_wikidata=False)\n\n elif self.all_dataset.metadata.query(())['id'].startswith(\"DA_ny_taxi_demand\"):\n augment_res = self.all_dataset\n search_unit = datamart_unit.search_with_data(query=None, supplied_data=augment_res, need_wikidata=False)\n\n else:\n # in general condition, run wikifier first\n search_result_wikifier = entries.DatamartSearchResult(search_result={}, supplied_data=None, query_json={}, search_type=\"wikifier\")\n hyper_temp = hyper_augment_default.replace({\"search_result\":search_result_wikifier.serialize()})\n augment_primitive = DataMartAugmentPrimitive(hyperparams=hyper_temp)\n augment_res = augment_primitive.produce(inputs = self.all_dataset).value\n # this part's code is only used for saving the pipeline afterwards in TA2 system\n self.extra_primitive.add(\"augment\" + str(augment_times))\n self.dump_primitive(augment_primitive, \"augment\" + str(augment_times))\n augment_times += 1\n search_unit = datamart_unit.search_with_data(query=None, supplied_data=augment_res)\n\n # run search, it will return wikidata search results first (if found) and then the general search results with highest score first\n\n all_results1 = search_unit.get_next_page()\n\n for each_search in all_results1:\n if each_search.search_type == \"wikidata\" and len(each_search.search_result[\"p_nodes_needed\"]) > 0:\n hyper_temp = hyper_augment_default.replace({\"search_result\":each_search.serialize()})\n augment_primitive = DataMartAugmentPrimitive(hyperparams=hyper_temp)\n augment_res = augment_primitive.produce(inputs = augment_res).value\n # this part's code is only used for saving the pipeline afterwards in TA2 system\n self.extra_primitive.add(\"augment\" + str(augment_times))\n 
self.dump_primitive(augment_primitive, \"augment\" + str(augment_times))\n augment_times += 1\n\n # you can search another time if you want\n # all_results2 = datamart_unit.search_with_data(query=None, supplied_data=augment_res).get_next_page()\n all_results1.sort(key=lambda x: x.score(), reverse=True)\n\n for each_search in all_results1:\n if each_search.search_type == \"general\":\n # currently augment only once on general search results\n hyper_temp = hyper_augment_default.replace({\"search_result\":each_search.serialize()})\n augment_primitive = DataMartAugmentPrimitive(hyperparams=hyper_temp)\n augment_res = augment_primitive.produce(inputs = augment_res).value\n self.extra_primitive.add(\"augment\" + str(augment_times))\n self.dump_primitive(augment_primitive, \"augment\" + str(augment_times))\n augment_times += 1\n break\n\n # # return the augmented dataset\n original_shape = self.all_dataset[self.problem_info[\"res_id\"]].shape\n _, augment_res_df = d3m_utils.get_tabular_resource(dataset=augment_res, resource_id=None)\n augmented_shape = augment_res_df.shape\n self._logger.info(\"The original dataset shape is (\" + str(original_shape[0]) + \", \" + str(original_shape[1]) + \")\")\n self._logger.info(\"The augmented dataset shape is (\" + str(augmented_shape[0]) + \", \" + str(augmented_shape[1]) + \")\")\n\n return augment_res\n\n except:\n self._logger.error(\"Augment Failed!\")\n traceback.print_exc()\n return self.all_dataset", "title": "" }, { "docid": "e30c69dadd724ad37969f021735734f3", "score": "0.47725728", "text": "def update():\n conn = sqlite3.connect(\"data/plants/itis_plant_checklist.sqlite\")\n c = conn.cursor()\n\n # iterate through taxonomic_units and parse it\n # kingdom_id == 3 is where all the plant data is\n for row in c.execute(\"\"\"SELECT rank_id, rank_name\n FROM taxon_unit_types\n WHERE kingdom_id=3\"\"\"):\n print ETL_taxonomy(row)\n\n for row in c.execute(\"\"\"SELECT s.complete_name, s.rank_id,\n s.name_usage, s.unaccept_reason,\n p.complete_name, p.rank_id,\n p.name_usage\n FROM taxonomic_units s\n LEFT JOIN taxonomic_units p\n ON s.parent_tsn = p.tsn\n WHERE s.kingdom_id=3\n ORDER BY s.tsn ASC\"\"\"):\n print ETL_plant(row)", "title": "" }, { "docid": "d92d25a3c8ca49f234c975b6e17789b1", "score": "0.47674298", "text": "def save_category(self):\n return self.save()", "title": "" }, { "docid": "2a4c564960a221d71d374a9f844a9e71", "score": "0.47641313", "text": "def UpdateData(self):\n pass", "title": "" }, { "docid": "f6070a6b72688f6be7a30d7dbab337d2", "score": "0.47606385", "text": "def update_categories_from_file(\n desired_name2id: dict, coco_path: str, save_path: str\n) -> None:\n # load source coco dict\n coco_source = load_json(coco_path)\n\n # update categories\n coco_target = update_categories(desired_name2id, coco_source)\n\n # save modified coco file\n save_json(coco_target, save_path)", "title": "" }, { "docid": "6a7b5c650aa99328cffccfb913c70a0b", "score": "0.47576496", "text": "def pre_process_categories_file(df_input_data, partition_suffix):\r\n global out_path\r\n if partition_suffix.__contains__(\"test\"):\r\n test_features, test_labels = separate_feat_label(df_input_data) # np's for feat and label\r\n with open(os.path.join(out_path, \"test_features.csv\"), 'a', newline='') as f:\r\n pd.DataFrame(test_features).to_csv(f, header=False, index=False)\r\n\r\n with open(os.path.join(out_path, \"test_labels.csv\"), 'a', newline='') as f:\r\n pd.DataFrame(test_labels).to_csv(f, header=False, index=False)\r\n else:\r\n for dev_class in 
device_categories:\r\n df_data_per_category = df_input_data[df_input_data.device_category == dev_class]\r\n if partition_suffix.__contains__(\"train\"):\r\n print(f\"writing {dev_class}\")\r\n train_features, train_labels = separate_feat_label(df_data_per_category)\r\n with open(os.path.join(out_path, f\"train_features_{dev_class}.csv\"), 'a', newline='') as f:\r\n pd.DataFrame(train_features).to_csv(f, header=False, index=False)\r\n with open(os.path.join(out_path, f\"train_labels_{dev_class}.csv\"), 'a', newline='') as f:\r\n pd.DataFrame(train_labels).to_csv(f, header=False, index=False)\r\n else:\r\n valid_features, valid_labels = separate_feat_label(df_data_per_category)\r\n with open(os.path.join(out_path, f\"valid_features_{dev_class}.csv\"), 'a', newline='') as f:\r\n pd.DataFrame(valid_features).to_csv(f, header=False, index=False)\r\n with open(os.path.join(out_path, f\"valid_labels_{dev_class}.csv\"), 'a', newline='') as f:\r\n pd.DataFrame(valid_labels).to_csv(f, header=False, index=False)\r\n return", "title": "" }, { "docid": "03a573d977f64fa3b489c8a171e8c0cf", "score": "0.4756199", "text": "def _add_auxiliary_data_write_collective_information(self, info):\n data_type = info[\"data_type\"]\n if data_type not in self._auxiliary_data_group:\n self._auxiliary_data_group.create_group(data_type)\n group = self._auxiliary_data_group[data_type]\n\n ds = group.create_dataset(**info[\"dataset_creation_params\"])\n for key, value in info[\"dataset_attrs\"].items():\n ds.attrs[key] = value", "title": "" }, { "docid": "4c10e66ce6c701ca38babbe7f015d601", "score": "0.47490934", "text": "def update(self, trans, encoded_dataset_id, payload=None, **kwd):\n library_dataset = self.ld_manager.get(trans, managers_base.decode_id(self.app, encoded_dataset_id))\n updated = self.ld_manager.update(trans, library_dataset, payload)\n serialized = self.ld_manager.serialize(trans, updated)\n return serialized", "title": "" }, { "docid": "103bfc1fd70d719ef0859ff95da82e93", "score": "0.47488263", "text": "def load_dataset_custom(datadir, dset_name, feature, split_cfg, augVal=False, dataAug=True):\n \n if(not(os.path.exists(datadir))):\n os.mkdir(datadir)\n\n if(dset_name==\"cifar10\"):\n num_cls=10\n cifar_test_transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])\n if(dataAug):\n cifar_transform = transforms.Compose([transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])\n else:\n cifar_transform = cifar_test_transform\n \n fullset = torchvision.datasets.CIFAR10(root=datadir, train=True, download=True, transform=cifar_transform)\n test_set = torchvision.datasets.CIFAR10(root=datadir, train=False, download=True, transform=cifar_test_transform)\n if(feature==\"classimb\"):\n if(\"sel_cls_idx\" in split_cfg):\n train_set, val_set, lake_set, imb_cls_idx = create_perclass_imb(dset_name, fullset, split_cfg, num_cls, augVal)\n else:\n train_set, val_set, lake_set, imb_cls_idx = create_class_imb(dset_name, fullset, split_cfg, num_cls, augVal)\n print(\"CIFAR-10 Custom dataset stats: Train size: \", len(train_set), \"Val size: \", len(val_set), \"Lake size: \", len(lake_set))\n return train_set, val_set, test_set, lake_set, imb_cls_idx, num_cls\n if(feature==\"ood\"):\n train_set, val_set, test_set, lake_set, ood_cls_idx = create_ood_data(dset_name, fullset, test_set, split_cfg, num_cls, augVal)\n print(\"CIFAR-10 Custom 
dataset stats: Train size: \", len(train_set), \"Val size: \", len(val_set), \"Lake size: \", len(lake_set), \"Test set: \", len(test_set))\n return train_set, val_set, test_set, lake_set, ood_cls_idx, split_cfg['num_cls_idc']\n if(feature==\"vanilla\"): \n X_tr, y_tr, X_val, y_val, X_unlabeled, y_unlabeled, train_set, val_set, lake_set = getVanillaData(dset_name, fullset, split_cfg)\n print(\"CIFAR-10 Custom dataset stats: Train size: \", len(train_set), \"Val size: \", len(val_set), \"Lake size: \", len(lake_set))\n return train_set, val_set, test_set, lake_set, num_cls\n\n if(feature==\"duplicate\"):\n X_tr, y_tr, X_val, y_val, X_unlabeled_rep, y_unlabeled_rep, train_set, val_set, lake_set = getDuplicateData(dset_name, fullset, split_cfg)\n print(\"CIFAR-10 Custom dataset stats: Train size: \", len(train_set), \"Val size: \", len(val_set), \"Lake size: \", len(lake_set))\n return train_set, val_set, test_set, lake_set, num_cls\n\n if(dset_name==\"mnist\"):\n num_cls=10\n mnist_test_transform = transforms.Compose([transforms.Resize((28, 28)), transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])\n if(dataAug):\n mnist_transform = transforms.Compose([transforms.RandomCrop(28, padding=4), transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])\n else:\n mnist_transform = mnist_test_transform\n fullset = torchvision.datasets.MNIST(root=datadir, train=True, download=True, transform=mnist_transform)\n test_set = torchvision.datasets.MNIST(root=datadir, train=False, download=True, transform=mnist_test_transform)\n # fullset.data = torch.repeat_interleave(fullset.data.unsqueeze(1), 3, 1).float()\n if(feature==\"classimb\"):\n if(\"sel_cls_idx\" in split_cfg):\n train_set, val_set, lake_set, imb_cls_idx = create_perclass_imb(dset_name, fullset, split_cfg, num_cls, augVal)\n else:\n train_set, val_set, lake_set, imb_cls_idx = create_class_imb(dset_name, fullset, split_cfg, num_cls, augVal)\n print(\"MNIST Custom dataset stats: Train size: \", len(train_set), \"Val size: \", len(val_set), \"Lake size: \", len(lake_set))\n return train_set, val_set, test_set, lake_set, imb_cls_idx, num_cls\n if(feature==\"ood\"):\n train_set, val_set, test_set, lake_set, ood_cls_idx = create_ood_data(dset_name, fullset, test_set, split_cfg, num_cls, augVal)\n print(\"MNIST Custom dataset stats: Train size: \", len(train_set), \"Val size: \", len(val_set), \"Lake size: \", len(lake_set), \"Test set: \", len(test_set))\n return train_set, val_set, test_set, lake_set, ood_cls_idx, split_cfg['num_cls_idc']\n if(feature==\"vanilla\"): \n X_tr, y_tr, X_val, y_val, X_unlabeled, y_unlabeled, train_set, val_set, lake_set = getVanillaData(dset_name, fullset, split_cfg)\n print(\"MNIST Custom dataset stats: Train size: \", len(train_set), \"Val size: \", len(val_set), \"Lake size: \", len(lake_set))\n return train_set, val_set, test_set, lake_set, num_cls\n\n if(feature==\"duplicate\"):\n X_tr, y_tr, X_val, y_val, X_unlabeled_rep, y_unlabeled_rep, train_set, val_set, lake_set = getDuplicateData(dset_name, fullset, split_cfg)\n print(\"MNIST Custom dataset stats: Train size: \", len(train_set), \"Val size: \", len(val_set), \"Lake size: \", len(lake_set))\n return train_set, val_set, test_set, lake_set, num_cls\n\n if(dset_name==\"svhn\"):\n num_cls=10\n SVHN_test_transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])\n if(dataAug):\n SVHN_transform = transforms.Compose([transforms.RandomCrop(32, padding=4), transforms.ToTensor(), 
transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])\n else:\n SVHN_transform = SVHN_test_transform\n \n fullset = torchvision.datasets.SVHN(root=datadir, split=\"train\", download=True, transform=SVHN_transform)\n test_set = torchvision.datasets.SVHN(root=datadir, split=\"test\", download=True, transform=SVHN_test_transform)\n if(feature==\"classimb\"):\n if(\"sel_cls_idx\" in split_cfg):\n train_set, val_set, lake_set, imb_cls_idx = create_perclass_imb(dset_name, fullset, split_cfg, num_cls, augVal)\n else:\n train_set, val_set, lake_set, imb_cls_idx = create_class_imb(dset_name, fullset, split_cfg, num_cls, augVal)\n print(\"SVHN Custom dataset stats: Train size: \", len(train_set), \"Val size: \", len(val_set), \"Lake size: \", len(lake_set))\n return train_set, val_set, test_set, lake_set, imb_cls_idx, num_cls\n if(feature==\"ood\"):\n train_set, val_set, test_set, lake_set, ood_cls_idx = create_ood_data(dset_name, fullset, test_set, split_cfg, num_cls, augVal)\n print(\"SVHN Custom dataset stats: Train size: \", len(train_set), \"Val size: \", len(val_set), \"Lake size: \", len(lake_set), \"Test set: \", len(test_set))\n return train_set, val_set, test_set, lake_set, ood_cls_idx, split_cfg['num_cls_idc']\n if(feature==\"vanilla\"): \n X_tr, y_tr, X_val, y_val, X_unlabeled, y_unlabeled, train_set, val_set, lake_set = getVanillaData(dset_name, fullset, split_cfg)\n print(\"SVHN Custom dataset stats: Train size: \", len(train_set), \"Val size: \", len(val_set), \"Lake size: \", len(lake_set))\n return train_set, val_set, test_set, lake_set, num_cls\n\n if(feature==\"duplicate\"):\n X_tr, y_tr, X_val, y_val, X_unlabeled_rep, y_unlabeled_rep, train_set, val_set, lake_set = getDuplicateData(dset_name, fullset, split_cfg)\n print(\"SVHN Custom dataset stats: Train size: \", len(train_set), \"Val size: \", len(val_set), \"Lake size: \", len(lake_set))\n return train_set, val_set, test_set, lake_set, num_cls\n\n if(dset_name==\"cifar100\"):\n num_cls=100\n cifar100_transform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))\n ])\n fullset = torchvision.datasets.CIFAR100(root=datadir, train=True, download=True, transform=cifar100_transform)\n test_set = torchvision.datasets.CIFAR100(root=datadir, train=False, download=True, transform=cifar100_transform)\n if(feature==\"classimb\"):\n if(\"sel_cls_idx\" in split_cfg):\n train_set, val_set, lake_set, imb_cls_idx = create_perclass_imb(dset_name, fullset, split_cfg, num_cls, augVal)\n else:\n train_set, val_set, lake_set, imb_cls_idx = create_class_imb(dset_name, fullset, split_cfg, num_cls, augVal)\n print(\"CIFAR-100 Custom dataset stats: Train size: \", len(train_set), \"Val size: \", len(val_set), \"Lake size: \", len(lake_set))\n return train_set, val_set, test_set, lake_set, imb_cls_idx, num_cls\n if(feature==\"ood\"):\n train_set, val_set, test_set, lake_set, ood_cls_idx = create_ood_data(dset_name, fullset, test_set, split_cfg, num_cls, augVal)\n print(\"CIFAR-100 Custom dataset stats: Train size: \", len(train_set), \"Val size: \", len(val_set), \"Lake size: \", len(lake_set), \"Test set: \", len(test_set))\n return train_set, val_set, test_set, lake_set, ood_cls_idx, num_cls\n \n if(feature==\"vanilla\"):\n X_tr, y_tr, X_val, y_val, X_unlabeled, y_unlabeled, train_set, val_set, lake_set = getVanillaData(dset_name, fullset, split_cfg)\n print(\"CIFAR-100 Custom dataset stats: Train size: \", len(train_set), \"Val size: \", len(val_set), \"Lake size: 
\", len(lake_set))\n return train_set, val_set, test_set, lake_set, num_cls\n\n if(feature==\"duplicate\"):\n X_tr, y_tr, X_val, y_val, X_unlabeled_rep, y_unlabeled_rep, train_set, val_set, lake_set = getDuplicateData(dset_name, fullset, split_cfg)\n print(\"CIFAR-100 Custom dataset stats: Train size: \", len(train_set), \"Val size: \", len(val_set), \"Lake size: \", len(lake_set))\n return train_set, val_set, test_set, lake_set, num_cls\n \n \n if(dset_name==\"breast_density\"):\n num_cls=4\n input_size=224\n data_transforms = {\n 'train': transforms.Compose([\n transforms.RandomResizedCrop(input_size),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ]),\n 'test': transforms.Compose([\n transforms.Resize(input_size),\n transforms.CenterCrop(input_size),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ]),\n }\n\n fullset = datasets.ImageFolder(os.path.join(datadir, 'train'), data_transforms['train'])\n test_set = datasets.ImageFolder(os.path.join(datadir, 'test'), data_transforms['test'])\n if(feature==\"classimb\"):\n train_set, val_set, lake_set, imb_cls_idx = create_perclass_imb(dset_name, fullset, split_cfg, num_cls, augVal)\n print(\"Breast-density Custom dataset stats: Train size: \", len(train_set), \"Val size: \", len(val_set), \"Lake size: \", len(lake_set))\n return train_set, val_set, test_set, lake_set, imb_cls_idx, num_cls", "title": "" }, { "docid": "12ed3e996382e4908e6b20e156d7482c", "score": "0.4745683", "text": "def set_dataset(self, data_name, params={}):\n self.logger.info(\"load_basic dataset...\")\n if data_name == Dataset.KASTEREN:\n from hassbrain_algorithm.datasets.kasteren.kasteren import DatasetKasteren\n self._dataset_enm = Dataset.KASTEREN\n self._dataset = DatasetKasteren(**params)\n self._dataset.set_file_paths(self.load_paths(Dataset.KASTEREN))\n\n elif data_name == Dataset.PENDIGITS:\n from hassbrain_algorithm.datasets.pendigit.pendigits import DatasetPendigits\n self._dataset_enm = Dataset.PENDIGITS\n self._dataset = DatasetPendigits(**params)\n self._dataset.set_file_paths(self.load_paths(Dataset.PENDIGITS))\n\n elif data_name == Dataset.HASS_TESTING:\n from hassbrain_algorithm.datasets.homeassistant import DatasetHomeassistant\n self._dataset_enm = Dataset.HASS_TESTING\n self._dataset = DatasetHomeassistant(**params)\n self._dataset.set_file_paths(self.load_paths(Dataset.HASS_TESTING))\n\n elif data_name == Dataset.HASS_CHRIS:\n from hassbrain_algorithm.datasets.homeassistant import DatasetHomeassistant\n self._dataset_enm = Dataset.HASS_CHRIS\n self._dataset = DatasetHomeassistant(**params)\n self._dataset.set_file_paths(self.load_paths(Dataset.HASS_CHRIS))\n\n elif data_name == Dataset.HASS_SIMON:\n from hassbrain_algorithm.datasets.homeassistant import DatasetHomeassistant\n self._dataset_enm = Dataset.HASS_SIMON\n self._dataset = DatasetHomeassistant(**params)\n self._dataset.set_file_paths(self.load_paths(Dataset.HASS_SIMON))\n\n elif data_name == Dataset.HASS:\n from hassbrain_algorithm.datasets.homeassistant import DatasetHomeassistant\n self._dataset_enm = Dataset.HASS\n self._dataset = DatasetHomeassistant(**params)\n #elif data_name == Dataset.MAVPAD2005:\n # return\n #elif data_name == Dataset.ARAS:\n # return\n #elif data_name == Dataset.CASAS_ARUBA:\n # return", "title": "" }, { "docid": "5ef816177782aa2aa8fa7a202e639b51", "score": "0.47423664", "text": "def test_dataset_update_multiple_datasets(\n client, 
runner, data_repository, directory_tree, params\n):\n path1 = client.path / 'data' / 'dataset-1' / 'CHANGES.rst'\n path2 = client.path / 'data' / 'dataset-2' / 'CHANGES.rst'\n # Add dataset to project\n result = runner.invoke(\n cli, [\n 'dataset', 'add', '--create', 'dataset-1', '--ref', 'v0.3.0', '-s',\n 'CHANGES.rst',\n 'https://github.com/SwissDataScienceCenter/renku-python.git'\n ],\n catch_exceptions=False\n )\n assert 0 == result.exit_code\n result = runner.invoke(\n cli, [\n 'dataset', 'add', '--create', 'dataset-2', '--ref', 'v0.3.0', '-s',\n 'CHANGES.rst',\n 'https://github.com/SwissDataScienceCenter/renku-python.git'\n ],\n catch_exceptions=False\n )\n assert 0 == result.exit_code\n\n assert 'v0.4.0' not in path1.read_text()\n assert 'v0.4.0' not in path2.read_text()\n\n result = runner.invoke(\n cli, ['dataset', 'update'] + params, catch_exceptions=False\n )\n assert 0 == result.exit_code\n\n assert 'v0.4.0' in path1.read_text()\n assert 'v0.4.0' in path2.read_text()", "title": "" }, { "docid": "644b181921a3acddad7729d41a530213", "score": "0.47401753", "text": "def set_dataset_info():\n task_id = request.form.get('task_id')\n name_path = request.form.get('name_path')\n dataset = DataSetApi(name_path)\n result = dataset.set_dataset_info(task_id)\n if result:\n return jsonify({'status': 200, 'task_id': task_id,\n 'data': _('The dataset is set successfully.')})\n return jsonify({'status': 400, 'task_id': task_id,\n 'data': _('The dataset failed to be set.')})", "title": "" }, { "docid": "5dee81f5ada5f42cf540045ead574195", "score": "0.47339278", "text": "def update(self):\n self.data = self._upcloud.data.get(self.uuid)", "title": "" }, { "docid": "6f54403452c8304d2db4371305c880d4", "score": "0.47331655", "text": "def update_dataset(ds):\n\n # Create the data directory if it doesn't already exist\n if not os.path.exists(CONFIG['data_path']):\n os.makedirs(CONFIG['data_path'])\n\n # Get the url endpoint for this dataset\n target = '%s.%s' % (ds['endpoint'], CONFIG['ds_format'])\n target = target + '?$limit=50000'\n req = requests.get(target)\n\n # Generate valid file path\n write_path = CONFIG['data_path'] + create_valid_file_name(ds)\n\n # If API call successful\n if req.status_code == 200:\n confirm = 'Writing from {0} to {1}'.format(target, write_path)\n logging.info(confirm)\n \n with open(write_path, 'w') as f:\n f.write(req.text.encode('utf8'))\n\n else:\n logging.info('Call to \"update_dataset\" for \"%s\" failed' % ds['name'])", "title": "" }, { "docid": "5b41c773dad198f3d785860892b50f3b", "score": "0.47160593", "text": "def _convert_2d_dataset(self):\n new_samples, new_labels = [], []\n is_not_empty_slice = lambda fname: self.get_dcm_arr(fname).any()\n for i, case_id in enumerate(self.samples):\n files = glob.glob(\n f'{self.datadir}/{self.mode}/{case_id}/{self.seq_type}/*.dcm')\n files = list(filter(is_not_empty_slice, files))\n new_samples.extend(files)\n if self.labels is not None:\n new_labels.extend([self.labels[i]] * len(files))\n\n if self.labels is not None:\n self.labels = np.array(new_labels)\n self.samples = np.array(new_samples)", "title": "" }, { "docid": "262240fb66ab84db80fa8d8674f91170", "score": "0.4711422", "text": "def __init__(self, cfg):\n super(CommonVisionDataset, self).__init__(cfg)\n\n dataset_cls = getattr(paddle.vision.datasets, cfg.pop('class_name'))\n transform = build_transforms(cfg.pop('transforms', None))\n self.return_cls = cfg.pop('return_cls', True)\n\n param_dict = {}\n param_names = list(dataset_cls.__init__.__code__.co_varnames)\n if 
'transform' in param_names:\n param_dict['transform'] = transform\n for name in param_names:\n if name in cfg:\n param_dict[name] = cfg.get(name)\n\n self.dataset = dataset_cls(**param_dict)", "title": "" }, { "docid": "f541dbe69d6eb9b6c6c8d8f9ff45e856", "score": "0.47109142", "text": "def give_CUB200_datasets(opt):\n image_sourcepath = opt.source_path+'/images'\n #Find available data classes.\n image_classes = sorted([x for x in os.listdir(image_sourcepath) if '._' not in x], key=lambda x: int(x.split('.')[0]))\n #Make a index-to-labelname conversion dict.\n conversion = {int(x.split('.')[0]):x.split('.')[-1] for x in image_classes}\n #Generate a list of tuples (class_label, image_path)\n image_list = {int(key.split('.')[0]):sorted([image_sourcepath+'/'+key+'/'+x for x in os.listdir(image_sourcepath+'/'+key) if '._' not in x]) for key in image_classes}\n image_list = [[(key,img_path) for img_path in image_list[key]] for key in image_list.keys()]\n image_list = [x for y in image_list for x in y]\n\n #Image-dict of shape {class_idx:[list of paths to images belong to this class] ...}\n image_dict = {}\n for key, img_path in image_list:\n key = key-1\n if not key in image_dict.keys():\n image_dict[key] = []\n image_dict[key].append(img_path)\n\n keys = sorted(list(image_dict.keys()))\n\n #Following \"Deep Metric Learning via Lifted Structured Feature Embedding\", we use the first half of classes for training.\n train,test = keys[:len(keys)//2], keys[len(keys)//2:]\n train_image_dict, val_image_dict = {key:image_dict[key] for key in train},{key:image_dict[key] for key in test}\n\n\n train_dataset = BaseTripletDataset(train_image_dict, opt, samples_per_class=opt.samples_per_class)\n val_dataset = BaseTripletDataset(val_image_dict, opt, is_validation=True)\n eval_dataset = BaseTripletDataset(train_image_dict, opt, is_validation=True)\n\n train_dataset.conversion = conversion\n val_dataset.conversion = conversion\n eval_dataset.conversion = conversion\n\n return {'training':train_dataset, 'testing':val_dataset, 'evaluation':eval_dataset}", "title": "" }, { "docid": "4f63820ffc0c20c46bb39e2f5accfaea", "score": "0.47106323", "text": "def test_put_request_updates_a_category_and_returns_200(self):\n response = self.client.put(\"/categories/2/\", data={\n 'title': 'API',\n 'color': '#987654',\n }, follow=True)\n self.assertEquals(response.status_code, status.HTTP_200_OK)", "title": "" }, { "docid": "1ead71b12ccfc6e8bb29f7721084b2bf", "score": "0.47086912", "text": "def transform_dataset(dataset, inv_idx, fwd_idx):\n \n def featurizer(inst):\n global i\n start_id = dataset.total_features()\n n1 = float\n n2 = int\n n3 = int\n fvec = metapy.learn.FeatureVector(inst.weights)\n f = open('hygiene/hygiene.dat.additional','r')\n line = f.readlines()\n n1 = float(line[inst.id].strip().split(',')[len(line[inst.id].strip().split(','))-1])\n n2 = int(line[inst.id].strip().split(',')[len(line[inst.id].strip().split(','))-2])\n f.close()\n n1 = round(n1,1)\n \"# modify fvec to add new features\"\n fvec[start_id + 1] = n1\n fvec[start_id + 2] = n2\n \"fvec[start_id + (larger_offset)] = value\"\n \"# etc...\"\n return fvec\n\n def labeler(inst):\n return dataset.label(inst)\n \n total_feature_count = dataset.total_features()+2\n instances = [inst for inst in dataset]\n new_dset = metapy.classify.MulticlassDataset(instances,\n total_feature_count,\n featurizer,\n labeler)\n\n return new_dset # change me, if you want", "title": "" }, { "docid": "12cc3c658ba862fb372afb938a0537c0", "score": "0.4701693", "text": "def 
main():\n dados_json = get_data_api('https://data.cityofnewyork.us/resource/bqiq-cu78.json')\n\n # The counties of NY\n counties = {}\n categoria_ofensa = []\n\n for dado in dados_json:\n\n # add the categories to an array, because every borough must have all\n # the categories, even if there is no crime in that category\n cat_ofensa_atual = dado['offense_category']\n if cat_ofensa_atual not in categoria_ofensa:\n categoria_ofensa.append(cat_ofensa_atual)\n\n # If the county has not appeared yet\n if dado['county'] not in counties:\n counties[dado['county']] = {}\n counties[dado['county']][dado['offense_category']] = 0 \n\n # If the county has already appeared, but no crime of this category has appeared yet\n elif dado['offense_category'] not in counties[dado['county']]:\n counties[dado['county']][dado['offense_category']] = 0 \n\n counties[dado['county']][dado['offense_category']] += 1 \n \n finalera = []\n\n for categoria in categoria_ofensa:\n data = []\n for bairro in counties:\n if categoria not in counties[bairro]:\n counties[bairro][categoria] = 0\n data.append(counties[bairro][categoria])\n finalera.append({\n 'name': categoria,\n 'data': data\n })\n print (finalera)\n\n # ! this commented-out block inverts the chart\n\n # for bairro in counties:\n # data = []\n # # Set all the categories that borough doesn't have to the value 0\n # for categoria in categoria_ofensa:\n # if categoria not in counties[bairro]:\n # counties[bairro][categoria] = 0\n # # data.append(counties[bairro][categoria])\n\n # finalera.append({\n # 'name': categoria,\n # 'data': data\n # })\n # print(json.dumps(finalera,indent=4))\n \n json_retorno = {\n \"chart\": {\n \"type\": 'column'\n },\n \"title\": {\n \"text\": 'Crimes de ódio em NYC'\n },\n \"subtitle\": {\n \"text\": 'Categoria da Ofensa por bairro'\n },\n \"series\": finalera,\n \"xAxis\": {\n 'categories': list(counties.keys())\n },\n 'yAxis': {\n 'min': 0,\n 'title': {\n \"text\": 'Quantidade crimes'\n }\n },\n 'tooltip': {\n 'headerFormat': '<span style=\"font-size:10px\">{point.key}</span><table>',\n 'pointFormat': '<tr><td style=\"color:{series.color};padding:0\">{series.name}: </td>' +\n '<td style=\"padding:0\"><b>{point.y} mm</b></td></tr>',\n 'footerFormat': '</table>',\n 'shared': True,\n 'useHTML': True\n },\n 'plotOptions': {\n 'column': {\n 'pointPadding': 0.2,\n 'borderWidth': 0\n }\n },\n }\n \n \n # print(categoria_ofensa)\n\n with open('basic_column.json','w') as file:\n # file.write(json.dumps(finalera,indent=4))\n file.write(json.dumps(json_retorno,indent=4))", "title": "" }, { "docid": "f07b0c1f68cc7270ca62a94f80c50b1e", "score": "0.47005016", "text": "def create(self, dataset=None, user=None, collect_type='', impl_details=None, **fields):\n\n if dataset is None:\n # Create a new dataset (TITLE is for automatic GUID creation)\n dataset = Dataset.objects.create(user=user, type=collect_type, title=fields['title'])\n\n #meta_text = '{\"field_values\": [{\"cust-dataid\": \"dataid-%s\"}]}' % dataset.id\n if not fields.get('language', None):\n fields['language'] = user.language\n\n # If editing a SELF_PUBLISH type, the file data is not sent, so retrieve it from the previous revision\n if int(collect_type) == CollectTypeChoices().SELF_PUBLISH and 'file_size' not in fields.keys():\n prev_revision = DatasetRevision.objects.filter(dataset=dataset).order_by('-id').first()\n size = prev_revision.size\n file_name = prev_revision.filename\n elif int(collect_type) == CollectTypeChoices().URL or int(collect_type) == CollectTypeChoices().WEBSERVICE:\n size 
= 0\n file_name = fields['file_name']\n else:\n size = fields['file_size']\n file_name = fields['file_name']\n\n dataset_revision = DatasetRevision.objects.create(\n dataset=dataset,\n user_id=user.id,\n status=fields['status'],\n category=Category.objects.get(id=fields['category']),\n filename=file_name,\n end_point=fields['end_point'],\n impl_type=fields['impl_type'],\n impl_details=impl_details,\n size=size,\n license_url=fields['license_url'],\n spatial=fields['spatial'],\n frequency=fields['frequency'],\n mbox=fields['mbox'],\n doc=fields['doc'] if 'doc' in fields else None\n )\n\n DatasetI18n.objects.create(\n dataset_revision=dataset_revision,\n language=fields['language'],\n title=fields['title'].strip().replace('\\n', ' '),\n description=fields['description'].strip(),\n notes=fields['notes'].strip().replace('\\n', ' ')\n )\n\n dataset_revision.add_tags(fields['tags'])\n dataset_revision.add_sources(fields['sources'])\n\n return dataset, dataset_revision", "title": "" }, { "docid": "4ca95f83b656b99300177460d0278d40", "score": "0.46972984", "text": "def set_project(self, project: ResearchProject):\n self.data_context.data_model.set_project(project)\n for screen in self.screens.values():\n screen.update_project(project)", "title": "" }, { "docid": "fb6d8fc109aec2b2f0c1067cb9e344b1", "score": "0.46866286", "text": "async def update_data_status(self, **kwargs):\n await self._send_manager_command(\n ExecutorProtocol.UPDATE,\n extra_fields={ExecutorProtocol.UPDATE_CHANGESET: kwargs},\n )", "title": "" }, { "docid": "5b72c250d754da3cb5ee02027346fed7", "score": "0.46825007", "text": "def apply_target_transf(dataset: DataFrame,\n target_name: str,\n dict_cat: Dict[str, int]) -> DataFrame:\n dataset[target_name] = dataset[target_name].map(dict_cat)\n return dataset", "title": "" }, { "docid": "9577ee910344061f258bf89bfcbd01e0", "score": "0.46791086", "text": "def changeCategory(self, newCategory):\n self.workflow.setRequestCategory(newCategory)\n return", "title": "" }, { "docid": "4ad69639936c67af9347f07e153af640", "score": "0.4678754", "text": "def setCategory(self, val='True', **kwargs):\n \n pass", "title": "" }, { "docid": "1c283cbf18de9e1c40f7ac1c38f16630", "score": "0.46779895", "text": "async def handle_patch_dataset(name: str, featuretypes: UploadFile = File(...)):\n return patch_dataset(name, featuretypes)", "title": "" }, { "docid": "664da01d83555f8ac90543c886df3672", "score": "0.4675458", "text": "def set_category_for_drone(self, drone, category):\n query = 'UPDATE drones SET category=? 
WHERE designation=?'\n self._execute(query, category, drone)", "title": "" }, { "docid": "5b4090c18c82eda815bd89477d394f41", "score": "0.46751812", "text": "def build_dataset(self):\n return", "title": "" }, { "docid": "d8a86d5d25327412a8190c3c7131ef13", "score": "0.46704447", "text": "def create_dataset(self):\n dataset_title = unique_dataset_name()\n logging.info(f\"Creating a new dataset {dataset_title}\")\n self.driver.get(os.environ['GIGANTUM_HOST'] + \"/datasets/local\")\n self.datasets_header.wait_to_appear()\n add_project_elts = AddProjectElements(self.driver)\n add_project_elts.create_new_button.click()\n add_project_elts.project_title_input.find().send_keys(dataset_title)\n add_project_elts.project_description_input.find().send_keys(unique_project_description())\n add_project_elts.project_continue_button().click()\n self.dataset_title.wait_to_appear()\n # Time sleep is consistent and necessary\n time.sleep(1)\n return dataset_title", "title": "" }, { "docid": "03153844dd1a9ba8434e6c43dee22c13", "score": "0.46693575", "text": "def replace_deps(data):\n datasets=['train','test','dev']\n\n for dataset in datasets:\n d = data[dataset]\n for i in range(len(d['context_deEdges'])):\n edges = data[dataset]['context_deEdges'][i]\n for j,edge in enumerate(edges):\n data[dataset]['context_deEdges'][i][j][2] = data['dep2id'][data[dataset]['context_deEdges'][i][j][2]]\n q_edges = data[dataset]['q_edges'][i]\n for k,q_edge in enumerate(q_edges):\n data[dataset]['q_edges'][i][k][2] = data['dep2id'][data[dataset]['q_edges'][i][k][2]]\n \n return data", "title": "" }, { "docid": "b68567c0f343fe65ca2a0b8566d46aa9", "score": "0.46676457", "text": "def update_datasets(**kwargs):\n bq_client = kwargs.get(\"bq_client\")\n project_id = kwargs.get(\"project_id\")\n datasets = kwargs.get(\"datasets\", [])\n properties = kwargs.get(\"properties\", {})\n dataset_updation_flag = 0\n try:\n for dataset_id in datasets:\n dataset_id = project_id + \".\" + dataset_id\n try:\n dataset = bq_client.get_dataset(dataset_id)\n for k1, v1 in properties.items():\n if v1:\n if k1 == \"description\":\n dataset.description = v1\n dataset = bq_client.update_dataset(dataset, [\"description\"])\n elif k1 == \"default_table_expiration_ms\":\n dataset.default_table_expiration_ms = v1\n dataset = bq_client.update_dataset(\n dataset, [\"default_table_expiration_ms\"]\n )\n elif k1 == \"labels\":\n dataset.labels = v1\n dataset = bq_client.update_dataset(dataset, [\"labels\"])\n elif k1 == \"access_controls\":\n v1_vals = v1.values()\n if None in v1_vals or True in [\n str(elem).isspace() for elem in v1_vals\n ]:\n pass\n else:\n entry = bigquery.AccessEntry(\n role=properties[\"access_controls\"][\"role\"],\n entity_type=properties[\"access_controls\"][\n \"entity_type\"\n ],\n entity_id=properties[\"access_controls\"][\n \"entity_id\"\n ],\n )\n entries = list(dataset.access_entries)\n entries.append(entry)\n dataset.access_entries = entries\n dataset = bq_client.update_dataset(\n dataset, [\"access_entries\"]\n )\n except NotFound:\n print(\"Dataset {} does not exist.\".format(dataset_id))\n dataset_updation_flag = dataset_updation_flag + 1\n except Exception as e:\n print(\"Exception occurred: {}\".format(e))\n dataset_updation_flag = dataset_updation_flag + 1\n finally:\n return dataset_updation_flag", "title": "" }, { "docid": "42d7b992aaf6daaf5b9aa7a987c61c78", "score": "0.46641824", "text": "def test_setattr_to_branch():\n new_dataset = merge(X_bin, y_bin)\n new_dataset.iat[0, 3] = 4 # Change one value\n\n atom = 
ATOMClassifier(X_bin, y_bin, random_state=1)\n atom.dataset = new_dataset\n assert atom.dataset.iat[0, 3] == 4 # Check the value is changed", "title": "" }, { "docid": "4b3a7df9df09fbe3b0f3aef5df39d986", "score": "0.46637458", "text": "def update_product_categ(self):\n product_categ_obj = self.env['woo.product.categ.ept']\n instance_obj = self.env['woo.instance.ept']\n woo_categ_ids = self._context.get('active_ids')\n\n if woo_categ_ids and self._context.get('process'):\n instances = instance_obj.search([(\"active\", \"=\", True)])\n for instance in instances:\n woo_product_categs = product_categ_obj.search(\n [('woo_categ_id', '!=', False), ('woo_instance_id', '=', instance.id),\n ('exported_in_woo', '=', True), ('id', 'in', woo_categ_ids)])\n woo_product_categs and product_categ_obj.update_product_categs_in_woo(instance,\n woo_product_categs)\n else:\n woo_product_categs = product_categ_obj.search(\n [('woo_categ_id', '!=', False),\n ('woo_instance_id', '=', self.woo_instance_id.id),\n ('exported_in_woo', '=', True)])\n woo_product_categs and product_categ_obj.update_product_categs_in_woo(\n self.woo_instance_id, woo_product_categs)\n return True", "title": "" }, { "docid": "acf234320732aa40c16e4a87fbfe028f", "score": "0.46626407", "text": "def update_conclave(cls):\n cls.update_table(ConclaveRule,ImperiumSheetService.conclave_rules(),cls.init_dict_from_conclave_rule, 'name')", "title": "" }, { "docid": "4658ba28b18c8f0eed8fb87ccb5347b8", "score": "0.46618074", "text": "def reindex(self):\n self._datasets = []\n self._datasets_info = []\n self._active_index = None\n self._all_tags = set()\n base_uri = self._base_uri_model.get_base_uri()\n if base_uri is None:\n return\n for ds in dtoolcore.iter_datasets_in_base_uri(base_uri):\n append_okay = True\n ds_tags = set(ds.list_tags())\n self._all_tags.update(ds_tags)\n if self.tag_filter is not None and self.tag_filter not in ds_tags:\n append_okay = False\n if append_okay:\n self._datasets.append(ds)\n self._datasets_info.append(_dataset_info(ds))\n\n # The initial active index is 0 if there are datasets in the model.\n if len(self._datasets) > 0:\n self._active_index = 0", "title": "" }, { "docid": "47f9283937813738108a2e84ef7e6302", "score": "0.4659271", "text": "def update(self,features,prediction,gold):\n pass", "title": "" }, { "docid": "29b023531e6707506f121fc5edccde33", "score": "0.4644675", "text": "def refresh_dim_data(self):", "title": "" }, { "docid": "8e562a663865379ef5f139546570ec96", "score": "0.4640237", "text": "def update_results_only(project: CategorizationProject) -> List[Operation]:\n return _run_custom(\n project,\n run_update_unified_dataset=False,\n run_apply_feedback=False,\n run_update_results=True,\n )", "title": "" } ]
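A side note on the tallying pattern in the NYC hate-crimes passage above (the counties loop): a minimal sketch of the same per-county count using collections.defaultdict and Counter. The sample records are hypothetical stand-ins for the Socrata API response, not data from the original script.

from collections import Counter, defaultdict

# Hypothetical sample records standing in for the Socrata API response.
records = [
    {'county': 'KINGS', 'offense_category': 'Religion'},
    {'county': 'KINGS', 'offense_category': 'Race'},
    {'county': 'NEW YORK', 'offense_category': 'Religion'},
]

# county -> Counter of offense categories; a missing category reads as 0,
# so no separate zero-filling pass is needed before building the series.
counts = defaultdict(Counter)
for rec in records:
    counts[rec['county']][rec['offense_category']] += 1

categories = sorted({rec['offense_category'] for rec in records})
series = [{'name': cat, 'data': [counts[county][cat] for county in counts]}
          for cat in categories]
print(series)

A Counter returns 0 for absent keys without inserting them, which is what makes the explicit zero-filling loop in the original unnecessary.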
77fc82eec2c34b0bdb8c79d642aff7a1
Close the current video and open a new one
[ { "docid": "c2bd4085de8a0e8b75fba907f65ecd0c", "score": "0.0", "text": "def _reset_video_recorder(self):\n\n # Close any existing video recorder\n if self.video_recorder is not None:\n self.video_recorder.close()\n\n # episode_id = self.env.monitor.num_episodes\n # mode = self.env.monitor.mode\n episode_id = self.num_episodes\n mode = 'train'\n video_file = \"{}_video_episode_{:06}\".format(mode, episode_id)\n video_file = os.path.join(self.video_dir, video_file)\n\n # Start recording the next video\n self.video_recorder = VideoRecorder(\n env=self.env,\n path=video_file,\n enabled=self._video_enabled(episode_id),\n metadata={'episode_id': episode_id},\n )\n self.video_recorder.capture_frame()", "title": "" } ]
[ { "docid": "3609bc86e612012f506154c4f5c57e99", "score": "0.6926669", "text": "def close_video(self):\n if hasattr(self, 'mraw'):\n self.mraw._mmap.close()\n del self.mraw", "title": "" }, { "docid": "3131771d735ab8e2cb6dda2d85bccb0c", "score": "0.66836154", "text": "def close_clip(video):\n if video is not None:\n video.close()\n del video", "title": "" }, { "docid": "c8f2161837846402bc5aa1280f4d2d56", "score": "0.63805264", "text": "def end_video_capture(self):\r\n\r\n #Realse the video (end the video capture)\r\n self.video.release()\r\n\r\n #Close all the open imshow() windows\r\n cv2.destroyAllWindows()", "title": "" }, { "docid": "bbc35f113c97dcfd949603ae0a508837", "score": "0.63214195", "text": "def closeEvent(self, event):\n if type(self.media) is Video:\n self.stop()\n\n self.display_widget.close()", "title": "" }, { "docid": "e4d0883f25bb0bd8e40df1d69089a47c", "score": "0.631488", "text": "def end_video(self):\n if self.video_showing:\n self.video_showing = False\n self.frame_read.stop()\n self.video_thread.join()", "title": "" }, { "docid": "69305a89b0474c18b10bdbc093fadfbe", "score": "0.61965007", "text": "def close_camera(video_camera):\n video_camera.release()\n cv2.destroyAllWindows()", "title": "" }, { "docid": "32277c5cb8deac46b8f59931447bd8bf", "score": "0.6173392", "text": "def release(self):\n \n self.input_video.release()\n self.output_video.release()\n cv2.destroyAllWindows()\n print(\"Finished processing video, saving file to {}\".format(\n self.output_video_name))", "title": "" }, { "docid": "1156acb23926f47bd9c2be9b746e66da", "score": "0.6148317", "text": "def stop_video(self):\n\n print(\"Stopped: \", self._current_video)\n self._current_video = None\n\n print(\"Cannot stop: no video are playing\")", "title": "" }, { "docid": "62343ef09f7e2585f6eb5e632d765e9a", "score": "0.6103919", "text": "def video(self):\n self.video_button.click()\n if self.video_button.counted_clicks % 2 == 1:\n self.video_button.change_image(\"icons/stop.png\")\n self.camera.prepare_recording()\n self.camera.isrecording = True\n else:\n self.video_button.change_image(\"icons/rec.png\")\n self.camera.isrecording = False\n self.camera.video_file.release()", "title": "" }, { "docid": "14ef6ef6d97d73d87f0f7596502da00d", "score": "0.61027354", "text": "def stop_video(self):\n if self._currentVideo is None:\n print(\"Cannot stop video: No video is currently playing\")\n return\n \n print(f\"Stopping video: {self._currentVideo.title}\")\n self._currentVideo = None\n self._isPaused = False", "title": "" }, { "docid": "b87aba3911f2fd0e467cf6777a2c319b", "score": "0.610141", "text": "def stop_video(self):\n # if a video is playing, display a note that this video will be stoppped (even if it is the same one)\n if self.video_playing:\n print(\"Stopping video:\", self.video_playing.title)\n self.video_playing = None\n else:\n print(\"Cannot stop video: No video is currently playing\")", "title": "" }, { "docid": "c4e11c419ce5fd7865a267fd96204b34", "score": "0.6068448", "text": "def stop_video(self):\r\n\r\n if self.playing != \"\":\r\n print(\"Stopping video:\", self.playing)\r\n self.playing = \"\"\r\n else:\r\n print(\"Cannot stop video: No video is currently playing\")", "title": "" }, { "docid": "5e63bc88ba90486bb8893cce26e51060", "score": "0.60308343", "text": "def stop_video(self):\n if not self._video_library.get_video(self._curr_video_id):\n print('Cannot stop video: No video is currently playing')\n \n else:\n print('Stopping video:',self._video_library.get_video(self._curr_video_id).title)\n 
self._curr_video_id='NULL'\n self._curr_state='NULL'\n #print(\"stop_video needs implementation\")", "title": "" }, { "docid": "f812e8ae72a742345715771e372a43e7", "score": "0.6011175", "text": "def stop_video(self):\n\n videoToPlay = self._video_library.get_video(self._video_playing)\n if videoToPlay == None:\n print('Cannot stop video: No video is currently playing')\n else:\n print('Stopping video: '+videoToPlay._title)\n self._video_playing = ''\n self._video_status = 'stopped'", "title": "" }, { "docid": "a873f42962544096b268f30f9603386a", "score": "0.59940416", "text": "def stop_video(self):\n\n if self._current_playing is None:\n print(\"Unable to stop video as no video is currently being played\")\n return\n print(\"Stopping current video:\", self._current_playing.video.title)\n self._current_playing = None\n self._paused = False", "title": "" }, { "docid": "30a9b4410e8e3fe770002739c7864fa7", "score": "0.59646875", "text": "def stop_video(self):\n if self.status is not None:\n print(f\"Stopping video: {self.status}\")\n self.paused = False\n self.status = None\n else:\n print(\"Cannot stop video: No video is currently playing\")", "title": "" }, { "docid": "3960b6a48b29e163e38d65ac8614291a", "score": "0.59363705", "text": "def stop_video(self):\r\n if self.playing:\r\n print(\"Stopping video: \" + self.current_video._title)\r\n self.playing = False\r\n self.paused = False\r\n else:\r\n print(\"Cannot stop video: No video is currently playing\")", "title": "" }, { "docid": "eb853d62be7d6f33bd7130742cd9f930", "score": "0.5928374", "text": "def select_and_open_source(self) -> None:\n self.pause = False\n\n # Select file\n file = tkinter.filedialog.askopenfilename(\n title=\"Select video source\",\n filetypes=(\n (\"MP4 files\", \"*.mp4\"),\n (\"AVI files\", \"*.avi\"),\n ),\n )\n logger.info(\"Video file selected: %s.\", file)\n\n # Save annotations for diff files as new dict entry\n # Warning: will overwrite existing annotations for a file if loaded again\n self.filename = pathlib.Path(file).stem\n self.annotation_logs[self.filename] = dict()\n self.annotation_logs[self.filename][\"points\"] = dict()\n self.annotation_logs[self.filename][\"arrows\"] = dict()\n\n # Open video file\n self.vid = cv2.VideoCapture(file)\n if not self.vid.isOpened():\n raise ValueError(\"Unable to open video source\", file)\n\n # Set appropriate dimensions for canvas\n width = self.vid.get(cv2.CAP_PROP_FRAME_WIDTH)\n height = self.vid.get(cv2.CAP_PROP_FRAME_HEIGHT)\n self.canvas.config(width=width, height=height)\n logger.info(\"Canvas set with width %d and height %d\", width, height)\n\n # Reset counters and kick off video loop\n self.arrow_head_x, self.arrow_head_y = 0, 0\n self.frame_counter, self.mouse_x, self.mouse_y = 0, 0, 0\n self.btn_pause[\"state\"] = \"normal\"\n self.btn_play[\"state\"] = \"disabled\"\n self.play_video()", "title": "" }, { "docid": "311ecdfa9b2a3e87c954c7f3667f3de7", "score": "0.5922245", "text": "def stopVideoPreview(self):\n err = OpticstarDLL.video_preview(False, 0, -1)\n if err != 0:\n raise Exception(\"Could not stop video preview\")", "title": "" }, { "docid": "a37040111250fba44c1c0fe046a2b239", "score": "0.5911002", "text": "def close(self):\n if not self.enabled:\n return\n\n if self.encoder:\n self.encoder.close()\n self.encoder = None\n else:\n self.metadata['empty'] = True\n\n # If broken, get rid of the output file, otherwise we will leak it.\n if self.broken:\n logger.info('Broken video {}'.format(self.path))\n\n if os.path.exists(self.path):\n 
os.remove(self.path)\n\n            self.metadata['broken'] = True\n\n        self.write_metadata()", "title": "" }, { "docid": "83e82c52dea0ffe2c8816d402b246a38", "score": "0.5885278", "text": "def open_video(path):\n    \n    # open video file\n    cap = cv2.VideoCapture(path)\n    j = 0 # j helps to reduce fps on the video\n    while(cap.isOpened()):\n        \n        if j == 0:\n            ret, img = cap.read()\n            img = np.asarray(img, dtype=np.uint8)\n#            img = rescale(img, 0.4)\n            try:\n                img = pipeline(img)\n            except: \n                pass\n        elif j > 20:\n            j = 0\n        else:\n            j += 1\n        \n        cv2.imshow('frame', img)\n        if cv2.waitKey(1) & 0xFF == ord('q'):\n            break\n    \n    # Release everything if job is finished\n    cv2.destroyAllWindows()", "title": "" }, { "docid": "1381fcf9ddba16e2e312ac18a99a8ef0", "score": "0.58781695", "text": "def play_video(self, video_id):\n        video = self._video_library.get_video(video_id)\n\n        if not video:\n            print(\"Cannot play video: Video does not exist\")\n            return\n\n        if self._current_video != None:\n            print(f\"Stopping video: {self._current_video}\")\n            print(f\"Playing video: {video.title}\")\n            self._current_video = video.title\n            return\n\n        print(f\"Playing video: {video.title}\")\n        self._current_video = video.title", "title": "" }, { "docid": "db6e4972be40081b2cd18ddaf1ccb96b", "score": "0.5867531", "text": "def play_trailer(self):\n        webbrowser.open(self.trailer_youtube_url)", "title": "" }, { "docid": "54e308db6f776355eae8c04384c687fe", "score": "0.58438873", "text": "def continue_video(self):\n\n        print(\"continue_video needs implementation\")", "title": "" }, { "docid": "cd7728c4e7749395ee9b8191594c7af9", "score": "0.58332795", "text": "def stop_video(self):\n        if self._currently_playing == None:\n            print(\"Cannot stop video: No video is currently playing\")\n        else:\n            print(\"Stopping video:\",self._video_library.get_video(self._currently_playing)._title)\n            self._currently_playing = None\n            self._pause = True", "title": "" }, { "docid": "95467c22272e0da6857376ce5958d3b3", "score": "0.5831089", "text": "def close(self):\n        self._reader.close()\n        if self._video_writer is not None:\n            self._video_writer.close()\n        if self._video_clip_writer is not None:\n            self._video_clip_writer.close()", "title": "" }, { "docid": "1cd9a7e1b07a8f4285f2431906451d7f", "score": "0.57912165", "text": "def stop_video(self):\n        if self._video_playing:  # Check if video is playing\n            print(f\"Stopping video: {self._video_playing.title}\")  # PRINT: Stop video\n            self._video_playing = False  # Remove video from currently playing video\n        else:  # No video playing\n            print(\"Cannot stop video: No video is currently playing\")  # PRINT: error msg", "title": "" }, { "docid": "88b7b3b56d2caadb4673f0ccd849a639", "score": "0.577262", "text": "def continue_video(self):\n\n        if self._video_status == 'playing':\n            print('Cannot continue video: Video is not paused')\n            return\n        if self._video_status == 'stopped':\n            print('Cannot continue video: No video is currently playing')\n            return\n        \n        self._video_status = 'playing'\n        print('Continuing video: '+self._video_library.get_video(self._video_playing)._title)", "title": "" }, { "docid": "a471219a88dc59e900f3da5579a07852", "score": "0.5701382", "text": "def player(video):\r\n    print(video)\r\n    cap = cv2.VideoCapture(video)\r\n    while (True):\r\n        # Capture frame-by-frame\r\n        ret, frame = cap.read()\r\n        if not ret:\r\n            break\r\n        cv2.imshow('frame', frame)\r\n        if cv2.waitKey(30) & 0xFF 
== ord('q'):\r\n break\r\n\r\n # When everything done, release the capture\r\n cap.release()\r\n cv2.destroyAllWindows()", "title": "" }, { "docid": "0420094e599501db10991e08f95f7782", "score": "0.56979096", "text": "def stop_video(self):\n all_videos = self._video_library.get_all_videos()\n sorted_videos=sorted(all_videos, key=lambda x:x.video_id)\n if self.isplaying==True:\n print(\"Stopping Video: \"+str(self.vidplaying))\n else:\n print(\"Cannot stop playing: no video is currently playing\")\n\n\n #print(\"stop_video needs implementation\")", "title": "" }, { "docid": "149c4674485fba82078840a2a9947153", "score": "0.56949675", "text": "def show_trailer(self):\r\n webbrowser.open(self.youtube_trailer)", "title": "" }, { "docid": "f43353312a4206701bb197a6b536cd84", "score": "0.5686986", "text": "def continue_video(self):\n if self._currently_playing == None:\n print(\"Cannot continue video: No video is currently playing\")\n else:\n if self._pause == True:\n self._pause = False\n print(\"Continuing video:\",self._video_library.get_video(self._currently_playing)._title)\n else:\n print(\"Cannot continue video: Video is not paused\")", "title": "" }, { "docid": "18462f618a4b233d155fc77d3bbf6204", "score": "0.5686753", "text": "def continue_video(self):\n\n if self._current_playing is None:\n print(\"Unable to resume video as no video is being played\")\n return\n if not self._current_playing.paused:\n print(\"Video is already playing\")\n return\n print(\"Resuming current video:\", self._current_playing.video.title)\n self._current_playing.paused = False", "title": "" }, { "docid": "977a10ae9776baa9de511fda40be0bd5", "score": "0.5686586", "text": "def run(self):\r\n name = getName()\r\n cap = cv2.VideoCapture('../GUI/videos/'+str(name)+'.mp4')\r\n while True:\r\n ret, frame = cap.read()\r\n if ret:\r\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\r\n img = QImage(frame, frame.shape[1], frame.shape[0], QImage.Format_RGB888)\r\n pix = QPixmap.fromImage(img)\r\n pix = pix.scaled(400, 300, Qt.KeepAspectRatio, Qt.SmoothTransformation)\r\n # OpenFace reads the frames of the video every 0.33 seconds therefore, sleep time = 0.33\r\n time.sleep(0.033)\r\n self.changePixmap.emit(pix)", "title": "" }, { "docid": "2d5d2ab750ef1e8c33be1b10a098ef10", "score": "0.5684467", "text": "def guiStartVideo(self, hwnd):\n self.openCamera()\n # setStreamMode NOT needed per API doc\n rc = self.dll.CxStartVideo(self.h, hwnd)\n if rc == 0:\n logging.error(\"CxStartVideo experiment had problem\")", "title": "" }, { "docid": "3865105a5d8fbc0a9fe700f70087edd9", "score": "0.5682645", "text": "def play_random_video(self):\n rand_video = random.choice(self._video_library.get_all_videos())\n\n print(\"Playing video: \", rand_video.title)\n self._current_video = rand_video.title", "title": "" }, { "docid": "88d1c10c7d302b326e069d38ef177497", "score": "0.56801", "text": "def continue_video(self):\n if self._currentVideo is None:\n print(\"Cannot continue video: No video is currently playing\")\n elif self._isPaused:\n print(f\"Continuing video: {self._currentVideo.title}\")\n self._isPaused = False\n else:\n print(\"Cannot continue video: Video is not paused\")", "title": "" }, { "docid": "fdbebb562a70f53697cd9fd1cf9fe53e", "score": "0.5676206", "text": "def continue_video(self):\n \n\n\n print(\"continue_video needs implementation\")", "title": "" }, { "docid": "01d87db561a3d0bfb41ff01862de3aeb", "score": "0.56597507", "text": "def show_trailer(self):\n webbrowser.open(self.trailer_youtube_url)", "title": "" }, { "docid": 
"d5086ba2cf700179723f4e6e0e39a404", "score": "0.5658091", "text": "def stop_video(self):\n i = 0\n for video in self._video_library.get_all_videos():\n if video._playing:\n i = 1\n setattr(video, '_playing', False)\n setattr(video, '_paused', False)\n print('Stopping video: ' + video._title)\n break \n if i == 0:\n print('Cannot stop video: No video is currently playing')", "title": "" }, { "docid": "0af80aa67147530e06089bc08ba8547f", "score": "0.5648974", "text": "def play_video_logic():\n if self._video_playing:\n self.stop_video()\n self._video_paused = False\n print(f\"Playing video: {video.title}\") # PRINT: Play video\n self._video_playing = video # Add video to currently playing video", "title": "" }, { "docid": "073719527b60199faa3a53acc417c13a", "score": "0.56397706", "text": "def load_single(self):\r\n\t\tprint('Please select the file corresponding to the video you would like to process')\r\n\t\troot = tk.Tk()\r\n\t\troot.withdraw()\r\n\t\tself.filename = filedialog.askopenfilename() # Set the filename of the video\r\n\t\tself.root = self.parent(self.filename) # Set the video's folder\r\n\t\tself.name, self.fullname = self.get_fname(self.filename)\r\n\t\troot.destroy()", "title": "" }, { "docid": "03ff95d8eabf40f0ce72be706205a386", "score": "0.5586247", "text": "def media_stop(self):\n self.aftv.back()", "title": "" }, { "docid": "14ba48a877db8da86699c3fc6958aefb", "score": "0.5586243", "text": "def playVideoPreview(self):\n err = OpticstarDLL.video_preview(True, 0, -1)\n if err != 0:\n raise Exception(\"Could not start video preview\")", "title": "" }, { "docid": "3c9c3d9a8385a6458c747c6b7ab415fb", "score": "0.558584", "text": "def show_trailer(self):\n\t\twebbrowser.open(self.trailer_youtube_url)", "title": "" }, { "docid": "3c9c3d9a8385a6458c747c6b7ab415fb", "score": "0.558584", "text": "def show_trailer(self):\n\t\twebbrowser.open(self.trailer_youtube_url)", "title": "" }, { "docid": "3c9c3d9a8385a6458c747c6b7ab415fb", "score": "0.558584", "text": "def show_trailer(self):\n\t\twebbrowser.open(self.trailer_youtube_url)", "title": "" }, { "docid": "272110bb15b0ed8a771e1867b7fc772f", "score": "0.5582024", "text": "def stop_video(self):\n if self.is_playing == False and self.is_paused != True:\n print(\"Cannot stop video: No video is currently playing\")\n\n elif self.is_playing == False and self.is_paused != True:\n print(f\"Stopping video: {self.currently_playing.title} \")\n self.is_playing = False\n self.is_paused = False\n\n else:\n print(f\"Stopping video: {self.currently_playing.title} \")\n self.is_playing = False", "title": "" }, { "docid": "48790ef3a93d767006e8e8c924795164", "score": "0.5581208", "text": "def continue_video(self):\n if not self._video_library.get_video(self._curr_video_id):\n print('Cannot continue video: No video is currently playing')\n \n elif self._curr_state!='paused':\n print('Cannot continue video: Video is not paused')\n \n elif self._curr_state=='paused':\n print('Continuing video:',self._video_library.get_video(self._curr_video_id).title)\n self._curr_state='playing'\n \n #print(\"continue_video needs implementation\")", "title": "" }, { "docid": "3a46b4a2e921e1dde6df2c0ac786eda9", "score": "0.55678517", "text": "def pause_video(self):\n\n print(\"pause_video needs implementation\")", "title": "" }, { "docid": "5709c3189b4beef9bb9877d98faae403", "score": "0.55657625", "text": "def OnRunningVid(self, event): # wxGlade: wxVidFrame.<event_handler>\n if self.button_1.GetValue():\n self.vidWindow.StartLiveVideo()\n else:\n 
self.vidWindow.StopLiveVideo()\n        event.Skip()", "title": "" }, { "docid": "6e0cca050e8567c14bd9f20b8a7129db", "score": "0.55581105", "text": "def show_trailer(self):\r\n        webbrowser.open(self.trailer_youtube_url)", "title": "" }, { "docid": "0c94ef123394576edf3b2c29ba82a5de", "score": "0.5551091", "text": "def play(self,vn):\n        if self.vidpipe is not None:\n            self.stop()\n        self.commands = [self.videoplayer]\n        vn -= 1 # start from 1\n        if vn > len(self.vidfiles):\n            print \"no such video file\"\n            return\n\n        vidfile = self.vidfiles[vn]\n        try:\n            with open(vidfile): \n                pass\n        except IOError:\n            print \"Can't find file \" + vidfile\n            return\n\n        self.commands.append(vidfile)\n        #print repr(self.commands)\n        self.vidpipe = subprocess.Popen(self.commands,\n                                        stdin = subprocess.PIPE, \n                                        stdout = subprocess.PIPE)", "title": "" }, { "docid": "1f5dfbddb193e01c229a0e13e1218a81", "score": "0.554461", "text": "def stop(self):\n        if self.vidpipe is not None: \n            self.send_cmd('q')\n            #self.vidpipe.terminate()\n            self.vidpipe = None", "title": "" }, { "docid": "b227d01b4ce9009eb1cd9700411d5607", "score": "0.5537313", "text": "def live_preview():\n    if request.method == 'GET':\n        video = CameraVideo()\n        setattr(g, 'video', video)\n        filename = video.start_live_preview()\n        #client = get_dropbox_session(session['username'])\n        #client.upload(filename)\n    elif request.method == 'DELETE' and hasattr(g,'video'):\n        video = g.video\n        video.end_live_preview()", "title": "" }, { "docid": "76b13109c38ad3bb1976dedfdbac54ae", "score": "0.55250937", "text": "def close(self):\n        self._camera.close()", "title": "" }, { "docid": "cda046d3a514774594903421747d5b8d", "score": "0.5523912", "text": "def continue_video(self):\n        if self.status is not None:  # video playing\n            if self.paused is True:\n                print(f\"Continuing video: {self.status}\")\n                self.paused = False\n            elif self.paused is not True:\n                print(f\"Cannot continue video: Video is not paused\")\n        elif self.status is None:  # no video playing\n            print(\"Cannot continue video: No video is currently playing\")", "title": "" }, { "docid": "8c46198cbcc5c0b8da374b8566a915be", "score": "0.5511273", "text": "def continue_video(self):\r\n        if self.paused and self.playing:\r\n            print(\"Continuing video: \" + self.current_video._title)\r\n            self.paused = False\r\n        elif not self.paused and self.playing:\r\n            print(\"Cannot continue video: Video is not paused\")\r\n        else:\r\n            print(\"Cannot continue video: No video is currently playing\")", "title": "" }, { "docid": "b4c707f8cd2da5b2535fb33ec1a6fe71", "score": "0.55072516", "text": "def video_choose_button_clicked(self):\n        dialog = QtWidgets.QFileDialog(self.parent, \"Choose Video File\")\n        result = dialog.getOpenFileName()\n        file_path = result[0]\n        if file_path:\n            self.video_file_field.setText(file_path)", "title": "" }, { "docid": "1c6788cd6a59f823a4ee7252544c50d3", "score": "0.54993176", "text": "def continue_video(self):\n        if self._video_playing:  # Check if video playing exists\n            if self._video_paused:  # Check if video paused\n                print(f\"Continuing video: {self._video_playing.title}\")  # Play video\n                self._video_paused = False  # Update paused status\n            else:  # If not paused\n                print(\"Cannot continue video: Video is not paused\")  # Err - Video not paused\n        else:  # Video not playing\n            print(\"Cannot continue video: No video is currently playing\")  # Err - not playing", "title": "" }, { "docid": 
"8b099def270b167ea42c9f77af0bd814", "score": "0.54942465", "text": "def show_trailer(self):\n webbrowser.open(self.trailer_youtube_url)", "title": "" }, { "docid": "8b099def270b167ea42c9f77af0bd814", "score": "0.54942465", "text": "def show_trailer(self):\n webbrowser.open(self.trailer_youtube_url)", "title": "" }, { "docid": "8b099def270b167ea42c9f77af0bd814", "score": "0.54942465", "text": "def show_trailer(self):\n webbrowser.open(self.trailer_youtube_url)", "title": "" }, { "docid": "8b099def270b167ea42c9f77af0bd814", "score": "0.54942465", "text": "def show_trailer(self):\n webbrowser.open(self.trailer_youtube_url)", "title": "" }, { "docid": "8b099def270b167ea42c9f77af0bd814", "score": "0.54942465", "text": "def show_trailer(self):\n webbrowser.open(self.trailer_youtube_url)", "title": "" }, { "docid": "8b099def270b167ea42c9f77af0bd814", "score": "0.54942465", "text": "def show_trailer(self):\n webbrowser.open(self.trailer_youtube_url)", "title": "" }, { "docid": "8b099def270b167ea42c9f77af0bd814", "score": "0.54942465", "text": "def show_trailer(self):\n webbrowser.open(self.trailer_youtube_url)", "title": "" }, { "docid": "8b099def270b167ea42c9f77af0bd814", "score": "0.54942465", "text": "def show_trailer(self):\n webbrowser.open(self.trailer_youtube_url)", "title": "" }, { "docid": "8b099def270b167ea42c9f77af0bd814", "score": "0.54942465", "text": "def show_trailer(self):\n webbrowser.open(self.trailer_youtube_url)", "title": "" }, { "docid": "8b099def270b167ea42c9f77af0bd814", "score": "0.54942465", "text": "def show_trailer(self):\n webbrowser.open(self.trailer_youtube_url)", "title": "" }, { "docid": "b3c2649b982ded386b8eaa04e9bcc1b8", "score": "0.54939944", "text": "def run_video_test(self):\n print(\"Press q to exit the stream\")\n while True:\n frame = self.videoStream.read()\n frame = imutils.resize(frame)\n\n frame = self.scanner.main(frame)\n cv2.imshow('video', frame)\n key = cv2.waitKey(1) & 0xFF\n if key == ord('q'):\n cv2.destroyAllWindows()\n break\n \n self.videoStream.stop()", "title": "" }, { "docid": "574b89d91aea9b7f5daecf50c4831caf", "score": "0.5491", "text": "def continue_video(self):\r\n if self.paused:\r\n if self.playing != \"\":\r\n print(\"Continuing video:\", self.playing)\r\n self.paused = False\r\n else:\r\n print(\"Cannot continue video: No video is not paused\")", "title": "" }, { "docid": "f4b2af2d2a25501e65a87c572e29c836", "score": "0.5488733", "text": "def next(self):\n if self._player:\n self._player.stop()\n\n if self._videos_queue:\n video = self._videos_queue.pop(0)\n self.play(video)\n\n return self.status()", "title": "" }, { "docid": "40174e305a1230664fcb96e4d2da75f1", "score": "0.5476206", "text": "def play_random_video(self):\n if len(self._video_library.get_all_videos())==0:\n print('No videos available')\n else:\n random_id = random.choice(self._video_library.get_all_videos()).video_id\n if self.status is not None:\n print(f\"Stopping video: {self.status}\")\n self.paused = False\n self.status = self._video_library.get_video(random_id).title\n print(f\"Playing video: {self.status}\")", "title": "" }, { "docid": "7545189d1619af3a4040c5b7fe761424", "score": "0.54733557", "text": "def continue_video(self):\n if self.playing == \"none\":\n print(\"Cannot continue video: No video is currently playing\")\n elif self.pause == False:\n print(\"Cannot continue video: Video is not paused\")\n else:\n print(f\"Continuing video: {self.playing}\")\n self.pause = False", "title": "" }, { "docid": "ad6f1748f272466ae51b569897eff36f", "score": 
"0.5448197", "text": "def connectVideo():\n return cv.VideoCapture(self._cameraId)", "title": "" }, { "docid": "0424293124a6867efcca2c83e647cad3", "score": "0.5435484", "text": "def continue_video(self):\n\n if self.is_paused == True:\n print(f\"Continuing video: {self.currently_playing.title}\")\n self.is_paused = False\n\n elif self.is_playing == True:\n print(\"Cannot continue video: Video is not paused\")\n\n elif self.currently_playing == None:\n print(\"Cannot continue video: No video is currently playing\")", "title": "" }, { "docid": "7aae1c0ba7fff133060ae5fa59f943c7", "score": "0.54257846", "text": "def click_view_video_file(self, file_name):\n self._folder_content.click_view_video_file(file_name)", "title": "" }, { "docid": "368bf02cc442845f02f552589299a348", "score": "0.5418241", "text": "def play_random_video(self):\n\n print(\"play_random_video needs implementation\")", "title": "" }, { "docid": "8f42fa3a83d009055ee6c58abc5cc041", "score": "0.54029924", "text": "def stop_capture(self):\n self.record = False\n self.end_rec_time = time.time()\n print(f\".\\n.\\n.\\n.\\nFinished Recording in {self.end_rec_time - self.start_rec_time} seconds!\")\n self.recorded_video.release()\n print(f\"\\n\\nYour file \\'{self.file_name}\\' has been successfully saved in \"\n f\"\\'MSS\\\\tutorials\\\\recordings\\' folder.\")\n shutil.copyfile(os.path.join('recordings', self.file_name),\n os.path.join('recordings', 'last_recording.mp4'))\n cv2.destroyAllWindows()\n self.app.exit()", "title": "" }, { "docid": "5282a7c001dff680c8749f6160bcdadd", "score": "0.5398842", "text": "def play_video(self, video_id):\r\n if video_id in self.video_id_list and not self.playing:\r\n self.current_video = self._video_library.get_video(video_id)\r\n self.playing = True\r\n self.paused = False\r\n print(\"Playing video: \" + self.current_video._title)\r\n elif video_id in self.video_id_list and self.playing:\r\n print(\"Stopping video: \" + self.current_video._title)\r\n self.current_video = self._video_library.get_video(video_id)\r\n self.playing = True\r\n self.paused = False\r\n print(\"Playing video: \" + self.current_video._title)\r\n else:\r\n print(\"Cannot play video: Video does not exist\")", "title": "" }, { "docid": "1ba3ed4a6ab88f3f2d0175448d1cf6cc", "score": "0.5398208", "text": "def media_stop(self):\n self._remote.cmd('Stop')", "title": "" }, { "docid": "7b4e3769a055fde091316740182623ad", "score": "0.53968394", "text": "def play_random_video(self):\n \n# if not self._video_library.get_video(self._curr_video_id):\n# print('No videos available')\n \n if self._curr_video_id=='NULL':\n self._curr_video_id=self._video_library.get_all_videos()[3].video_id\n self._curr_state='playing'\n print('Playing video:',self._video_library.get_video(self._curr_video_id).title)\n \n else:\n play_number=random.randint(0,len(self._video_library.get_all_videos())-1)\n print('Stopping video:',self._video_library.get_video(self._curr_video_id).title)\n print('Playing video:',self._video_library.get_all_videos()[play_number].title)\n self._curr_video_id=self._video_library.get_all_videos()[play_number].video_id\n self._curr_state='playing'\n #print(\"play_random_video needs implementation\")", "title": "" }, { "docid": "e88b11a742709f0f5725e709dc72dcca", "score": "0.5391432", "text": "def exit():\n vlc.tell(\"set fullscreen mode to false\")", "title": "" }, { "docid": "dd31c104a6394ff91d157e9b5043aebf", "score": "0.538919", "text": "def play_video(self, video_id):\r\n video_title = \"\"\r\n\r\n for video in 
self.video_list:\r\n if video.video_id == video_id:\r\n video_title = video.title\r\n\r\n if video_title == \"\":\r\n print(\"Cannot play video: Video does not exist\")\r\n elif self.playing == \"\":\r\n print(\"Playing video:\", video_title)\r\n self.playing = video_title\r\n elif self.playing != \"\":\r\n print(\"Stopping video:\", self.playing)\r\n self.playing = video_title\r\n print(\"Playing video:\", self.playing)", "title": "" }, { "docid": "881b780b5b7278f5c24d92832bd02aeb", "score": "0.53719944", "text": "def stop_recording(self):\n print('Stop Button has been pressed')\n try:\n img_array = []\n self.status = False\n for filename in glob.glob('screenshots/*.jpg'):\n img = cv2.imread(filename)\n height, width, layers = img.shape\n size = (width, height)\n img_array.append(img)\n out = cv2.VideoWriter('project.avi', cv2.VideoWriter_fourcc(*'DIVX'), 24, size)\n for i in range(len(img_array)):\n out.write(img_array[i])\n out.release()\n shutil.rmtree('screenshots')\n exit()\n except:\n exit()", "title": "" }, { "docid": "ad3784e4651ff498e851e4303736d84f", "score": "0.53715605", "text": "def stop_writing_video(self):\n self._videoFileName = None\n self._videoEncoding = None\n self._videoWriter = None", "title": "" }, { "docid": "db936d076ab218dea17f6881ad7c6248", "score": "0.53710914", "text": "def _showvid(video_name, from_frame = 0):\n cap = cv2.VideoCapture(str(video_name))\n cap.set(1, from_frame)\n for i in range(from_frame , from_frame + 999999):\n ret, frame = cap.read()\n if not ret:\n print(\"Grab frame unsuccessful. ABORT MISSION!\")\n break\n cv2.imshow('frame: ' + str(i), frame)\n # Set waitKey\n key = cv2.waitKey()\n if key == ord('q'):\n break\n cv2.destroyAllWindows()\n cv2.destroyAllWindows()\n cv2.destroyAllWindows()", "title": "" }, { "docid": "55d4612f5db8c9223e6ab5be5a81f01a", "score": "0.5369697", "text": "def close_camera(self):\n if self.cam_state != 'open':\n raise TypeError('Can only close camera if it has been opened')\n self.stop()\n self.cam_state = 'closing'\n self.can_play = False\n self.send_camera_request('close_cam', None)", "title": "" }, { "docid": "3a0906b12cc80ec507928f456467136d", "score": "0.5354748", "text": "def _Close(self):\n self._vsapm_volume.close()\n self._vsapm_volume = None\n self._file_object = None", "title": "" }, { "docid": "71e929e3743ec3fde997c590192effd4", "score": "0.53490585", "text": "def play_video(self) -> None:\n ret, frame = self.get_frame()\n if ret:\n self.img = PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(frame))\n img_uid = self.canvas.create_image(0, 0, image=self.img, anchor=tkinter.NW)\n self.canvas.lower(img_uid) # Allows marker for clicks to be visible\n\n if not self.pause:\n self.window.after(DELAY, self.play_video)\n else:\n # To make sure user doesn't click play after pausing faster than DELAY\n self.btn_play[\"state\"] = \"normal\"", "title": "" }, { "docid": "99671512b13eedcb5bbec5d6490a32b1", "score": "0.5346177", "text": "def search_window(self):\n #self.window.quit()\n #self.window.after_cancel()\n self.vid.__del__()\n self.window.destroy()\n Search(Tk(), \"Tkinter and OpenCV\")", "title": "" }, { "docid": "dcebd81f100c8507bacdc6a726e336e7", "score": "0.5341241", "text": "def play_video(self, video_id):\n if not self._video_library.get_video(video_id):\n print('Cannot play video: Video does not exist')\n \n elif self._curr_video_id=='NULL':\n print('Playing video:',self._video_library.get_video(video_id).title)\n self._curr_video_id=video_id\n self._curr_state='playing'\n \n elif video_id == 
self._curr_video_id or video_id != self._curr_video_id:\n print('Stopping video:', self._video_library.get_video(self._curr_video_id).title)\n print('Playing video:', self._video_library.get_video(video_id).title)\n self._curr_video_id=video_id\n self._curr_state='playing'", "title": "" }, { "docid": "2b0782deb10f3259eda6da7871969a4d", "score": "0.5339132", "text": "def goto_add_movie(self):\r\n\r\n self.destroy_all()\r\n self.window.Movie(username=self.username, window=self.window, old_window=self.old_window)", "title": "" }, { "docid": "6906401290baeefe662c08f780bf4e7f", "score": "0.530981", "text": "def onClose(self, evt):\n \n try:\n self.terminate_video()\n except:\n pass\n \n dlg = wx.MessageDialog(self, 'Do you want to save data?', 'Exit', \\\n wx.YES_NO | wx.ICON_QUESTION | wx.CANCEL)\n if dlg.ShowModal() == wx.ID_CANCEL:\n dlg.Destroy()\n else:\n dlg.Destroy()\n self.Destroy()", "title": "" }, { "docid": "f2867bb85ffb2f36adab3167ed7ef689", "score": "0.5299906", "text": "def play_video(self, video_path, period):\n self.video_playing = True\n self.state_time_remain = period\n abs_path = QFileInfo(video_path).absoluteFilePath()\n QMetaObject.invokeMethod(self.video_player, \"setMedia\", Qt.QueuedConnection,\n Q_ARG(QMediaContent, QMediaContent(QUrl.fromLocalFile(abs_path))))", "title": "" }, { "docid": "ddd7eea46a44783a1952d2b79b07ca51", "score": "0.52913725", "text": "def closelive():\n g_releaseliveimport()", "title": "" }, { "docid": "c0dc18889bb61ea0aea87b5bb6dd6446", "score": "0.5288152", "text": "def show_trailer(self):\n webbrowser.open(self.trailer_youtube_url) # Python provided BIF", "title": "" }, { "docid": "bc0af4385f7279c2e5503fc5dc452d3c", "score": "0.5284081", "text": "def stop_looking(self):\n self.__send('action_video', 'stop watching')", "title": "" } ]
1781dbf8a3e658963b942dd4d150a21b
Calculate correction sensor from bandpass calibration solution sensor. Given the bandpass calibration solution `sensor`, this extracts the time series of bandpasses (channelised by `cal_freqs`) for the input specified by `index` (in the form (pol, ant)) and builds a categorical sensor for the corresponding complex correction terms (channelised by `data_freqs`). Invalid solutions (NaNs) are replaced by linear interpolations over frequency (separately for magnitude and phase), as long as some channels have valid solutions.
[ { "docid": "3adb898c8601e51ebd344b8d201712fa", "score": "0.75315213", "text": "def calc_bandpass_correction(sensor, index, data_freqs, cal_freqs):\n corrections = []\n for segment, value in sensor.segments():\n bp = value[(slice(None),) + index]\n valid = np.isfinite(bp)\n if valid.any():\n # Don't extrapolate to edges of band where gain typically drops off\n bp = complex_interp(data_freqs, cal_freqs[valid], bp[valid],\n left=INVALID_GAIN, right=INVALID_GAIN)\n else:\n bp = np.full(len(data_freqs), INVALID_GAIN)\n corrections.append(ComparableArrayWrapper(np.reciprocal(bp)))\n return CategoricalData(corrections, sensor.events)", "title": "" } ]
[ { "docid": "c8d0ff7fe87cbfad400603128b53311a", "score": "0.6032222", "text": "def calc_delay_correction(sensor, index, data_freqs):\n delays = [np.nan_to_num(value[index]) for segm, value in sensor.segments()]\n # Delays produced by cal pipeline are raw phase slopes, i.e. exp(2 pi j d f)\n corrections = [np.exp(-2j * np.pi * d * data_freqs).astype('complex64')\n for d in delays]\n corrections = [ComparableArrayWrapper(c) for c in corrections]\n return CategoricalData(corrections, sensor.events)", "title": "" }, { "docid": "c6138d7bc7f8b8f4ab5bda47bbebaa43", "score": "0.589976", "text": "def calc_gain_correction(sensor, index):\n dumps = np.arange(sensor.events[-1])\n events = sensor.events[:-1]\n gains = np.array([value[index] for segment, value in sensor.segments()])\n valid = np.isfinite(gains)\n if not valid.any():\n return CategoricalData([INVALID_GAIN], [0, len(dumps)])\n smooth_gains = complex_interp(dumps, events[valid], gains[valid])\n return np.reciprocal(smooth_gains)", "title": "" }, { "docid": "fb41cdf9757a5bc326d37b3e1a0a3682", "score": "0.55260384", "text": "def apply(self, data):\n\n # find best skyfreq for each channel\n skyfreqs = n.unique(self.skyfreq[self.select]) # one per spw\n nch_tot = len(self.freqs)\n chan_bandnum = [range(nch_tot*i/len(skyfreqs), nch_tot*(i+1)/len(skyfreqs)) for i in range(len(skyfreqs))] # divide chans by number of spw in solution\n self.logger.info('Solutions for %d spw: (%s)' % (len(skyfreqs), skyfreqs))\n\n for j in range(len(skyfreqs)):\n skyfreq = skyfreqs[j]\n chans = chan_bandnum[j]\n self.logger.info('Applying gain solution for chans from %d-%d' % (chans[0], chans[-1]))\n\n # define freq structure to apply delay solution\n nch = len(chans)\n chanref = nch/2 # reference channel at center\n relfreq = self.chansize*(n.arange(nch) - chanref) # relative frequency\n\n for i in range(len(self.blarr)):\n ant1, ant2 = self.blarr[i] # ant numbers (1-based)\n for pol in self.polind:\n # apply gain correction\n invg1g2 = self.calcgain(ant1, ant2, skyfreq, pol)\n data[:,i,chans,pol-self.polind[0]] = data[:,i,chans,pol-self.polind[0]] * invg1g2 # hack: lousy data pol indexing\n\n # apply delay correction\n d1d2 = self.calcdelay(ant1, ant2, skyfreq, pol)\n delayrot = 2*n.pi*(d1d2[0] * 1e-9) * relfreq # phase to rotate across band\n data[:,i,chans,pol-self.polind[0]] = data[:,i,chans,pol-self.polind[0]] * n.exp(-1j*delayrot[None, None, :]) # do rotation", "title": "" }, { "docid": "e1dbc147f040a0825ff734fb03e74880", "score": "0.5342718", "text": "def apply(self, data):\n\n # flag bad ants\n if self.flagants:\n badants = self.calc_flag()\n else:\n badants = n.array([[]])\n\n # apply gain correction\n if hasattr(self, 'bandpass'):\n corr = n.ones_like(data)\n flag = n.ones_like(data.real).astype('int')\n chans_uncal = range(len(self.freqs))\n for spwi in range(len(self.spwind)):\n chsize = n.round(self.bpfreq[1]-self.bpfreq[0], 0)\n ww = n.where( (self.freqs >= self.bpfreq[self.spwind[spwi]*1000]) & (self.freqs <= self.bpfreq[(self.spwind[spwi]+1)*1000-1]+chsize) )[0]\n if len(ww) == 0:\n self.logger.info('Gain solution frequencies not found in data for spw %d.' 
% (self.spwind[spwi]))\n firstch = ww[0]\n lastch = ww[-1]+1\n for ch in ww:\n chans_uncal.remove(ch)\n self.logger.info('Combining gain sol from spw=%d with BW chans from %d-%d' % (self.spwind[spwi], firstch, lastch))\n for badant in n.transpose(badants):\n if badant[1] == spwi:\n badbl = n.where((badant[0] == n.array(self.ant1ind)) | (badant[0] == n.array(self.ant2ind)))[0]\n flag[:, badbl, firstch:lastch, badant[2]] = 0\n\n corr1 = self.gain[self.ant1ind, spwi, :][None, :, None, :] * self.bandpass[self.ant1ind, firstch:lastch, :][None, :, :, :]\n corr2 = (self.gain[self.ant2ind, spwi, :][None, :, None, :] * self.bandpass[self.ant2ind, firstch:lastch, :][None, :, :, :]).conj()\n\n corr[:, :, firstch:lastch, :] = corr1 * corr2\n if len(chans_uncal):\n self.logger.info('Setting data without bp solution to zero for chans %s.' % (chans_uncal))\n flag[:, :, chans_uncal,:] = 0\n data[:] *= flag/corr\n else:\n for spw in range(len(self.gain[0,0])):\n pass", "title": "" }, { "docid": "5d3109d2b61a3405c76f27bf4cd34edc", "score": "0.53314567", "text": "def main(station, freq, ax):\n # NO! we have data for all channels in ONE file for every FREQuency\n FR = '%.2f' % freq\n \n # for evenry channel in N, E do\n # save Q values per channel\n Qs = []\n \n CHANNEL = \"Z\"\n path = os.path.join(PATH, station)#, CHANNEL)\n # find filename like: `TRGvalues_E_0.75Hz_SD10_T35.txt`\n \n files = [os.path.join(path, f) for f in os.listdir(path) if FR in f]\n # just files no dirs\n files = [f for f in files if os.path.isfile(f)]\n # must be 1 file for this channel and frequency\n if not len(files) == 1:\n print(\"Must be 1 file for FR %s\" % freq)\n return 0, 0\n filename = files[0]\n \n # parse-read file\n try:\n data = load_values(filename, station=station)\n except IndexError as e:\n print(e)\n return\n \n #=== CALC values with Z(R) corr\n # make correction for Geom spreading...\n \n # distances array\n X = data['DIST']\n \n # geometrical spreading param for dist < 50 km is always == dist**-1\n Z_R = 1 / X\n # but for dist 50-70 may be 1/50 ???\n ind = np.where(X > 50)\n # make 1/50\n Z_R[ind] = 1/50\n \n #=======\n Y = np.log(data[\"Ap\"] / ( data[\"A100RMS\"] * Z_R ))\n #=======\n \n #===========================================================\n # ROBUST method Fit:\n a, b2 = linear_fit(Y, X)\n Y2 = a * X + b2\n \n # calc overall Q value\n # Q = Pi * f / (v * b)\n #!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n Q = -np.pi * freq / (Vs * a)\n #!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n Qs += [Q]\n \n # plot ROBUST results\n #===\n _label = \"$Q_P = %.0f$\" % Q\n \n Qline, = ax.plot(X, Y2, \"-b\", lw=1.5, \n label=_label, zorder=222)\n #===\n \n # how much items\n _NUM = X.size\n print(\"CH = %s \\t Freq = %.2f \\t Q = %.0f \\t N = %d\" % (CHANNEL, freq, Q, _NUM))\n \n #===\n Qall, = ax.plot(X, Y, \"ow\", markersize=7,\n markeredgecolor=\"k\", zorder=111) \n \n ax.legend(loc='lower left', prop={\"size\":FONTSIZE})\n \n return Qs", "title": "" }, { "docid": "1e79a88eefb46255e66e6d3ba7ae53d1", "score": "0.5283836", "text": "def getSingleTemp(self, channel, calIndex=-1):\n if calIndex == -1:\n calIndex = channel\n try:\n #print(\"lakeshore370: Computing temperature for channel %d\"%channel)\n #print(\"Resistance is %s\"%str(self.readings[channel][0]))\n #print(\"Calibration type %s\" % (self.calibrations[calIndex][0],))\n #print self.calibrations[calIndex]\n if self.calibrations[calIndex][0] == INTERPOLATION:\n # log-log interpolation\n return (np.exp(np.interp(np.log(self.readings[channel][0]['Ohm']),\n 
np.log(np.array(self.calibrations[calIndex][1])),\n np.log(np.array(self.calibrations[calIndex][2]))\n ))) * K\n elif self.calibrations[calIndex][0] == VRHOPPING:\n T0 = self.calibrations[calIndex][2]\n R0 = self.calibrations[calIndex][1]\n res = self.readings[channel][0]\n T = T0 / (np.log(R0/res)**4)\n return T\n elif self.calibrations[calIndex][0] == FUNCTION:\n # hack alert--using eval is bad:\n # (1) This depends on the function string being good python\n # code.\n # (2) It also depends on it having \"r\" as the variable for \n # resistance, in ohms.\n # (3) very unsafe if anyone ever hacks the registry. of course,\n # then we have bigger problems\n r = self.readings[channel][0][units.Ohm]\n return eval(self.calibrations[calIndex][1]) * units.K\n elif self.calibrations[calIndex][0] == DEFAULT:\n if calIndex > 0:\n # use calibration 0--the device calibration\n return self.getSingleTemp(channel, 0) \n else:\n #If there is no calibration at all use res2temp\n return res2temp(self.readings[channel][0])\n except Exception as e:\n print \"Exception getting temperature: \", e\n return 0.0*K", "title": "" }, { "docid": "fd1506ab788433af7ead7880a55a9190", "score": "0.5206002", "text": "def add_applycal_sensors(cache, attrs, data_freqs):\n cal_ants = attrs.get('cal_antlist', [])\n cal_pols = attrs.get('cal_pol_ordering', [])\n cal_input_map = {ant + pol: (pol_idx, ant_idx)\n for (pol_idx, pol) in enumerate(cal_pols)\n for (ant_idx, ant) in enumerate(cal_ants)}\n if not cal_input_map:\n return\n try:\n cal_spw = SpectralWindow(attrs['cal_center_freq'], None,\n attrs['cal_n_chans'], sideband=1,\n bandwidth=attrs['cal_bandwidth'])\n cal_freqs = cal_spw.channel_freqs\n except KeyError:\n logger.warning('Missing cal spectral attributes, disabling applycal')\n return\n\n def calc_correction_per_input(cache, name, inp, product):\n \"\"\"Calculate correction sensor for input `inp` from cal solutions.\"\"\"\n product_sensor = get_cal_product(cache, attrs, product)\n try:\n index = cal_input_map[inp]\n except KeyError:\n raise KeyError(\"No calibration solutions available for input \"\n \"'{}' - available ones are {}\"\n .format(inp, sorted(cal_input_map.keys())))\n if product == 'K':\n correction_sensor = calc_delay_correction(product_sensor, index,\n data_freqs)\n elif product == 'B':\n correction_sensor = calc_bandpass_correction(product_sensor, index,\n data_freqs, cal_freqs)\n elif product == 'G':\n correction_sensor = calc_gain_correction(product_sensor, index)\n else:\n raise KeyError(\"Unknown calibration product '{}'\".format(product))\n cache[name] = correction_sensor\n return correction_sensor\n\n correction_sensor_template = 'Calibration/{inp}_correction_{product}'\n cache.virtual[correction_sensor_template] = calc_correction_per_input", "title": "" }, { "docid": "71b6ddc70ff9375af7e6d9042fe62720", "score": "0.50737643", "text": "def filterbank(X, sfreq, idx_fb, peaks):\n\n # Calibration data comes in batches of trials\n if X.ndim == 3:\n num_chans = X.shape[1]\n num_trials = X.shape[0]\n\n # Testdata come with only one trial at the time\n elif X.ndim == 2:\n num_chans = X.shape[0]\n num_trials = 1\n\n sfreq = sfreq / 2\n\n min_freq = np.min(peaks)\n max_freq = np.max(peaks)\n\n if max_freq < 40:\n top = 100\n else:\n top = 115\n # Check for Nyquist\n if top >= sfreq:\n top = sfreq - 10\n\n diff = max_freq - min_freq\n # Lowcut frequencies for the pass band (depends on the frequencies of SSVEP)\n # No more than 3dB loss in the passband\n\n passband = [min_freq - 2 + x * diff for x in 
range(7)]\n\n # At least 40db attenuation in the stopband\n if min_freq - 4 > 0:\n stopband = [\n min_freq - 4 + x * (diff - 2) if x < 3 else min_freq - 4 + x * diff\n for x in range(7)\n ]\n else:\n stopband = [2 + x * (diff - 2) if x < 3 else 2 + x * diff for x in range(7)]\n\n Wp = [passband[idx_fb] / sfreq, top / sfreq]\n Ws = [stopband[idx_fb] / sfreq, (top + 7) / sfreq]\n\n N, Wn = scp.cheb1ord(Wp, Ws, 3, 40) # Chebyshev type I filter order selection.\n\n B, A = scp.cheby1(N, 0.5, Wn, btype=\"bandpass\") # Chebyshev type I filter design\n\n y = np.zeros(X.shape)\n if num_trials == 1: # For testdata\n for ch_i in range(num_chans):\n try:\n # The arguments 'axis=0, padtype='odd', padlen=3*(max(len(B),len(A))-1)' correspond\n # to Matlab filtfilt (https://dsp.stackexchange.com/a/47945)\n y[ch_i, :] = scp.filtfilt(\n B,\n A,\n X[ch_i, :],\n axis=0,\n padtype=\"odd\",\n padlen=3 * (max(len(B), len(A)) - 1),\n )\n except Exception as e:\n print(e)\n print(num_chans)\n else:\n for trial_i in range(num_trials): # Filter each trial sequentially\n for ch_i in range(num_chans): # Filter each channel sequentially\n y[trial_i, ch_i, :] = scp.filtfilt(\n B,\n A,\n X[trial_i, ch_i, :],\n axis=0,\n padtype=\"odd\",\n padlen=3 * (max(len(B), len(A)) - 1),\n )\n return y", "title": "" }, { "docid": "ca147fed3b5d634fe55387293abea336", "score": "0.50712883", "text": "def makespectrum(data, ratio=None, g_norm=None, corr_cal=None,\n #lowfreq=1.25, highfreq=7.9, badlags=None, numchan=256):\n lowfreq=1.3, highfreq=7.9, badlags=None, numchan=256):\n\n freq = (0.0078125*2.5) + numpy.arange(256)*0.03125\n if type(data) not in (numpy.ndarray, numpy.ma.core.MaskedArray):\n print(\"Needs to be array data type\")\n return\n\n if badlags is not None:\n for lag in badlags:\n data[lag] = numpy.nan\n if check_array(g_norm):\n normacf = data*g_norm\n normacf = normacf - nanmean(normacf[65:])\n normacf = normacf/g_norm\n else:\n normacf = data\n if check_array(ratio):\n normacf = normacf*ratio\n\n if not check_array(corr_cal):\n print(\"Corr cal not present, return acf\")\n return normacf\n \n spec = numpy.dot(corr_cal, numpy.nan_to_num(normacf))\n #x = 8.0*numpy.arange(len(spec))/(len(spec)-1.)\n #xx = 8.0*numpy.arange(numchan)/(numchan-1.)\n #intinst = Interpolate(x,spec)\n #return intinst(xx)\n if corr_cal.shape[0] > 256:\n spec = rebin(spec[1:])\n\n #spec = numpy.flipud(spec)\n ind = numpy.logical_or(freq<lowfreq, freq>highfreq)\n #spec[:lochan] = numpy.nan\n #spec[hichan:] = numpy.nan\n #spec = numpy.flipud(spec)\n spec[ind] = numpy.nan\n return spec", "title": "" }, { "docid": "32e11f21dd5a4e3cdd55f3512225bd33", "score": "0.5051424", "text": "def calculate_spectrum(obs_freqs,v0,radex_params,radex=True):\n\n\n\t#user supplies the observed frequency so doppler shift to emitted\n\t#tau dist makes this unnecessary\n\temit_freqs=obs_freqs*(1.0+v_0/light_speed)\n\t\n\t#we'll return a dataframe of Frequency, Intensity\n\tnew_df=DataFrame({\"Frequency\":obs_freqs})\n\tnew_df[\"Intensity\"]=0.0\n\t\n\t#solve the radex model and get all line properties\n\tif radex:\n\t\ttau_0_df=get_radex_taus(radex_params)\n\telse:\n\t\ttau_0_df=get_lte_taus(col_dens,gas_temp,delta_v)\n\t\t\t\t\t\t\t \n\t#now loop through line and build up the tau weighted radiation temperature average\n\tfor i,line in tau_0_df.iterrows():\n\t\t#get the relative velocity of all the emitting frequencies\n\t\tvelocities=((line[\"freq\"]/obs_freqs)-1.0)*light_speed\n\t\t\n\t\t#use that to get the tau values at those 
frequencies\n\t\ttaus=get_tau_dist(v_0,delta_v,line[\"tau\"],velocities)\n\t\t\n\t\t#store tau weighted radiation temp\n\t\tnew_df[f\"{line.freq:.3f}\"]=rad_temp(line[\"T_ex\"],emit_freqs)*taus\n\t\t\n\t\t#and add tau to running total\n\t\tnew_df[\"Intensity\"]+=taus\n\t\t\n\t\n\t#sum our tau weighted temperatures and divide by sum of taus\n\tline_cols=[x for x in new_df if x not in [\"Intensity\",\"Frequency\"]]\n\tnew_df[\"temp\"]=new_df[line_cols].sum(axis=1)/new_df[\"Intensity\"]\n\t#now get brightness temperature as a function of frequency\n\tnew_df[\"Intensity\"]=(new_df[\"temp\"]-rad_temp(2.73,emit_freqs))*(1.0-np.exp(-new_df[\"Intensity\"]))\n\tnew_df[\"Intensity\"]=new_df[\"Intensity\"].fillna(0.0)\n\treturn new_df[\"Intensity\"].values", "title": "" }, { "docid": "f5812fdde3a14e6ea94088ed7f744e8b", "score": "0.50481683", "text": "def run(self, exposure, catalog):\n bbox = exposure.getBBox()\n\n self.log.info(\"Measuring aperture corrections for %d flux fields\" % (len(self.toCorrect),))\n # First, create a subset of the catalog that contains only selected stars\n # with non-flagged reference fluxes.\n subset1 = [record for record in self.starSelector.selectStars(exposure, catalog).starCat\n if not record.get(self.refFluxKeys.flag)]\n\n apCorrMap = ApCorrMap()\n\n # Outer loop over the fields we want to correct\n for name, keys in self.toCorrect.iteritems():\n fluxName = name + \"_flux\"\n fluxSigmaName = name + \"_fluxSigma\"\n\n # Create a more restricted subset with only the objects where the to-be-correct flux\n # is not flagged.\n subset2 = [record for record in subset1 if not record.get(keys.flag)]\n\n # Check that we have enough data points that we have at least the minimum of degrees of\n # freedom specified in the config.\n if len(subset2) - 1 < self.config.minDegreesOfFreedom:\n raise RuntimeError(\"Only %d sources for calculation of aperture correction for '%s'; \"\n \"require at least %d.\"\n % (len(subset2), name, self.config.minDegreesOfFreedom+1))\n apCorrMap[fluxName] = ChebyshevBoundedField(bbox, numpy.ones((1,1), dtype=float))\n apCorrMap[fluxSigmaName] = ChebyshevBoundedField(bbox, numpy.zeros((1,1), dtype=float))\n continue\n\n # If we don't have enough data points to constrain the fit, reduce the order until we do\n ctrl = self.config.fitConfig.makeControl()\n while len(subset2) - ctrl.computeSize() < self.config.minDegreesOfFreedom:\n if ctrl.orderX > 0:\n ctrl.orderX -= 1\n if ctrl.orderY > 0:\n ctrl.orderY -= 1\n\n # Fill numpy arrays with positions and the ratio of the reference flux to the to-correct flux\n x = numpy.zeros(len(subset2), dtype=float)\n y = numpy.zeros(len(subset2), dtype=float)\n apCorrData = numpy.zeros(len(subset2), dtype=float)\n indices = numpy.arange(len(subset2), dtype=int)\n for n, record in enumerate(subset2):\n x[n] = record.getX()\n y[n] = record.getY()\n apCorrData[n] = record.get(self.refFluxKeys.flux)/record.get(keys.flux)\n\n for _i in range(self.config.numIter):\n\n # Do the fit, save it in the output map\n apCorrField = ChebyshevBoundedField.fit(bbox, x, y, apCorrData, ctrl)\n\n # Compute errors empirically, using the RMS difference between the true reference flux and the\n # corrected to-be-corrected flux.\n apCorrDiffs = apCorrField.evaluate(x, y)\n apCorrDiffs -= apCorrData\n apCorrErr = numpy.mean(apCorrDiffs**2)**0.5\n\n # Clip bad data points\n apCorrDiffLim = self.config.numSigmaClip * apCorrErr\n keep = numpy.fabs(apCorrDiffs) <= apCorrDiffLim\n x = x[keep]\n y = y[keep]\n apCorrData = apCorrData[keep]\n indices = 
indices[keep]\n\n # Final fit after clipping\n apCorrField = ChebyshevBoundedField.fit(bbox, x, y, apCorrData, ctrl)\n\n self.log.info(\"Aperture correction for %s: RMS %f from %d\" %\n (name, numpy.mean((apCorrField.evaluate(x, y) - apCorrData)**2)**0.5, len(indices)))\n\n # Save the result in the output map\n # The error is constant spatially (we could imagine being\n # more clever, but we're not yet sure if it's worth the effort).\n # We save the errors as a 0th-order ChebyshevBoundedField\n apCorrMap[fluxName] = apCorrField\n apCorrErrCoefficients = numpy.array([[apCorrErr]], dtype=float)\n apCorrMap[fluxSigmaName] = ChebyshevBoundedField(bbox, apCorrErrCoefficients)\n\n # Record which sources were used\n for i in indices:\n subset2[i].set(keys.used, True)\n\n return Struct(\n apCorrMap = apCorrMap,\n )", "title": "" }, { "docid": "a6f1a7283ff8e5ea4659b1b177fe4d89", "score": "0.5040674", "text": "def cylindrical_wave(antenna, frequency, slowness, coordinate=(0.0, 0.0)):\n\n x, y = antenna.get_xy()\n r = np.sqrt((x - coordinate[0]) ** 2 + (y - coordinate[1]) ** 2)\n wavenumber = 2 * np.pi * frequency * slowness\n focal = 1 / np.sqrt(r + 1e-6) * np.exp(-1j * wavenumber * r)\n return focal", "title": "" }, { "docid": "dce5937de114847c1d809958e1ae2f7a", "score": "0.49488792", "text": "def run_measurement(self, **kw):\n run_complement = kw.get('run_complement', True)\n\n # get qubits for setting temporary values\n qb_in_exp = self.find_qubits_in_tasks(self.qubits, self.task_list)\n temp_vals = []\n\n # analysis_kwargs to be passed to the DriveAmpCalib measurements\n ana_kw = self.init_kwargs.pop('analysis_kwargs_da_calib', {})\n opt_dict = ana_kw.pop('options_dict', {})\n for i, npp in enumerate(self.n_pulses_pi):\n if npp in [1, 2]:\n with temporary_value(*temp_vals):\n od = {}\n if npp == 1 and 'fit_t2_r' not in opt_dict:\n # do not fit T2 in this case\n od['fit_t2_r'] = False\n od.update(opt_dict)\n analysis_kwargs = {'options_dict': od}\n analysis_kwargs.update(ana_kw)\n DACalib = NPulseAmplitudeCalib(\n task_list=self.task_list,\n sweep_points=self.sweep_points,\n qubits=self.qubits,\n n_repetitions=self.n_repetitions,\n n_pulses_pi=npp,\n analysis_kwargs=analysis_kwargs,\n **self.init_kwargs)\n self.measurements += [DACalib]\n\n # set the corrections from this measurement as temporary values\n # for the next measurements\n if npp == 1:\n temp_vals = []\n for qb in qb_in_exp:\n amp180 = qb.ge_amp180() # current amp180\n # we need to adjust the amp90_scale as well since the\n # previously calibrated value is with respect to the\n # current amp180\n amp90_sc = qb.ge_amp90_scale() # current amp90_scale\n amp90 = amp180 * amp90_sc # calibrated amp90\n # calibrated amp180\n corr_amp180 = DACalib.analysis.proc_data_dict[\n 'analysis_params_dict'][qb.name][\n 'correct_scalings_mean'] * amp180\n # adjust amp90_scale based on the calibrated amp180\n corr_amp90_sc = amp90 / corr_amp180\n temp_vals.extend([(qb.ge_amp180, corr_amp180),\n (qb.ge_amp90_scale, corr_amp90_sc)])\n else:\n temp_vals = [\n (qb.ge_amp90_scale, DACalib.analysis.proc_data_dict[\n 'analysis_params_dict'][qb.name][\n 'correct_scalings_mean'])\n for qb in qb_in_exp]\n else:\n od = {}\n if 'fit_t2_r' not in opt_dict:\n # do not fit T2 in this case\n od['fit_t2_r'] = False\n od.update(opt_dict)\n analysis_kwargs = {'options_dict': od}\n analysis_kwargs.update(ana_kw)\n with temporary_value(*temp_vals):\n # measure for 1/npp\n DACalib = NPulseAmplitudeCalib(\n task_list=self.task_list,\n sweep_points=self.sweep_points,\n 
qubits=self.qubits,\n n_repetitions=self.n_repetitions,\n n_pulses_pi=npp,\n analysis_kwargs=analysis_kwargs,\n **self.init_kwargs)\n self.measurements += [DACalib]\n\n if run_complement:\n # measure for 1 - 1/npp\n tl = []\n for j, task in enumerate(self.task_list):\n # set the fixed_scaling to the calibrated value of\n # 1/npp from the previous measurement\n fixed_scaling = DACalib.analysis.proc_data_dict[\n 'analysis_params_dict'][task['qb']][\n 'correct_scalings_mean']\n tl_dict = {'fixed_scaling': fixed_scaling}\n tl_dict.update(task)\n tl += [tl_dict]\n with temporary_value(*temp_vals):\n DACalib = NPulseAmplitudeCalib(\n task_list=tl,\n sweep_points=self.sweep_points,\n qubits=self.qubits,\n n_repetitions=self.n_repetitions,\n n_pulses_pi=npp,\n analysis_kwargs=ana_kw,\n **self.init_kwargs)\n self.measurements += [DACalib]", "title": "" }, { "docid": "fd9eb670960efa8f9d0dd6b3839357ab", "score": "0.49342167", "text": "def cylindrical(antenna, frequency, slowness, coordinate=(0.0, 0.0)):\n\n x, y = antenna.get_xy()\n r = np.sqrt((x - coordinate[0]) ** 2 + (y - coordinate[1]) ** 2)\n wavenumber = 2 * np.pi * frequency * slowness\n focal = 1 / np.sqrt(r + 1e-6) * np.exp(-1j * wavenumber * r)\n covariance = xouter(focal)\n return covariance.view(ap.CovarianceMatrix).astype(complex)", "title": "" }, { "docid": "1a0adb9eaae3eb09eaaf5c708725af4b", "score": "0.48578247", "text": "def solve_cdma_sys_6(freq,r,c,null_angles,array_th):\n A = np.zeros((6,6),dtype=np.complex128)\n\n #These values are fixed, and will always be like this for this to work\n c1 = np.array([0,1,0,0,0,1]) # H2(omega) == H6(omega)\n c2 = np.array([0,0,1,0,1,0]) # H3(omega) == H5(omega)\n b = np.array([[1],[0],[0],[0],[0],[0]])\n\n A[0,:] = np.conj(ut.find_steering_vector_t(freq,r,c,array_th,0))\n A[1,:] = np.conj(ut.find_steering_vector_t(freq,r,c,array_th,null_angles[0]))\n A[2,:] = np.conj(ut.find_steering_vector_t(freq,r,c,array_th,null_angles[1]))\n A[3,:] = np.conj(ut.find_steering_vector_t(freq,r,c,array_th,null_angles[2]))\n A[4,:] = c1\n A[5,:] = c2\n\n H = (np.matmul(la.inv(A),b))\n H = H.flatten()\n return H", "title": "" }, { "docid": "8d84c6123daef1bad58c2e12cf88022e", "score": "0.48518595", "text": "def make_signal(self, waveform):\n\n # --- Set up timing\n\n # index of the absolute maximum peak\n #idx = np.argmax(abs(waveform['hplus'].data.data))\n\n # Just make this the start of the waveform for now\n idx = 0\n\n # Epoch = GPS start of time series. Want the peak time of the waveform\n # to be aligned to the geocenter, so set the epoch to the geocentric\n # peak time minus the time to the waveform peak. 
In other words:\n        #   (waveform epoch) = (geocentric peak time) - (# of seconds to peak)\n\n        waveform['hplus'].epoch = self.ext_params.geocent_peak_time \\\n                - idx/float(waveform['Fs'])\n        waveform['hcross'].epoch = self.ext_params.geocent_peak_time \\\n                - idx/float(waveform['Fs'])\n\n        # Apply tapering\n        if self.taper:\n\n            lalsim.SimInspiralREAL8WaveTaper(waveform['hplus'].data,\n                    lalsim.SIM_INSPIRAL_TAPER_STARTEND)\n            lalsim.SimInspiralREAL8WaveTaper(waveform['hcross'].data,\n                    lalsim.SIM_INSPIRAL_TAPER_STARTEND)\n\n\n        # Scale for distance\n        waveform['hplus'].data.data *= waveform['Dref'] / self.ext_params.distance\n        waveform['hcross'].data.data *= waveform['Dref'] / self.ext_params.distance\n\n        # Inclination dependence (see e.g.,\n        # http://arxiv.org/abs/gr-qc/0308050v1), or any pulsar paper)\n        waveform['hplus'].data.data *= 0.5 * ( 1.0 +\n                np.cos(self.ext_params.inclination)*np.cos(self.ext_params.inclination) )\n        waveform['hcross'].data.data *= np.cos(self.ext_params.inclination)\n\n\n        # This function computes antenna factors every 250 ms and should be\n        # perfect for our purposes\n        tmp = lalsim.SimDetectorStrainREAL8TimeSeries(waveform['hplus'],\n                waveform['hcross'], self.ext_params.ra, self.ext_params.dec,\n                self.ext_params.polarization, self.det_site) \n\n        #print waveform['hplus'].epoch\n        #print self.det_site\n        #print tmp.epoch\n\n        # Project waveform onto these extrinsic parameters\n        self.td_signal = \\\n                pycbc.types.timeseries.TimeSeries(initial_array=tmp.data.data,\n                        delta_t=tmp.deltaT, epoch=tmp.epoch)\n\n        self.epoch = tmp.epoch\n\n        # Apply scale factor\n        self.td_signal *= self.scale_factor", "title": "" }, { "docid": "84797978c2c5a6339255e070623f874e", "score": "0.4850122", "text": "def scalp_coupling_index(\n    raw,\n    l_freq=0.7,\n    h_freq=1.5,\n    l_trans_bandwidth=0.3,\n    h_trans_bandwidth=0.3,\n    verbose=False,\n):\n    _validate_type(raw, BaseRaw, \"raw\")\n    picks = _validate_nirs_info(raw.info, fnirs=\"od\", which=\"Scalp coupling index\")\n\n    raw = raw.copy().pick(picks).load_data()\n    zero_mask = np.std(raw._data, axis=-1) == 0\n    filtered_data = raw.filter(\n        l_freq,\n        h_freq,\n        l_trans_bandwidth=l_trans_bandwidth,\n        h_trans_bandwidth=h_trans_bandwidth,\n        verbose=verbose,\n    ).get_data()\n\n    sci = np.zeros(picks.shape)\n    for ii in range(0, len(picks), 2):\n        with np.errstate(invalid=\"ignore\"):\n            c = np.corrcoef(filtered_data[ii], filtered_data[ii + 1])[0][1]\n        if not np.isfinite(c):  # someone had std=0\n            c = 0\n        sci[ii] = c\n        sci[ii + 1] = c\n    sci[zero_mask] = 0\n    sci = sci[np.argsort(picks)]  # restore original order\n    return sci", "title": "" }, { "docid": "c619451dbfdbea50f7e49d883cf1c5a4", "score": "0.48319927", "text": "def test_45_amplitude_calibration_solutions_field_1():\n\tcasalog.origin(\"test_45_amplitude_calibration_solutions_field_1\")\n\tcasalog.post(\"starting\")\n\n\tgaincal(vis='G192_flagged_6s.ms', caltable='calG192.G2.2', \\\n\t        field='1', refant='ea05', solnorm=F, \\\n\t        gaintable=['calG192.antpos', 'calG192.requantizer', 'calG192.gaincurve', \\\n\t                   'calG192.opacity', 'calG192.K0.b.2', \\\n\t                   'calG192.B0.b.2', 'calG192.G1.int.2'], \\\n\t        gainfield=['', '', '', '', '3', '3', '1'], \\\n\t        interp=['', '', '', '', 'nearest', 'nearest', 'nearest'], \\\n\t        solint='inf', gaintype='G', calmode='a', append=True)", "title": "" }, { "docid": "fa5be338650174c0010f13c72b1ad290", "score": "0.48291108", "text": "def ct_calibrate(photons, material, sinogram, scale, correct=True):\n\n\t# Get dimensions and work out detection for just air of twice the side\n\t# length (has to be the 
same as in ct_scan.py)\n\tif len(sinogram.shape) > 1:\n\t\tn = sinogram.shape[1]\n\t\tangles = sinogram.shape[0]\n\telse:\n\t\tn = sinogram.shape[0]\n\n\t# perform calibration\n\tcalibration_value = ct_detect(photons, material.coeff('Air'), 2*scale*n)\n\t# Use log to turn intensities into attenuations\n\tcalibrated_sinogram = -np.log(sinogram / calibration_value)\n\n\tdepths = np.logspace(-3,3,1000)\n\n\twater_calibration = ct_detect(photons, material.coeff('Water'), depths)\n\twater_calibration_air = ct_detect(photons, material.coeff('Air'), depths)\n\n\twater_coeff = -np.log(water_calibration/water_calibration_air)\n\tfn = scipy.interpolate.interp1d(water_coeff, depths, fill_value = 'extrapolate')\n\n\treturn fn(calibrated_sinogram)", "title": "" }, { "docid": "b27d617c66ba541e334ac7f14bc81f79", "score": "0.48215657", "text": "def __calibrationWaveSolution(self):\n my_name = '__calibrationWaveSolution'\n # check that transient members are present\n if not (hasattr(self, \"_Spectrum__calibration_peaks_x\") and hasattr(self, \"_Spectrum__calibration_peaks_y\") and hasattr(self, \"_Spectrum__calibration_peaks_wl\")):\n plt.close()\n raise SpectrumNameError(my_name, \"One or more of __calibration_peaks_x, __calibration_peaks_y or __calibration_peaks_wl are missing.\")\n\n # check that transient members are present\n if (len(self.__calibration_peaks_x) < 3):\n plt.close()\n raise SpectrumCalibrationError(my_name, \"Could not compute wavelength solution: Three or more points are needed to calibrate\")\n\n # save calibration inputs\n self.__saveCalibrationPeaks()\n\n # compute the wavelength solution\n self.__wave_solution = np.poly1d(np.polyfit(self.__calibration_peaks_x, self.__calibration_peaks_wl, 3))", "title": "" }, { "docid": "2bcd7df10102c30d020185de6ed299c0", "score": "0.4807967", "text": "def test_44_amplitude_calibration_solutions_field_0():\n\tcasalog.origin(\"test_44_amplitude_calibration_solutions_field_0\")\n\tcasalog.post(\"starting\")\n\n\tgaincal(vis='G192_flagged_6s.ms', caltable='calG192.G2.2', \\\n\t field='0', refant='ea05', solnorm=F, \\\n\t gaintable=['calG192.antpos', 'calG192.requantizer', 'calG192.gaincurve', \\\n\t 'calG192.opacity', 'calG192.K0.b.2', \\\n\t 'calG192.B0.b.2', 'calG192.G1.int.2'], \\\n\t gainfield=['', '', '', '', '3', '3', '0'], \\\n\t interp=['', '', '', '', 'nearest', 'nearest', 'nearest'], \\\n\t solint='inf', gaintype='G', calmode='a')", "title": "" }, { "docid": "a55d8b72c22cc79459c9ae637682b1cd", "score": "0.4787873", "text": "def ct_calibrate(photons, material, sinogram, scale, correct=True):\n\n\t# Get dimensions and work out detection for just air of twice the side\n\t# length (has to be the same as in ct_scan.py)\n\n\tn = sinogram.shape[1]\n\n\t# work out value of a sinogram point of air\n\tv = ct_detect(photons, material.coeff('Air'), 2*n*scale,1)[0]\n\t\n\t# construct sinogram of air\n\tsinogram_air = v * np.ones(sinogram.shape)\n\t\n\t# perform calibration\n\tsinogram = -np.log( np.divide(sinogram, sinogram_air))\n\n\n\treturn sinogram", "title": "" }, { "docid": "9374964ad45102ff7b307bf9cb0a8173", "score": "0.47851512", "text": "def analyze(data, wavelet=DEFAULT_WAVELET, scales=None, dt=1., dj=0.125, \n mask_coi=False, frequency=False, axis=-1):\n n = data.shape[axis]\n if scales is None:\n s0 = find_s0(wavelet, dt)\n scales = find_optimal_scales(s0, dt, dj, n)\n if frequency:\n scalogram = cwt_tc_frequency_jit(data, wavelet.frequency, scales, dt, axis)\n else:\n scalogram = cwt_tc_time_jit(data, wavelet.time, scales, dt, axis)\n scales 
= jnp.asarray(scales)\n return WaveletAnalysis(data=data, wavelet=wavelet, \n dt=dt, dj=dj, mask_coi=mask_coi,\n frequency=frequency, axis=axis, scales=scales, \n scalogram=scalogram)", "title": "" }, { "docid": "456a118443822fbe8ea5b78105960526", "score": "0.47669578", "text": "def bandpass_correct(self):\n info(\"Applying scalar B-Jones amplitudes\")\n # Read in the file\n bjones_inp = np.loadtxt(self.bandpass_table,dtype=str)\n self.bpass_input_freq = bjones_inp[0][1:].astype(np.float64)\n self.bpass_input_freq *= 1e9 # convert from GHz to Hz\n self.bjones_ampl = bjones_inp[1:,1:].astype(np.float64)\n \n\n # Interpolate between the frequencies given in the bandpass table\n if self.bpass_input_freq[0] > self.chan_freq[0] or self.bpass_input_freq[-1] < self.chan_freq[-1]:\n warn(\"Input frequencies out of range of MS frequencies. Extrapolating in some places.\")\n\n bjones_interpolated=np.zeros((self.Nant,self.chan_freq.shape[0]))\n for ant in range(self.Nant):\n spl = ius(self.bpass_input_freq, self.bjones_ampl[ant],k=self.bandpass_freq_interp_order)\n bjones_interpolated[ant] = spl(self.chan_freq)\n\n # apply the B-Jones terms by iterating over baselines\n for a0 in range(self.Nant):\n for a1 in range(a0+1,self.Nant):\n for msfreq_ind in range(self.chan_freq.shape[0]):\n bl_ind = self.baseline_dict[(a0,a1)]\n self.data[bl_ind,msfreq_ind,:] *= bjones_interpolated[a0,msfreq_ind] * bjones_interpolated[a1,msfreq_ind]\n self.save_data()\n\n\n ### plot bandpasses", "title": "" }, { "docid": "381d53d496cab29cb4e0dd577840b973", "score": "0.47035894", "text": "def get_cd_coef_6(r,c,array_th,null_angles,freqs,freq_min,freq_max):\n\n if len(null_angles) != 3:\n print(\"Error! Wrong Number of Null Angles.\")\n print(\"Current Number of Nulls is \", len(null_angles))\n\n n_elements = len(array_th)\n if (n_elements != 6):\n print(\"Error! 
Wrong Number of Array Elements.\")\n n_coef = len(freqs)\n\n freq_max_index = int(min([ f for f in freqs if f >= freq_max ])/freqs[1])\n freq_min_index = int(max([ f for f in freqs if f <= freq_min ])/freqs[1])\n\n #Compute the frequency domain representation\n Hw = np.zeros((n_elements,n_coef),dtype=np.complex128)\n for freq in range(n_coef):\n if (freqs[freq] > freq_min and freqs[freq] < freq_max):\n Hw[:,freq] = solve_cdma_sys_6(freqs[freq],r,c,null_angles,array_th)\n\n\n filter_end = freq_max_index\n filter_start = n_coef - filter_end + 1\n Hw[:,filter_start:] = np.flip(np.conj(Hw[:,1:filter_end]),axis=1)\n\n plt.plot(np.abs(Hw[1]),label=\"abs\")\n hw = nf.fftshift((nf.ifft(Hw)))\n hw = hw*sig.hamming(n_coef)\n\n return hw", "title": "" }, { "docid": "ac3ad0f176c1069d02005ed96da1f0d8", "score": "0.46992806", "text": "def calc_correction_per_input(cache, name, inp, product):\n product_sensor = get_cal_product(cache, attrs, product)\n try:\n index = cal_input_map[inp]\n except KeyError:\n raise KeyError(\"No calibration solutions available for input \"\n \"'{}' - available ones are {}\"\n .format(inp, sorted(cal_input_map.keys())))\n if product == 'K':\n correction_sensor = calc_delay_correction(product_sensor, index,\n data_freqs)\n elif product == 'B':\n correction_sensor = calc_bandpass_correction(product_sensor, index,\n data_freqs, cal_freqs)\n elif product == 'G':\n correction_sensor = calc_gain_correction(product_sensor, index)\n else:\n raise KeyError(\"Unknown calibration product '{}'\".format(product))\n cache[name] = correction_sensor\n return correction_sensor", "title": "" }, { "docid": "3b7e65ffce4cad94b2c99fbfa250a351", "score": "0.46895957", "text": "def test_46_amplitude_calibration_solutions_field_3():\n\tcasalog.origin(\"test_46_amplitude_calibration_solutions_field_3\")\n\tcasalog.post(\"starting\")\n\n\tgaincal(vis='G192_flagged_6s.ms', caltable='calG192.G2.2', \\\n\t field='3', refant='ea05', solnorm=F, \\\n\t gaintable=['calG192.antpos', 'calG192.requantizer', 'calG192.gaincurve', \\\n\t 'calG192.opacity', 'calG192.K0.b.2', \\\n\t 'calG192.B0.b.2', 'calG192.G1.int.2'], \\\n\t gainfield=['', '', '', '', '3', '3', '3'], \\\n\t interp=['', '', '', '', 'nearest', 'nearest', 'nearest'], \\\n\t solint='inf', gaintype='G', calmode='a', append=True)", "title": "" }, { "docid": "840982b25bb415e0ab847b84d0968405", "score": "0.46889645", "text": "def detect_freqs(self):\r\n channel_avgs = []\r\n differences = []\r\n for i in range(config.settings[\"devices\"][self.board][\"configuration\"][\"N_FFT_BINS\"]):\r\n channel_avgs.append(sum(self.freq_channels[i])/len(self.freq_channels[i]))\r\n differences.append(((self.freq_channels[i][0]-channel_avgs[i])*100)//channel_avgs[i])\r\n for i in [\"beat\", \"low\", \"mid\", \"high\"]:\r\n if any(differences[j] >= self.min_percent_diff[i]\\\r\n and self.freq_channels[j][0] >= self.min_detect_amplitude[i]\\\r\n for j in range(*self.detection_ranges[i]))\\\r\n and (time.time() - self.prev_freq_detects[i] > 0.1)\\\r\n and len(self.freq_channels[0]) == self.freq_channel_history:\r\n self.prev_freq_detects[i] = time.time()\r\n self.current_freq_detects[i] = True\r\n #print(i)\r\n else:\r\n self.current_freq_detects[i] = False", "title": "" }, { "docid": "89d3a14572ed42ddb79f418f77654899", "score": "0.46814007", "text": "def couplecalc(hqs,vecs,**kwargs):\n\n\t#---default gets the functional form from tweak but could be overriden\n\tenergy_functional = kwargs.get('energy_functional',tweak['energy_form'])\n\n\tfallback = 
kwargs.get('fallback',False)\n\tlenscale = kwargs.get('lenscale',1.0)\n\tbinner = kwargs.get('binner','explicit')\n\t#---if no incoming curvatures then we use the zero field\n\t#---identified a major error here: cqs = kwargs.get('cqs',fft_field(np.zeros(hqs.shape)))\n\t#---note the misleading naming\n\tcfs = kwargs.get('cfs',np.zeros(hqs.shape))\n\tcqs = fft_field(np.array(cfs))\n\n\t#---identify the wavevectors\n\tnframes = len(vecs)\n\tm,n = mn = np.shape(hqs)[1:]\n\tLx,Ly = np.mean(vecs,axis=0)[:2]\n\tq2d = lenscale*np.array([[np.sqrt(\n\t\t((i-m*(i>m/2))/((Lx)/1.)*2*np.pi)**2+\n\t\t((j-n*(j>n/2))/((Ly)/1.)*2*np.pi)**2)\n\t\tfor j in range(0,n)] for i in range(0,m)])\n\tq_raw = np.reshape(q2d,-1)[1:]\n\tarea = (Lx*Ly/lenscale**2)\n\tbinner_function = {\n\t\t'blurry':blurry_binner,\n\t\t'perfect':perfect_collapser,\n\t\t'explicit':lambda x:x,\n\t\t}[binner]\n\n\t#---on the fallbacks we might have different numbers of frames\n\tif len(hqs) != len(cqs):\n\t\tassert fallback\n\t\tcqs = cqs[np.array([i%len(cqs) for i in range(len(hqs))])]\n\t\tassert len(hqs)==len(cqs)\n\n\t#---crucial steps\n\tif tweak.get('fft_flag','complex')=='complex': \n\t\tdef multipliers(x,y):\n\t\t\t#---use the fact that all real-valued functions are symmetric under FFT\n\t\t\treturn x*np.conjugate(y)\n\t\ttermlist = [multipliers(x,y) for x,y in [(hqs,hqs),(hqs,cqs),(cqs,hqs),(cqs,cqs)]]\n\telse: raise ValueError('unsupported fft_flag: %s'%tweak.get('fft_flag'))\n\t#---take the ensemble average, convert the matrix into a vector, and drop the zeroth-order term\n\ttermlist = [np.reshape(np.mean(k,axis=0),-1)[1:] for k in termlist]\n\t#---we confirm that the incoming functions obey the FFT before casting to real below\n\tfor t in [termlist[0],termlist[1]+termlist[2],termlist[3]]:\n\t\tassert np.all(np.absolute(np.imag(t))<machine_eps)\n\ttermlist = [np.real(k) for k in termlist]\n\t\n\t#---equation 23\n\tdef energy_raw(kappa,gamma):\n\n\t\t\"\"\"\n\t\tDefine the energy.\n\t\tThis function inherits the wavevector.\n\t\tShould the function be generic, or is it necessary to pack it up to optimize it later?\n\t\t\"\"\"\n\n\t\tsignterm = tweak.get('inner_sign',-1.0)\n\t\t#---included the half-factor for hcv3 v28,29 and removed for v30\n\t\tcurv = (kappa*area*(termlist[0]*q_raw**4+signterm*termlist[1]*q_raw**2\n\t\t\t+signterm*termlist[2]*q_raw**2+termlist[3])\n\t\t\t#---removed square on the first term in front of the tension term\n\t\t\t+gamma*area*(termlist[0]*q_raw**2))\n\t\treturn curv\n\n\tdef energy(kappa,gamma,vibe):\n\n\t\t\"\"\"\n\t\tCorrected energy function.\n\t\t\"\"\"\n\n\t\tspectrum = energy_functional(energy_raw,q_raw,kappa,gamma,0)\n\t\treturn spectrum\n\n\t#---export\n\tout = {\n\t\t'area':area,\n\t\t'q2d':q2d,\n\t\t'wavevectors':q_raw,\n\t\t'energy':energy,\n\t\t'binner':binner_function,\n\t\t'grid':(m,n),\n\t\t#---HACKED then un-hacked because it was trying to send the function to store\n\t\t#'energy_raw':energy_raw,\n\t\t}\n\n\t#---return the raw energy function for debugging\n\tout_raw = kwargs.get('out_raw',False)\n\tif out_raw: out['energy_raw'] = energy_raw\n\treturn out", "title": "" }, { "docid": "a9804bf19d6d88f158c8361af1b59d55", "score": "0.4661667", "text": "def apply_charge_fluctuation(sns_df: pd.DataFrame, DataSiPM_idx: pd.DataFrame):\n\n    def rand_normal(sig):\n        return np.random.normal(0, sig)\n\n    pe_resolution = DataSiPM_idx.Sigma / DataSiPM_idx.adc_to_pes\n    pe_resolution = pe_resolution.reset_index().rename(columns={'SensorID': 'sensor_id'})\n    sns_df = sns_df.join(pe_resolution.set_index('sensor_id'), on='sensor_id')\n    
sns_df.rename(columns={0:'pe_res'}, inplace=True)\n\n    sns_df['charge'] += np.apply_along_axis(rand_normal, 0, sns_df.pe_res)\n\n    columns = ['event_id', 'sensor_id', 'charge']\n\n    return sns_df.loc[sns_df.charge > 0, columns]", "title": "" }, { "docid": "fcf6a450b7454d22e1092e74101d295e", "score": "0.46573895", "text": "def process(self):\n\n        self.get_dataframe()\n        d = self.d\n\n        # Skip a chunk of data at the start, where Weird Things(TM) sometimes\n        # happen\n        d.iloc[:INIT_SKIP] = np.nan\n\n        # Mask erroneous values in the sensitivity\n        d.loc[d['AL52CO_sens'] <= SENS_CUTOFF, 'AL52CO_sens'] = np.nan\n        d['AL52CO_sens'].fillna(method='bfill', inplace=True)\n        d['AL52CO_sens'].fillna(method='ffill', inplace=True)\n\n        # Mask erroneous values in the zero\n        d.loc[d['AL52CO_zero'] == 0, 'AL52CO_zero'] = np.nan\n        d['AL52CO_zero'].fillna(method='bfill', inplace=True)\n\n        # Build a flag indicating where the sensitivity and zero have changed\n        # after a calibration, with a 2 sec safety buffer\n        d['CAL_FLAG'] = d.AL52CO_sens.diff() != 0\n        indices = np.where(d.CAL_FLAG != 0)[0]\n        indices_p2 = indices + 2\n        d.loc[d.index[indices_p2], 'CAL_FLAG'] = 1\n        d.loc[d.index[indices], 'CAL_FLAG'] = 0\n\n        # Interpolate the zero\n        flagged_avg(d, 'CAL_FLAG', 'AL52CO_zero', out_name='ZERO', interp=True)\n        d.ZERO.fillna(method='bfill', inplace=True)\n\n        # Interpolate the sensitivity\n        flagged_avg(d, 'CAL_FLAG', 'AL52CO_sens', out_name='SENS', interp=True)\n        d.SENS.fillna(method='bfill', inplace=True)\n\n        # Calculate concentration using interpolated sens & zero\n        d['CO_AERO'] = (d.AL52CO_counts - d.ZERO) / d.SENS\n\n        # Build the QA flag dataframe\n        flag_df, flag_descs = self.flag()\n\n        # AL52CO output\n        co_out = DecadesVariable(d['CO_AERO'], name='CO_AERO',\n                                 flag=DecadesBitmaskFlag)\n\n        # Add flagging to the output\n        for mask in flag_df.columns:\n            co_out.flag.add_mask(flag_df[mask].values, mask, flag_descs[mask])\n\n        # Write output\n        self.add_output(co_out)", "title": "" }, { "docid": "e11fc2a6fdc7709e7e155424211d4871", "score": "0.46391523", "text": "def calculate_adc(dwi_array, bvals_array):\n    z,y,x,b = np.shape(dwi_array)\n\n\n    THRESHOLD_FRACTION = 0.004\n    threshold = int(THRESHOLD_FRACTION * np.max(dwi_array))\n\n    y_vals = np.transpose(dwi_array)\n    y_vals = np.reshape(y_vals, (b, x*y*z))\n\n    # threshold the values\n    y_vals[y_vals <= threshold] = 0\n\n    # create masked array for log values to handle log(0) cases\n    logy = np.ma.log(y_vals)\n\n    # Free y_vals\n    y_vals = None\n\n    mask = np.ma.getmask(logy)\n    logy = logy.filled(0)\n\n    # calculate ADC for each point in volume\n    # do the polyfit in chunks to save memory\n    adc_map = np.zeros(logy.shape[1])\n    for i in range(1 + logy.shape[1] // POLYFIT_SIZE):\n        s = i*POLYFIT_SIZE\n        e = min((i+1)*POLYFIT_SIZE, logy.shape[1])\n        if s == e:\n            break\n        # ADC is first coeff in polyfit solve\n        adc_map[s:e] = np.polyfit(bvals_array, logy[:, s:e], 1)[0]\n\n    # get rid of values where log(y) would've been nan\n    for i in range(np.shape(mask)[0]):\n        adc_map[mask[i] == True] = 0\n\n    adc_map = np.reshape(adc_map, (x, y, z))\n    adc_map = np.transpose(adc_map)\n\n    # clean up ADC values\n    adc_map *= -1\n    adc_map[np.isnan(adc_map)] = 0\n    adc_map[adc_map < 0] = 0\n\n    return adc_map", "title": "" }, { "docid": "85f58e7a6978e9d9ad4b6ccdcfb87b90", "score": "0.46197724", "text": "def ffcalc(a, freq=None):\r\n    if freq is None: freq=32000\r\n    corr=sc.correlate(a,a,mode='same')\r\n    corr=corr[(len(corr)//2):(len(corr)-len(corr)//4)]\r\n    dat=np.diff(np.where(np.diff(corr)>0,1,0))\r\n    
out=float(freq)/float(((list(dat)).index(-1)))\r\n    return out", "title": "" }, { "docid": "acc1107078ee34621dfa0926ea63115c", "score": "0.46112573", "text": "def singleTempToRes (self, temp, channel, calIndex=-1):\n        if calIndex == -1:\n            calIndex = channel\n        try:\n            if self.calibrations[calIndex][0] == INTERPOLATION:\n                # do the log-log interpolation in reverse\n                return (np.exp(np.interp(np.log(temp),\n                        np.log(np.array(self.calibrations[calIndex][2][::-1])),\n                        np.log(np.array(self.calibrations[calIndex][1][::-1]))\n                        )))\n            elif self.calibrations[calIndex][0] == VRHOPPING:\n                return self.calibrations[calIndex][1]['K'] * np.exp((self.calibrations[calIndex][2]['K']/temp)**.25)\n            elif self.calibrations[calIndex][0] == FUNCTION:\n                # same as getSingleTemp, but use inverse instead of function\n                t = temp\n                return eval(self.calibrations[calIndex][2])\n            elif self.calibrations[calIndex][0] == DEFAULT:\n                if calIndex > 0:\n                    return self.singleTempToRes(temp, channel, 0) # use calibration 0\n                else:\n                    return temp2res(temp) # if no calibration for the device either, use old-fashioned temp2res\n        except Exception as e:\n            print(\"Exception converting temp to res: %s\" % str(e))\n            return 0.0", "title": "" }, { "docid": "743f6a886dd337a163bcaa8e183784e6", "score": "0.45983043", "text": "def _get_coeff_ra(freq):\n    coeff_ra_dict = _coeff_ra_table()\n\n    freq_band = get_freq_band(freq)\n    if (freq_band is not None) and (freq_band in coeff_ra_dict):\n        return coeff_ra_dict[freq_band]\n\n    if freq < 2e9:\n        freq_band_aux = \"S\"\n    elif freq > 12e9:\n        freq_band_aux = \"X\"\n\n    warn(\n        \"Radar frequency out of range. \"\n        + \"Coefficients only applied to S, C or X band. \"\n        + freq_band_aux\n        + \" band coefficients will be used.\"\n    )\n\n    return coeff_ra_dict[freq_band_aux]", "title": "" }, { "docid": "ebebba7d22d0983bdb8048de7dc44db2", "score": "0.45702434", "text": "def compute_sensfunc(self):\n\n        meta_table, out_table = flux_calib.sensfunc(self.wave, self.counts, self.counts_ivar, self.counts_mask,\n                                                    self.meta_spec['EXPTIME'], self.meta_spec['AIRMASS'], self.std_dict,\n                                                    self.meta_spec['LONGITUDE'], self.meta_spec['LATITUDE'],\n                                                    self.meta_spec['ECH_ORDERS'],\n                                                    telluric=False, polyorder=self.par['polyorder'],\n                                                    balm_mask_wid=self.par['UVIS']['balm_mask_wid'],\n                                                    nresln=self.par['UVIS']['nresln'],\n                                                    resolution=self.par['UVIS']['resolution'],\n                                                    trans_thresh=self.par['UVIS']['trans_thresh'],\n                                                    polycorrect=self.par['UVIS']['polycorrect'],\n                                                    polyfunc=self.par['UVIS']['polyfunc'],\n                                                    debug=self.debug)\n        # Add the algorithm to the meta_table\n        meta_table['ALGORITHM'] = self.par['algorithm']\n\n        self.steps.append(inspect.stack()[0][3])\n\n        return meta_table, out_table", "title": "" }, { "docid": "f639c7867206601b12c9108663086fd0", "score": "0.45695922", "text": "def calib_FD_using_sensor_data(self, data_files, agg_interval, plot_meas=False):\n        all_speed = []\n        all_flow = []\n\n        for data_f in data_files:\n\n            if plot_meas is True:\n                fig = plt.figure(figsize=(10, 7), dpi=100)\n                ax_speed = fig.add_axes([0.1, 0.1, 0.8, 0.8])\n                plt.hold(True)\n\n                fig = plt.figure(figsize=(10, 7), dpi=100)\n                ax_flow = fig.add_axes([0.1, 0.1, 0.8, 0.8])\n                plt.hold(True)\n\n            # ===========================================================================================\n            # read the sensor data file\n            if not exists(data_f):\n                raise Exception('Error: sensor data for {0} does not exist.'.format(data_f))\n\n            # timestamp(s), speed(kph), count(veh)\n            data = np.genfromtxt(data_f, delimiter=',')\n            detection_cycle = data[1, 0] - data[0, 0]\n\n            # time stamp is shifted, 
hence the last row is not effective\n data = data[:-1, :]\n data[:, 0] = data[:, 0] + detection_cycle\n\n # ============================================\n # aggregate the data into aggregation intervals\n no_veh_idx = np.isinf(data[:, 1])\n data[no_veh_idx, 1] = 0.0\n data[no_veh_idx, 2] = 0.0\n missing_data_idx = np.isnan(data[:, 1]) | (data[:, 1] < 0)\n data[missing_data_idx, 1] = np.nan\n data[missing_data_idx, 2] = np.nan\n\n agg_steps = agg_interval / detection_cycle\n agg_data = []\n for i in range(0, int(data.shape[0] / agg_steps)):\n start_idx = int(i * agg_steps)\n end_idx = int((i + 1) * agg_steps)\n avg_speed = np.sum(data[start_idx:end_idx, 1] * data[start_idx:end_idx, 2]) / \\\n np.sum(data[start_idx:end_idx, 2])\n cum_count = np.sum(data[start_idx:end_idx, 2])\n agg_data.append([data[end_idx - 1, 0], avg_speed, cum_count])\n\n agg_data = np.array(agg_data)\n\n if plot_meas is True:\n # visualize the smoothing\n ax_speed.plot(data[:, 0], data[:, 1] / 1.609, color='b',\n linewidth=2, label='30 s')\n plt.hold(True)\n ax_speed.plot(agg_data[:, 0], agg_data[:, 1] / 1.609, color='r',\n linewidth=2, label='{0} s'.format(agg_interval))\n\n ax_flow.plot(data[:, 0], data[:, 2] * 3600.0 / detection_cycle, color='b',\n linewidth=2, label='30 s')\n plt.hold(True)\n ax_flow.plot(agg_data[:, 0], agg_data[:, 2] * 3600.0 / agg_interval, color='r',\n linewidth=2, label='{0} s'.format(agg_interval))\n\n print('aggregated {0} measurements to {1}'.format(data.shape[0], agg_data.shape[0]))\n data = agg_data\n\n # ============================================\n # clean data, remove inf, -1, and nan values, using the speed entry\n valid_idx = ~((np.isnan(data[:, 1]) | np.isinf(data[:, 1])) | (data[:, 1] < 0))\n cleaned_data = data[valid_idx, :]\n print('Got clean data points {0}/{1}'.format(cleaned_data.shape[0],\n data.shape[0]))\n\n # ============================================\n # convert speed from kph to mph, and count to flow\n cleaned_data[:, 1] = cleaned_data[:, 1] / 1.609\n cleaned_data[:, 2] = cleaned_data[:, 2] * 3600.0 / agg_interval\n\n # ============================================\n # append data points\n all_speed = np.concatenate([all_speed, cleaned_data[:, 1]])\n all_flow = np.concatenate([all_flow, cleaned_data[:, 2]])\n\n if plot_meas is True:\n ax_flow.set_title('flow {0}'.format(data_f.split('/')[-1]))\n ax_speed.set_title('speed {0}'.format(data_f.split('/')[-1]))\n\n # =============================================================\n # data set for calibration\n speed_array = all_speed.reshape(1, all_speed.size).squeeze()\n flow_array = all_flow.reshape(1, all_flow.size).squeeze()\n density_array = flow_array / speed_array\n\n # =============================================================\n # remove the noisy outlier point in a triangle\n # where v < 20 mph & w >= w_noise from point (0,0) and (rho_noise,0))\n v_thres = 40\n flow_thres = 500\n rho_noise = 300\n w_noise = -12\n rho_thres = 150\n\n noise_idx = (speed_array <= 1) | ((density_array < rho_noise) & (speed_array < v_thres))\n noisy_density = density_array[noise_idx]\n noisy_speed = speed_array[noise_idx]\n noisy_flow = flow_array[noise_idx]\n\n ff_index = (speed_array > v_thres) & (~noise_idx)\n cg_index = (speed_array <= v_thres) & (~noise_idx)\n rest_index = ~(ff_index | cg_index)\n\n ff_speed = speed_array[ff_index]\n ff_density = density_array[ff_index]\n ff_flow = flow_array[ff_index]\n\n cg_speed = speed_array[cg_index]\n cg_density = density_array[cg_index]\n cg_flow = flow_array[cg_index]\n\n # 
===========================================================================================\n        # fit a quadratic flow-density curve to the data in the free-flow regime\n        funcQuadFit = lambda vm_beta, rho: vm_beta[0] * rho - np.power(rho, 2) * vm_beta[0] / vm_beta[1]\n\n        # updated version, which assumes beta is very large to get an approximated TFD\n        beta = 1000\n        funcQuadFitVm = lambda vm, rho: vm * rho - np.power(rho, 2) * vm / beta\n        funErr = lambda vm, rho, q: funcQuadFitVm(vm, rho) - q\n        vm_init = [80]  # initial guess of vm\n\n        vm_est, success = optimize.leastsq(funErr, vm_init, args=(ff_density, ff_flow))\n\n        all_vm = vm_est[0]\n        # all_beta = vm_est[1]\n        all_beta = beta\n        print('vm:{0}; beta:{1}'.format(all_vm, all_beta))\n\n        # ====================================================================\n        # Fit a linear function to the congested regime\n        preset_rhom = 500\n        # funcLinearCong = lambda w_rhom, rho: w_rhom[0]*(rho-w_rhom[1])\n        funcLinearCong = lambda w_rhom, rho: w_rhom[0] * (rho - preset_rhom)\n        funErrCong = lambda w_rhom, rho, q: funcLinearCong(w_rhom, rho) - q\n\n        w_rhom_init = [-10, 500.0]\n        est_w_rhom, success = optimize.leastsq(funErrCong, w_rhom_init, args=(cg_density, cg_flow))\n        wc = est_w_rhom[0]\n        rho_m = preset_rhom\n        print('wc:{0}'.format(wc))\n        print(\n            'sqrt({0})'.format(np.power(wc * all_beta - all_vm * all_beta, 2) - 4 * all_vm * (-wc * all_beta * rho_m)))\n\n        # compute rho_c and rho_m\n        rho_c = (-(wc * all_beta - all_vm * all_beta) -\n                 np.sqrt(np.power(wc * all_beta - all_vm * all_beta, 2) - 4 * all_vm * (-wc * all_beta * rho_m))) / (\n                    2 * all_vm)\n        # rho_c = 150\n        q_max = funcQuadFit([all_vm, all_beta], rho_c)\n\n        # ====================================================================\n        fig_window = plt.figure(figsize=(15, 8), dpi=100)\n        fig = fig_window.add_subplot(111)\n\n        # scatter freeflow\n        plt.scatter(ff_density, ff_flow, color='g')\n        dens = np.linspace(0, rho_c, 100)\n        plt.plot(dens, funcQuadFit([all_vm, all_beta], dens), 'r-', linewidth=2.0)\n\n        # scatter congestion\n        plt.scatter(cg_density, cg_flow, color='k')\n        dens = np.linspace(rho_c, rho_m, 100)\n        plt.plot(dens, wc * (dens - rho_m), 'r-', linewidth=2.0)\n\n        # plot rest points\n        plt.scatter(noisy_density, noisy_flow, color='b')\n\n        plt.title('Fundamental diagram, agg {0} s'.format(agg_interval), fontsize=24)\n        plt.xlabel('Traffic density (veh/mile)', fontsize=24)\n        plt.xlim([0, 800])\n        plt.ylabel('Traffic flow (veh/hr)', fontsize=24)\n\n        text_str = r'freeflow: $q = v_m\\rho - v_m\\rho^2/\\beta$' + '\\n' \\\n                   r'congflow: $q = w(\\rho - \\rho_m)$' + '\\n' + \\\n                   r' $v_m$= {0} mph ({1} m/s)'.format(np.round(all_vm, 2),\n                                                       np.round(all_vm * 1609.34 / 3600.0, 2)) + '\\n' + \\\n                   r' $\\beta$= {0} veh/mile ({1} veh/m)'.format(np.round(all_beta, 2),\n                                                                np.round(all_beta / 1609.34, 4)) + '\\n' + \\\n                   r' $w$= {0} mph ({1} m/s)'.format(np.round(wc, 2), np.round(wc * 1609.34 / 3600.0, 2)) + '\\n' + \\\n                   r' $\\rho_c$= {0} veh/mile ({1} veh/m)'.format(np.round(rho_c, 2),\n                                                                 np.round(rho_c / 1609.34, 4)) + '\\n' + \\\n                   r' $\\rho_m$= {0} veh/mile ({1} veh/m)'.format(np.round(rho_m, 2),\n                                                                 np.round(rho_m / 1609.34, 4)) + '\\n' + \\\n                   r' $q_m$= {0} veh/hr ({1} veh/s)'.format(np.round(q_max, 2), np.round(q_max / 3600.0, 4))\n\n        anchored_text = AnchoredText(text_str, loc=1)\n        fig.add_artist(anchored_text)\n\n        plt.grid(True)\n        plt.draw()\n\n        # now plot the flow vs speed\n        fig = plt.figure(figsize=(10, 7), dpi=100)\n        ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])\n        plt.scatter(ff_speed, ff_flow, color='g')\n        plt.grid(True)\n        plt.hold(True)\n\n        
plt.scatter(cg_speed, cg_flow, color='k')\n plt.grid(True)\n plt.xlabel('Speed (mph)')\n plt.ylabel('Flow (veh/hr)')\n plt.title('Flow speed sensor, agg {0} s'.format(agg_interval))\n\n plt.draw()", "title": "" }, { "docid": "73fd9c9c484865f55ef9eb0e2aed5c82", "score": "0.45611626", "text": "def calibrate(self, calib_ps, analyte_ratios=None):\n # can have calibration function stored in self and pass *coefs?\n if analyte_ratios is None:\n analyte_ratios = self.analyte_ratios\n\n if 'calibrated' not in self.data:\n self.data['calibrated'] = Bunch()\n\n for a in analyte_ratios:\n m = calib_ps[a]['m'].new(self.uTime)\n\n if 'c' in calib_ps[a]:\n c = calib_ps[a]['c'].new(self.uTime)\n else:\n c = 0\n\n self.data['calibrated'][a] = self.data['ratios'][a] * m + c\n self.filt.add_to_table(a)\n \n # initialise filtering framework\n # self._init_filts(self.analyte_ratios)\n\n self.setfocus('calibrated')\n return", "title": "" }, { "docid": "4aa653ce67f3ffbaba5516888915591a", "score": "0.4555984", "text": "def get_formants(time_frame, np_freqs):\n time_frame['formants'] = []\n valid_amps = []\n for i, freq in enumerate(np_freqs):\n if VOICE_MIN_FREQ < freq < VOICE_MAX_FREQ:\n amp = time_frame['amplitudes'][i]\n amp_min = utils.get_min_amp(freq)\n print(amp_min, amp)\n if amp > amp_min:\n valid_amps.append(amp)\n peaks = utils.get_peak_amps(valid_amps)\n # Get corresponding frequency of each peak amplitude.\n for i, freq in enumerate(np_freqs):\n if time_frame['amplitudes'][i] in peaks:\n time_frame['formants'].append(round(freq))\n return time_frame", "title": "" }, { "docid": "270b5a9148f4c97066c87aaf4abda43f", "score": "0.45486486", "text": "def raw_to_calibrated_temp(rawtemp, calib_vals):\n t_fine = raw_to_t_fine(rawtemp, calib_vals)\n deg_C = ((t_fine * 5 + 128) >> 8)/100.\n return deg_C", "title": "" }, { "docid": "ccc73d66799515c15f6358211c09a59b", "score": "0.4524065", "text": "def calc_surr_correlation(\n self, weightcalcdata, causevar, affectedvar, box, trials\n ):\n\n # The causal (or source) data is replaced by surrogate data,\n # while the affected (or destination) data remains unchanged.\n\n # Generate surrogate causal data\n thresh_causevardata = box[:, weightcalcdata.variables.index(causevar)][\n weightcalcdata.startindex : weightcalcdata.startindex\n + weightcalcdata.testsize\n ]\n\n # Get the causal data in the correct format\n # for surrogate generation\n original_causal = np.zeros((1, len(thresh_causevardata)))\n original_causal[0, :] = thresh_causevardata\n\n if self.surr_method == \"iAAFT\":\n surr_tsdata = [\n data_processing.gen_iaaft_surrogates(original_causal, 10)\n for n in range(trials)\n ]\n elif self.surr_method == \"random_shuffle\":\n surr_tsdata = [\n data_processing.shuffle_data(thresh_causevardata)\n for n in range(trials)\n ]\n\n surr_corr_list = []\n surr_dirindex_list = []\n for n in range(trials):\n # Compute the weightlist for every trial by evaluating all delays\n surr_weightlist = []\n for delay_index in weightcalcdata.sample_delays:\n thresh_affectedvardata = box[\n :, weightcalcdata.variables.index(affectedvar)\n ][\n weightcalcdata.startindex\n + delay_index : weightcalcdata.startindex\n + weightcalcdata.testsize\n + delay_index\n ]\n surr_weightlist.append(\n self.calcweight(\n surr_tsdata[n][0, :], thresh_affectedvardata\n )[0][0]\n )\n\n _, maxcorr, _, _, _, directionindex, _ = self.select_weights(\n weightcalcdata, causevar, affectedvar, surr_weightlist\n )\n\n surr_corr_list.append(abs(maxcorr))\n surr_dirindex_list.append(directionindex)\n\n return 
surr_corr_list, surr_dirindex_list", "title": "" }, { "docid": "592f46ac888a4d494861c5f482f970ad", "score": "0.452332", "text": "def set_selection(self, time, freqs, blarr, calname='', radec=(), dist=1, spwind=[], pols=['XX','YY']):\n\n self.spwind = spwind\n if calname:\n self.logger.warn('calname option not used for casa_sol. Applied based on radec.')\n\n # define pol index\n if 'X' in ''.join(pols) or 'Y' in ''.join(pols):\n polord = ['XX', 'YY']\n elif 'R' in ''.join(pols) or 'L' in ''.join(pols):\n polord = ['RR', 'LL']\n self.polind = [polord.index(pol) for pol in pols]\n\n self.ant1ind = [n.where(ant1 == n.unique(blarr))[0][0] for (ant1,ant2) in blarr]\n self.ant2ind = [n.where(ant2 == n.unique(blarr))[0][0] for (ant1,ant2) in blarr]\n\n # select by smallest time distance for source within some angular region of target\n if radec:\n ra, dec = radec\n calra = n.array(self.radec)[:,0]\n caldec = n.array(self.radec)[:,1]\n fields = n.where( (n.abs(calra - ra) < n.radians(dist)) & (n.abs(caldec - dec) < n.radians(dist)) )[0]\n if len(fields) == 0:\n self.logger.warn('Warning: no close calibrator found. Removing radec restriction.')\n fields = n.unique(self.uniquefield)\n else:\n fields = n.unique(self.uniquefield)\n\n sel = []\n for field in fields:\n sel += list(n.where(field == self.uniquefield)[0])\n mjddist = n.abs(time - self.uniquemjd[sel])\n closestgain = n.where(mjddist == mjddist.min())[0][0]\n\n self.logger.info('Using gain solution for field %d at MJD %.5f, separated by %d min ' % (self.uniquefield[n.where(self.uniquemjd == self.uniquemjd[sel][closestgain])], self.uniquemjd[closestgain], mjddist[closestgain]*24*60))\n self.gain = self.gain.take(self.spwind, axis=2).take(self.polind, axis=3)[closestgain]\n\n if hasattr(self, 'bandpass'):\n bins = [n.where(n.min(n.abs(self.bpfreq-selfreq)) == n.abs(self.bpfreq-selfreq))[0][0] for selfreq in freqs]\n self.bandpass = self.bandpass.take(bins, axis=1).take(self.polind, axis=2)\n self.freqs = freqs\n self.logger.debug('Using bandpass at BP bins (1000 bins per spw): %s', str(bins))", "title": "" }, { "docid": "864ccc5cf002bd91efa55bea4821fe10", "score": "0.45135692", "text": "def evaluate(self, temperature: float, frequency: float) -> complex:", "title": "" }, { "docid": "68b2a0760a76b9d41acdd810f51a011d", "score": "0.45130193", "text": "def _perform(self):\n self.logger.info(\"Solving individual arc spectra\")\n # plot control booleans\n master_inter = (self.config.instrument.plot_level >= 2)\n do_inter = (self.config.instrument.plot_level >= 3)\n # output control\n verbose = (self.config.instrument.verbose > 1)\n\n # Bar statistics\n bar_sig = []\n bar_nls = []\n # set thresh for finding lines\n hgt = 50.\n self.logger.info(\"line thresh = %.2f\" % hgt)\n # get relevant part of atlas spectrum\n atwave = self.action.args.refwave[self.action.args.atminrow:\n self.action.args.atmaxrow]\n atspec = self.action.args.reflux[self.action.args.atminrow:\n self.action.args.atmaxrow]\n # convert list into ndarray\n at_wave = np.asarray(self.action.args.at_wave)\n at_flux = np.asarray(self.action.args.at_flux)\n # get x values starting at zero pixels\n self.action.args.xsvals = np.arange(0, len(\n self.context.arcs[self.config.instrument.REFBAR]))\n # loop over arcs and generate a wavelength solution for each\n next_bar_to_plot = 0\n poly_order = 4\n for ib, b in enumerate(self.context.arcs):\n # Starting with pascal shifted coeffs from fit_center()\n coeff = self.action.args.twkcoeff[ib]\n # get bar wavelengths\n bw = np.polyval(coeff, 
self.action.args.xsvals)\n            # smooth spectrum according to slicer\n            if 'Small' in self.action.args.ifuname:\n                # no smoothing for Small slicer\n                bspec = b\n            else:\n                if 'Large' in self.action.args.ifuname:\n                    # max smoothing for Large slicer\n                    win = boxcar(5)\n                else:\n                    # intermediate smoothing for Medium slicer\n                    win = boxcar(3)\n                # do the smoothing\n                bspec = sp.signal.convolve(b, win, mode='same') / sum(win)\n            # store values to fit\n            at_wave_dat = []    # atlas line wavelengths\n            at_flux_dat = []    # atlas line peak fluxes\n            arc_pix_dat = []    # arc line pixel positions\n            arc_int_dat = []    # arc line pixel intensities\n            rej_wave = []    # rejected line wavelengths\n            rej_flux = []    # rejected line fluxes\n            gaus_sig = []\n            nrej = 0\n            # loop over lines\n            for iw, aw in enumerate(self.action.args.at_wave):\n                # get window for this line\n                try:\n                    # get arc line initial pixel position\n                    line_x = [i for i, v in enumerate(bw) if v >= aw][0]\n                    # get window for arc line\n                    minow, maxow, count = get_line_window(\n                        bspec, line_x, thresh=hgt,\n                        logger=(self.logger if verbose else None))\n                    # do we have enough points to fit?\n                    if count < 5 or not minow or not maxow:\n                        rej_wave.append(aw)\n                        rej_flux.append(self.action.args.at_flux[iw])\n                        nrej += 1\n                        if verbose:\n                            self.logger.info(\"Arc window rejected for line %.3f\"\n                                             % aw)\n                        continue\n                    # check if window no longer contains initial value\n                    if line_x < minow or line_x > maxow:\n                        rej_wave.append(aw)\n                        rej_flux.append(self.action.args.at_flux[iw])\n                        nrej += 1\n                        if verbose:\n                            self.logger.info(\n                                \"Arc window wandered off for line %.3f\" % aw)\n                        continue\n                    # get data to fit\n                    yvec = bspec[minow:maxow + 1]\n                    xvec = self.action.args.xsvals[minow:maxow + 1]\n                    wvec = bw[minow:maxow + 1]\n                    f0 = max(yvec)\n                    par_start = [f0, np.nanmean(xvec), 1.0]\n                    par_bounds = ([f0*0.9, np.min(xvec), 0.5],\n                                  [f0*1.1, np.max(xvec), 2.5])\n                    # Gaussian fit\n                    try:\n                        fit, _ = curve_fit(gaus, xvec, yvec, p0=par_start)\n                        # bounds=par_bounds, method='trf')\n                        sp_pk_x = fit[1]\n                        gaus_sig.append(fit[2])\n                    except (RuntimeError, ValueError):\n                        rej_wave.append(aw)\n                        rej_flux.append(self.action.args.at_flux[iw])\n                        nrej += 1\n                        if verbose:\n                            self.logger.info(\n                                \"Arc Gaussian fit rejected for line %.3f\" % aw)\n                        # sp_pk_x = line_x\n                        continue\n\n                    # get interpolation of arc line\n                    int_line = interpolate.interp1d(xvec, yvec, kind='cubic',\n                                                    bounds_error=False,\n                                                    fill_value='extrapolate')\n                    # use very dense sampling\n                    xplot = np.linspace(min(xvec), max(xvec), num=1000)\n                    # re-sample line with dense sampling\n                    plt_line = int_line(xplot)\n                    # get peak position\n                    max_index = plt_line.argmax()\n                    peak = xplot[max_index]\n                    # calculate centroid\n                    cent = np.sum(xvec * yvec) / np.sum(yvec)\n                    # how different is the centroid from the peak?\n                    if abs(cent - peak) > 0.7:\n                        # keep track of rejected line\n                        rej_wave.append(aw)\n                        rej_flux.append(self.action.args.at_flux[iw])\n                        nrej += 1\n                        if verbose:\n                            self.logger.info(\"Arc peak - cent offset = %.2f \"\n                                             \"rejected for line %.3f\" %\n                                             (abs(cent - peak), aw))\n                        continue\n                    if plt_line[max_index] < 100:\n                        # keep track of rejected line\n                        rej_wave.append(aw)\n                        rej_flux.append(self.action.args.at_flux[iw])\n                        nrej += 1\n                        if verbose:\n                            self.logger.info(\"Arc peak too low = %.2f \"\n                                             \"rejected for line %.3f\" %\n                                             (plt_line[max_index], aw))\n                        continue\n                    # store surviving line data\n                    arc_pix_dat.append(peak)\n                    arc_int_dat.append(plt_line[max_index])\n                    at_wave_dat.append(aw)\n                    at_flux_dat.append(self.action.args.at_flux[iw])\n                    # plot, if requested\n                    if do_inter and ib == next_bar_to_plot:\n                        ptitle = \" Bar# 
%d - line %3d/%3d: xc = %.1f, \" \\\n \"Wave = %9.2f\" % \\\n (ib, (iw + 1), len(self.action.args.at_wave),\n peak, aw)\n atx0 = [i for i, v in enumerate(atwave)\n if v >= min(wvec)][0]\n atx1 = [i for i, v in enumerate(atwave)\n if v >= max(wvec)][0]\n atnorm = np.nanmax(yvec) / np.nanmax(atspec[atx0:atx1])\n p = figure(\n title=self.action.args.plotlabel +\n \"ATLAS/ARC LINE FITS\" + ptitle,\n x_axis_label=\"Wavelength (A)\",\n y_axis_label=\"Relative Flux\",\n plot_width=self.config.instrument.plot_width,\n plot_height=self.config.instrument.plot_height)\n ylim = [0, np.nanmax(yvec)]\n p.line(atwave[atx0:atx1], atspec[atx0:atx1] * atnorm,\n color='blue', legend_label='Atlas')\n p.circle(atwave[atx0:atx1], atspec[atx0:atx1] * atnorm,\n color='green', legend_label='Atlas')\n p.line([aw, aw], ylim, color='red',\n legend_label='AtCntr')\n p.x_range = Range1d(start=min(wvec), end=max(wvec))\n p.extra_x_ranges = {\"pix\": Range1d(start=min(xvec),\n end=max(xvec))}\n p.add_layout(LinearAxis(x_range_name=\"pix\",\n axis_label=\"CCD Y pix\"),\n 'above')\n p.line(xplot, plt_line, color='black',\n legend_label='Arc', x_range_name=\"pix\")\n p.circle(xvec, yvec, legend_label='Arc', color='red',\n x_range_name=\"pix\")\n ylim = [0, np.nanmax(plt_line)]\n p.line([cent, cent], ylim, color='green',\n legend_label='Cntr', line_dash='dashed',\n x_range_name=\"pix\")\n p.line([sp_pk_x, sp_pk_x], ylim, color='magenta',\n legend_label='Gpeak', line_dash='dashdot',\n x_range_name=\"pix\")\n p.line([peak, peak], ylim, color='black',\n legend_label='Peak', line_dash='dashdot',\n x_range_name=\"pix\")\n p.y_range.start = 0\n bokeh_plot(p, self.context.bokeh_session)\n\n q = input(ptitle + \" - Next? <cr>, q to quit: \")\n if 'Q' in q.upper():\n do_inter = False\n except IndexError:\n if verbose:\n self.logger.info(\n \"Atlas line not in observation: %.2f\" % aw)\n rej_wave.append(aw)\n rej_flux.append(self.action.args.at_flux[iw])\n nrej += 1\n continue\n except ValueError:\n if verbose:\n self.logger.info(\n \"Interpolation error for line at %.2f\" % aw)\n rej_wave.append(aw)\n rej_flux.append(self.action.args.at_flux[iw])\n nrej += 1\n self.logger.info(\"\")\n self.logger.info(\"Fitting wavelength solution starting with %d \"\n \"lines after rejecting %d lines\" %\n (len(arc_pix_dat), nrej))\n # Fit wavelengths\n # Get poly order\n if self.action.args.dichroic_fraction <= 0.6:\n poly_order = 2\n elif 0.6 < self.action.args.dichroic_fraction < 0.75:\n poly_order = 3\n else:\n poly_order = 4\n self.logger.info(\"Fitting with polynomial order %d\" % poly_order)\n # Initial fit\n wfit = np.polyfit(arc_pix_dat, at_wave_dat, poly_order)\n pwfit = np.poly1d(wfit)\n arc_wave_fit = pwfit(arc_pix_dat)\n # fit residuals\n resid = arc_wave_fit - at_wave_dat\n resid_c, low, upp = sigmaclip(resid, low=3., high=3.)\n wsig = resid_c.std()\n # maximum outlier\n max_resid = np.max(abs(resid))\n self.logger.info(\"wsig: %.3f, max_resid: %.3f\" % (wsig, max_resid))\n # keep track of rejected lines\n rej_rsd = [] # rejected line residuals\n rej_rsd_wave = [] # rejected line wavelengths\n rej_rsd_flux = [] # rejected line fluxes\n # iteratively remove outliers\n it = 0\n while max_resid > 2.5 * wsig and it < 25:\n arc_dat = [] # arc line pixel values\n arc_fdat = [] # arc line flux data\n at_dat = [] # atlas line wavelength values\n at_fdat = [] # atlas line flux data\n # trim largest outlier\n for il, rsd in enumerate(resid):\n if abs(rsd) < max_resid:\n # append data for line that passed cut\n arc_dat.append(arc_pix_dat[il])\n 
arc_fdat.append(arc_int_dat[il])\n                        at_dat.append(at_wave_dat[il])\n                        at_fdat.append(at_flux_dat[il])\n                    else:\n                        if verbose:\n                            self.logger.info(\"It%d REJ: %d, %.2f, %.3f, %.3f\" %\n                                             (it, il, arc_pix_dat[il],\n                                              at_wave_dat[il], rsd))\n                        # keep track of rejected lines\n                        rej_rsd_wave.append(at_wave_dat[il])\n                        rej_rsd_flux.append(at_flux_dat[il])\n                        rej_rsd.append(rsd)\n                # copy cleaned data back into input arrays\n                arc_pix_dat = arc_dat.copy()\n                arc_int_dat = arc_fdat.copy()\n                at_wave_dat = at_dat.copy()\n                at_flux_dat = at_fdat.copy()\n                # refit cleaned data\n                wfit = np.polyfit(arc_pix_dat, at_wave_dat, poly_order)\n                # new wavelength function\n                pwfit = np.poly1d(wfit)\n                # new wavelengths for arc lines\n                arc_wave_fit = pwfit(arc_pix_dat)\n                # calculate residuals of arc lines\n                resid = arc_wave_fit - at_wave_dat\n                # get statistics\n                resid_c, low, upp = sigmaclip(resid, low=3., high=3.)\n                wsig = resid_c.std()\n                # maximum outlier\n                max_resid = np.max(abs(resid))\n                # wsig = np.nanstd(resid)\n                it += 1\n            # END while max_resid > 2.5 * wsig and it < 25:\n            # log arc bar results\n            self.logger.info(\"\")\n            self.logger.info(\"BAR %03d, Slice = %02d, RMS = %.3f, N = %d\" %\n                             (ib, int(ib / 5), wsig, len(arc_pix_dat)))\n            self.logger.info(\n                \"Nits: %d, wsig: %.3f, max_resid: %.3f\" % (it, wsig, max_resid))\n            self.logger.info(\"NRejRsd: %d, NRejFit: %d\" % (len(rej_rsd_wave),\n                                                           len(rej_wave)))\n            self.logger.info(\"Line width median sigma: %.2f px\" %\n                             np.nanmedian(gaus_sig))\n            self.logger.info(\"Coefs: \" + ' '.join(['%.6g' % (c,)\n                                                   for c in reversed(wfit)]))\n            # store final fit coefficients\n            self.action.args.fincoeff.append(wfit)\n            # store statistics\n            bar_sig.append(wsig)\n            bar_nls.append(len(arc_pix_dat))\n            # do plotting?\n            if master_inter and ib == next_bar_to_plot:\n                # plot bar fit residuals\n                ptitle = \" for Bar %03d, Slice %02d, RMS = %.3f, N = %d\" % \\\n                         (ib, int(ib / 5), wsig, len(arc_pix_dat))\n                p = figure(title=self.action.args.plotlabel +\n                           \"RESIDUALS\" + ptitle,\n                           x_axis_label=\"Wavelength (A)\",\n                           y_axis_label=\"Fit - Inp (A)\",\n                           plot_width=self.config.instrument.plot_width,\n                           plot_height=self.config.instrument.plot_height)\n                p.diamond(at_wave_dat, resid, legend_label='Rsd', size=8)\n                if rej_rsd_wave:\n                    p.diamond(rej_rsd_wave, rej_rsd, color='orange',\n                              legend_label='Rej', size=8)\n                xlim = [self.action.args.atminwave, self.action.args.atmaxwave]\n                ylim = [np.nanmin(list(resid)+list(rej_rsd)),\n                        np.nanmax(list(resid)+list(rej_rsd))]\n                p.line(xlim, [0., 0.], color='black', line_dash='dotted')\n                p.line(xlim, [wsig, wsig], color='gray', line_dash='dashdot')\n                p.line(xlim, [-wsig, -wsig], color='gray', line_dash='dashdot')\n                p.line([self.action.args.cwave, self.action.args.cwave],\n                       ylim, legend_label='CWAV', color='magenta',\n                       line_dash='dashdot')\n                bokeh_plot(p, self.context.bokeh_session)\n                input(\"Next? 
<cr>: \")\n\n # overplot atlas and bar using fit wavelengths\n p = figure(title=self.action.args.plotlabel +\n \"ATLAS/ARC FIT\" + ptitle,\n x_axis_label=\"Wavelength (A)\",\n y_axis_label=\"Flux\",\n plot_width=self.config.instrument.plot_width,\n plot_height=self.config.instrument.plot_height)\n bwav = pwfit(self.action.args.xsvals)\n p.line(bwav, b, color='darkgrey', legend_label='Arc')\n p.diamond(arc_wave_fit, arc_int_dat, color='darkgrey', size=8)\n ylim = [np.nanmin(b), np.nanmax(b)]\n atnorm = np.nanmax(b) / np.nanmax(atspec)\n p.line(atwave, atspec * atnorm, color='blue',\n legend_label='Atlas')\n p.line([self.action.args.cwave, self.action.args.cwave],\n ylim, color='magenta', line_dash='dashdot',\n legend_label='CWAV')\n p.diamond(at_wave, at_flux * atnorm, legend_label='Kept',\n color='green', size=8)\n if rej_rsd_wave:\n p.diamond(rej_rsd_wave, [rj*atnorm for rj in rej_rsd_flux],\n color='orange', legend_label='RejRsd', size=6)\n p.diamond(rej_wave, [rj*atnorm for rj in rej_flux],\n color='red', legend_label='RejFit', size=6)\n bokeh_plot(p, self.context.bokeh_session)\n q = input(\"Next? <int> or <cr>, q - quit: \")\n if 'Q' in q.upper():\n master_inter = False\n else:\n try:\n next_bar_to_plot = int(q)\n except ValueError:\n next_bar_to_plot = ib + 1\n\n # Plot final results\n\n # plot output name stub\n pfname = \"arc_%05d_%s_%s_%s_tf%02d\" % (\n self.action.args.ccddata.header['FRAMENO'],\n self.action.args.illum, self.action.args.grating,\n self.action.args.ifuname, int(100*self.config.instrument.TAPERFRAC))\n\n # Plot coefs\n if self.config.instrument.plot_level >= 1:\n ylabs = ['Ang/px^4', 'Ang/px^3', 'Ang/px^2', 'Ang/px',\n 'Ang']\n ylabs = ylabs[-(poly_order+1):]\n for ic in reversed(\n range(len(self.action.args.fincoeff[0]))):\n cn = poly_order - ic\n ptitle = self.action.args.plotlabel + \"COEF %d VALUES\" % cn\n p = figure(title=ptitle, x_axis_label=\"Bar #\",\n y_axis_label=\"Coef %d (%s)\" % (cn, ylabs[ic]),\n plot_width=self.config.instrument.plot_width,\n plot_height=self.config.instrument.plot_height)\n coef = []\n for c in self.action.args.fincoeff:\n coef.append(c[ic])\n p.diamond(list(range(120)), coef, size=8)\n xlim = [-1, 120]\n ylim = get_plot_lims(coef)\n p.xgrid.grid_line_color = None\n oplot_slices(p, ylim)\n set_plot_lims(p, xlim=xlim, ylim=ylim)\n bokeh_plot(p, self.context.bokeh_session)\n if self.config.instrument.plot_level >= 2:\n input(\"Next? 
<cr>: \")\n else:\n time.sleep(self.config.instrument.plot_pause)\n # save coefficients plot\n save_plot(p, filename=pfname + '_coef%d.png' % cn)\n\n # Plot number of lines fit\n self.action.args.av_bar_nls = float(np.nanmean(bar_nls))\n self.action.args.st_bar_nls = float(np.nanstd(bar_nls))\n ptitle = self.action.args.plotlabel + \\\n \"FIT STATS <Nlns> = %.1f +- %.1f\" % (self.action.args.av_bar_nls,\n self.action.args.st_bar_nls)\n p = figure(title=ptitle, x_axis_label=\"Bar #\",\n y_axis_label=\"N Lines\",\n plot_width=self.config.instrument.plot_width,\n plot_height=self.config.instrument.plot_height)\n p.diamond(list(range(120)), bar_nls, size=8)\n xlim = [-1, 120]\n ylim = get_plot_lims(bar_nls)\n self.logger.info(\"<N Lines> = %.1f +- %.1f\" %\n (self.action.args.av_bar_nls,\n self.action.args.st_bar_nls))\n p.line(xlim, [self.action.args.av_bar_nls,\n self.action.args.av_bar_nls], color='red')\n p.line(xlim, [(self.action.args.av_bar_nls -\n self.action.args.st_bar_nls),\n (self.action.args.av_bar_nls -\n self.action.args.st_bar_nls)], color='green',\n line_dash='dashed')\n p.line(xlim, [(self.action.args.av_bar_nls +\n self.action.args.st_bar_nls),\n (self.action.args.av_bar_nls +\n self.action.args.st_bar_nls)], color='green',\n line_dash='dashed')\n p.xgrid.grid_line_color = None\n oplot_slices(p, ylim)\n set_plot_lims(p, xlim=xlim, ylim=ylim)\n if self.config.instrument.plot_level >= 1:\n bokeh_plot(p, self.context.bokeh_session)\n if self.config.instrument.plot_level >= 2:\n input(\"Next? <cr>: \")\n else:\n time.sleep(self.config.instrument.plot_pause)\n # save N lines plot\n save_plot(p, filename=pfname + '_nlines.png')\n\n # Plot fit sigmas\n self.action.args.av_bar_sig = float(np.nanmean(bar_sig))\n self.action.args.st_bar_sig = float(np.nanstd(bar_sig))\n self.logger.info(\"<STD> = %.3f +- %.3f (A)\" %\n (self.action.args.av_bar_sig,\n self.action.args.st_bar_sig))\n\n ptitle = self.action.args.plotlabel + \\\n \"FIT STATS <RMS> = %.3f +- %.3f\" % (self.action.args.av_bar_sig,\n self.action.args.st_bar_sig)\n p = figure(title=ptitle, x_axis_label=\"Bar #\", y_axis_label=\"RMS (A)\",\n plot_width=self.config.instrument.plot_width,\n plot_height=self.config.instrument.plot_height)\n p.diamond(list(range(120)), bar_sig, size=8)\n xlim = [-1, 120]\n ylim = get_plot_lims(bar_sig)\n p.line(xlim, [self.action.args.av_bar_sig,\n self.action.args.av_bar_sig], color='red')\n p.line(xlim, [(self.action.args.av_bar_sig -\n self.action.args.st_bar_sig),\n (self.action.args.av_bar_sig -\n self.action.args.st_bar_sig)], color='green',\n line_dash='dashed')\n p.line(xlim, [(self.action.args.av_bar_sig +\n self.action.args.st_bar_sig),\n (self.action.args.av_bar_sig +\n self.action.args.st_bar_sig)], color='green',\n line_dash='dashed')\n p.xgrid.grid_line_color = None\n oplot_slices(p, ylim)\n set_plot_lims(p, xlim=xlim, ylim=ylim)\n if self.config.instrument.plot_level >= 1:\n bokeh_plot(p, self.context.bokeh_session)\n if self.config.instrument.plot_level >= 2:\n input(\"Next? 
<cr>: \")\n else:\n time.sleep(self.config.instrument.plot_pause)\n\n # save residual plot\n save_plot(p, filename=pfname + '_resid.png')\n\n log_string = SolveArcs.__module__\n self.action.args.ccddata.header['HISTORY'] = log_string\n self.logger.info(log_string)\n\n return self.action.args", "title": "" }, { "docid": "680be0f9cc89a9590174c4dc09dd5131", "score": "0.44982135", "text": "def apply(correction, S, A, B1):\n TR = correction[1]\n j = correction[2]\n\n spgrWithJ.TR = TR\n spgrWithJ.j = j\n spgrWithJ.TE = 4.6\n spgrWithJ.T2s = 50\n T1i = 1000\n corr = pandas.Series(index=S.index.values, dtype=float)\n for v in S.index.values:\n Sv = S.loc[v].values\n S0i = 15*Sv.max()\n Ab1 = A*(B1[v]/100)\n try:\n popt, pcov = scipy.optimize.curve_fit(\n spgrWithJ, Ab1, Sv, p0=[S0i, T1i])\n except RuntimeError as e:\n continue\n corr[v] = popt[1]\n return corr", "title": "" }, { "docid": "53bc5929a532b2bf647adf2df6031b45", "score": "0.44943678", "text": "def analysis(wav, sample_rate, mcep_dims=60, bap_dims=5):\n f0, smoothed_spectrogram, aperiodicity = basic_analysis(wav, sample_rate)\n\n vuv = extract_vuv(f0)\n f0_interpolated = utils.interpolate(f0, vuv)\n\n mel_cepstrum = freq_to_mcep(smoothed_spectrogram, sample_rate, dims=mcep_dims)\n band_aperiodicity = freq_to_mcep(aperiodicity, sample_rate, dims=bap_dims)\n\n return f0_interpolated, vuv, mel_cepstrum, band_aperiodicity", "title": "" }, { "docid": "9f807e36897f9ba4462780d5cb7c8be6", "score": "0.44923413", "text": "def filterChans(ps):\n\n # Determine the number of good channels\n nchans = ps.str_voltData.shape[0]\n nchans_good = 0\n\n for i in range(nchans):\n if ps.chanFlagAC[i] == 1: nchans_good += 1\n elif ps.chanFlagDC[i] == 1: nchans_good += 1\n else: pass\n\n if nchans_good < nchans: ps.setWarning(64)\n if nchans_good < 2: ps.setError(650)\n\n # Create the final arrays to be passed to the Bayesian functions\n ps.scPhotons_Bayes = ndarray(shape=(nchans_good))\n ps.bgPhotons_Bayes = ndarray(shape=(nchans_good))\n ps.fqe_Bayes = ndarray(shape=(nchans_good))\n ps.trans_Bayes = ndarray(shape=(nchans_good, ps.calib.trans.shape[1]))\n\n index = 0\n for i in range(nchans):\n if ps.chanFlagAC[i] == 1:\n ps.scPhotons_Bayes[index] = ps.scatPhotonsAC[i]\n ps.fqe_Bayes[index] = ps.calib.APDfqeAC[i]\n ps.trans_Bayes[index, :] = ps.calib.trans[i,:]\n\n ps.bgPhotons_Bayes[index] = ps.bgPhotons[i]\n\n index += 1\n elif ps.chanFlagDC[i] == 1:\n ps.scPhotons_Bayes[index] = ps.scatPhotonsDC[i]\n ps.bgPhotons_Bayes[index] = ps.bgPhotons[i]\n ps.fqe_Bayes[index] = ps.calib.APDfqeDC[i]\n ps.trans_Bayes[index, :] = ps.calib.trans[i,:]\n\n index += 1\n else: pass", "title": "" }, { "docid": "10e46f89864ae7c540d64ed4089c9150", "score": "0.44772437", "text": "def modelogic(freqspec, pointing, duration_tot, integration, bsx_type, bfs, acc,\n allsky):\n if acc and not pointing:\n warnings.warn('ACC requires beamctl')\n # TODO add more conditions", "title": "" }, { "docid": "3fd8263ddaa5658d71796ce8f60a2e25", "score": "0.4476815", "text": "def makeCCF(spec_wave, spec_flux, mask_wave=None, mask_contrast=None, mask=None,\n mask_width=0.82, rvmin=None, rvmax=None, drv=None, rvarray=None):\n if rvarray is None:\n if rvmin is None or rvmax is None or drv is None:\n raise ValueError(\"Provide `rvmin`, `rvmax`, and `drv`.\")\n # check order of rvmin and rvmax\n if rvmax <= rvmin:\n raise ValueError(\"`rvmin` should be smaller than `rvmax`.\")\n rvarray = np.arange(rvmin, rvmax + drv / 2, drv)\n\n wave_resolution = spec_wave[1] - spec_wave[0]\n\n if mask is None:\n if 
mask_wave is None:\n raise ValueError(\"Provide the mask wavelengths in `mask_wave`.\")\n if mask_contrast is None:\n raise ValueError(\"Provide the mask wavelengths in `mask_contrast`.\")\n\n mask = np.c_[doppler_shift_wave(mask_wave, -mask_width / 2),\n doppler_shift_wave(mask_wave, mask_width / 2), mask_contrast]\n\n ccfarray = np.zeros_like(rvarray)\n for i, RV in enumerate(rvarray):\n nlines = 0\n CCF = 0.0\n\n mask_rv_shifted = np.copy(mask)\n mask_rv_shifted[:, :2] = doppler_shift_wave(mask[:, :2], RV)\n\n # region of intersection between the RV-shifted mask and the spectrum\n region = (spec_wave[0] < mask_rv_shifted[:, 0]) & (mask_rv_shifted[:, 1] < spec_wave[-1])\n mask_rv_shifted = mask_rv_shifted[region]\n\n # for every line in the mask\n for mask_line_start, mask_line_end, mask_line_depth in mask_rv_shifted:\n\n if mask_line_end + wave_resolution >= spec_wave[-1]:\n break\n\n # find the limiting indices in spec_wave, corresponding to the start\n # and end wavelength of the mask\n linePixelIni = bisect_left(spec_wave, mask_line_start)\n linePixelEnd = bisect_right(spec_wave, mask_line_end)\n\n # fraction of the spectrum inside the mask hole at the start\n lineFractionIni = (spec_wave[linePixelIni] - mask_line_start) / wave_resolution\n # fraction of the spectrum inside the mask hole at the end\n lineFractionEnd = 1 - abs(mask_line_end - spec_wave[linePixelEnd]) / wave_resolution\n\n CCF += mask_line_depth * np.sum(spec_flux[linePixelIni:linePixelEnd])\n CCF += mask_line_depth * lineFractionIni * spec_flux[linePixelIni - 1]\n CCF += mask_line_depth * lineFractionEnd * spec_flux[linePixelEnd + 1]\n nlines += 1\n\n ccfarray[i] = CCF\n\n return rvarray, ccfarray", "title": "" }, { "docid": "5354b4de6c101dce3a0b354bf06b44a0", "score": "0.446772", "text": "def solveTemp(ratio, structure, rho_S, abs_coeff = np.logspace(-2,-6,15)):\r\n # Absorbance is measured as 1-R-T where R, T are reflectance and transmittance.\r\n # dependence on the absorption coefficient is based on definition of RI from paper.\r\n wavelength_0 = 1.2e-6\r\n beta = np.linspace(0,0.2,100)\r\n power_in = [0]*10 # need to find maximum p_in based on beta\r\n\r\n # Loop that gets the maximum power value (change to vector to optimise?)\r\n for b in beta:\r\n wavelength = wavelength_0*np.sqrt((1+b)/(1-b))\r\n A = find_absorption_from_coefficient(structure, abs_coeff, wavelength)\r\n # Finding the LHS of Ilic equation\r\n power_beta = ratio*A*rho_S*(1-b)/(1+b)\r\n\r\n if power_beta[-1] > power_in[-1]:\r\n power_in = power_beta\r\n\r\n \"\"\" Note: Honestly, this below section should be made into its own function\r\n since it is a reusable block of code. 
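A usage sketch for the `makeCCF` passage above. `doppler_shift_wave` is not shown in that snippet, so a non-relativistic stand-in is defined here, under the assumptions that velocities are in km/s and that the stand-in shares a namespace with `makeCCF`:

```python
import numpy as np

C_KMS = 299_792.458  # speed of light, km/s

# plausible stand-in for the helper makeCCF calls (assumption: rv in km/s)
def doppler_shift_wave(wave, rv):
    return np.asarray(wave) * (1.0 + rv / C_KMS)

# synthetic spectrum: flat continuum with one absorption line at 5500 A
spec_wave = np.linspace(5495.0, 5505.0, 5000)
spec_flux = 1.0 - 0.6 * np.exp(-0.5 * ((spec_wave - 5500.0) / 0.05) ** 2)

rv, ccf = makeCCF(spec_wave, spec_flux,
                  mask_wave=np.array([5500.0]),
                  mask_contrast=np.array([0.6]),
                  rvmin=-15.0, rvmax=15.0, drv=0.25)
print(rv[np.argmin(ccf)])   # near 0 km/s for an unshifted line
```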
Consider doing this at some point\r\n but for now, focus on optimising code and commenting\r\n \"\"\"\r\n # The RHS is more complicated, since you can't get an expression for T explicitly\r\n # We need to integrate power flux over all wavelengths to get the total radiated power\r\n temps = []\r\n midpoints = []\r\n highs = []\r\n lows = []\r\n for P in power_in: # Related to each x-val (abs coeff)\r\n start_time = time.time()\r\n bb_temp = (P/(2*1*5.67e-8))**0.25\r\n\r\n T_low = bb_temp # Lower bound = max emissivity = black body temp\r\n T_high = bb_temp*10 # Upper bound arbitrary (might not hold at higher temps) - should find a way to set a true reasonable higher bound\r\n\r\n # Use trapezoidal rule to find the total power out for a given temperature\r\n def power_out(T):\r\n points = 101 # Can be changed if better resolution is required\r\n\r\n # Ilic paper uses 1-25 microns, but eqns should be valid from 0.5-50 microns if so required\r\n bounds = np.linspace(1e-6, 25e-6, points)\r\n power_out_at_wl = points*[None]\r\n\r\n # Running each integral and adding to the list (optimisation here would be to fix list size and assign vals)\r\n i = 0\r\n for wavelength in bounds:\r\n power_out_at_wl[i] = (spectral_power_flux(wavelength, structure, T))\r\n i += 1\r\n power_out = np.trapz(power_out_at_wl, bounds, (25e-6-1e-6)/points)\r\n return power_out\r\n\r\n # Powers at the bounds of temperature interval\r\n P_high = power_out(T_high)\r\n P_low = power_out(T_low)\r\n\r\n # Halving the interval for a result\r\n while abs(P_high - P_low) >= 0.05*P:\r\n\r\n # The only issue we can really get is if P_high is too low - if this is\r\n # the case, just double P_high\r\n if (P_high <= P):\r\n T_high = T_high*2\r\n\r\n midpoint = (T_low+T_high)/2\r\n\r\n if power_out(midpoint) > P:\r\n T_high = midpoint\r\n else:\r\n T_low = midpoint\r\n\r\n P_high = power_out(T_high)\r\n P_low = power_out(T_low)\r\n\r\n # Take the midpoints as the final result since this is the result from halving interval\r\n midpoints.append((T_high+T_low)/2)\r\n\r\n # Also keep interval bounds in case need to compare (maybe also used to check error/give interval)\r\n highs.append(T_high)\r\n lows.append(T_low)\r\n\r\n # Timer and printing out the midpoint temperature in case needs to be seen\r\n print(T_high/2+T_low/2)\r\n print(\"--- %s seconds ---\" % (time.time() - start_time))\r\n\r\n temps = [midpoints, highs, lows]\r\n\r\n return temps", "title": "" }, { "docid": "156029c1488e687e1f942b51f2e57424", "score": "0.44511366", "text": "def rms_spectrum_test(song='tainted', tuning_f0=110., channel=0):\n\tx0, sr, fmt = wavread(song+os.sep+'mix_000.wav')\n\tx1, sr, fmt = wavread(song+os.sep+'mix_100.wav')\n\tif channel==2: # mix the channels\n\t\tif len(x0.shape) > 1:\n\t\t\tx0 = x0.mean(1)\n\t\tif len(x1.shape) > 1:\n\t\t\tx1 = x1.mean(1)\n\telse: # extract given channel\n\t\tif len(x0.shape) > 1:\n\t\t\tx0 = x0[:,channel]\n\t\tif len(x1.shape) > 1:\n\t\t\tx1 = x1[:,channel]\n\t# Short-time Fourier analysis\n\tF0 = LinearFrequencySpectrum(x0,nfft=8192,wfft=8192,nhop=2048)\n\tF1 = LinearFrequencySpectrum(x1,nfft=8192,wfft=8192,nhop=2048)\n\teq_freqs = tuning_f0*2**(arange(0,5,1/12.))\n\teq_bins = array([argmin(abs(F0._fftfrqs-f)) for f in eq_freqs])\n\t# df0 = normalize(F0.X)[eq_bins].mean(1)\n\tdf0 = (normalize(F0.X)[eq_bins]**2).mean(1)**0.5\t\n\t#df1 = nomalize(F1.X)[eq_bins].mean(1)\n\tdf1 = (normalize(F1.X)[eq_bins]**2).mean(1)**0.5\n\tfigure()\n\tsemilogx(F0._fftfrqs[eq_bins], df0)\n\tsemilogx(F0._fftfrqs[eq_bins], 
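The temperature search in the `solveTemp` passage above is plain interval halving on a radiated-power balance. A compact sketch of that loop, with a grey-body `power_out` standing in for the passage's trapezoidal integral over the structure's emission spectrum:

```python
import numpy as np

SIGMA_SB = 5.67e-8  # Stefan-Boltzmann constant, W m^-2 K^-4

def solve_balance_temp(P_in, emissivity=0.5, rel_tol=0.05):
    """Interval-halving solve of power_out(T) = P_in, as in the passage."""
    def power_out(T):
        return 2.0 * emissivity * SIGMA_SB * T ** 4   # both faces radiate

    T_low = (P_in / (2.0 * SIGMA_SB)) ** 0.25   # black-body lower bound
    T_high = 10.0 * T_low                       # generous upper bound
    while abs(power_out(T_high) - power_out(T_low)) >= rel_tol * P_in:
        mid = 0.5 * (T_low + T_high)
        if power_out(mid) > P_in:
            T_high = mid
        else:
            T_low = mid
    return 0.5 * (T_low + T_high)

print(solve_balance_temp(1000.0))   # ~364 K for emissivity 0.5
```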
df1)\n\tlegend(['Original vocals','Autotuned vocals'],loc=0)\n\ttitle(song+': ET bands untuned/tuned vocals mixed with background', fontsize=20)\n\txlabel('Equal Temperament Bands (Hz)',fontsize=20)\n\tylabel('Power',fontsize=20)\t\n\tgrid()\n\treturn {'nontuned_rms':rms_flat(df0), 'autotuned_rms':rms_flat(df1)}", "title": "" }, { "docid": "3f285be4c216423250da32c6088cd394", "score": "0.44413394", "text": "def calc_misc_Sol(self, model):\n # FIXME: This was not calculated on Ocelot\n #self._reg_write(model.vars.FEFILT0_CF_CFOSR, 0)\n \n self._reg_write(model.vars.FEFILT0_CF_ADCBITORDERI, 0)\n self._reg_write(model.vars.FEFILT0_CF_ADCBITORDERQ, 0)\n \n # FIXME: This was not calculated on Ocelot\n #self._reg_write(model.vars.FEFILT0_SRCCHF_SRCDECEN2, 0) \n \n # Digital Gain Control\n self._reg_write(model.vars.FEFILT0_DIGIGAINCTRL_DEC0GAIN, 0)\n self._reg_write(model.vars.FEFILT0_DIGIGAINCTRL_DIGIGAINDOUBLE, 0)\n self._reg_write(model.vars.FEFILT0_DIGIGAINCTRL_DIGIGAINEN, 0)\n self._reg_write(model.vars.FEFILT0_DIGIGAINCTRL_DIGIGAINHALF, 0)\n self._reg_write(model.vars.FEFILT0_DIGIGAINCTRL_DIGIGAINSEL, 0)\n\n ## New registers\n # FIXME: how to calculate these?\n self._reg_write(model.vars.FEFILT0_CHFCTRL_FWSELCOEFF, 0)\n self._reg_write(model.vars.FEFILT0_CHFCTRL_FWSWCOEFFEN, 0)\n self._reg_write(model.vars.FEFILT0_CHFLATENCYCTRL_CHFLATENCY, 0)\n\n self._reg_write(model.vars.FEFILT0_DIGMIXCTRL_DIGMIXMODE, 1)\n self._reg_write(model.vars.FEFILT0_DIGMIXCTRL_DIGMIXFB, 1)\n\n self._reg_write(model.vars.FEFILT0_DCCOMPFILTINIT_DCCOMPINIT, 0)\n self._reg_write(model.vars.FEFILT0_DCCOMPFILTINIT_DCCOMPINITVALI, 0)\n self._reg_write(model.vars.FEFILT0_DCCOMPFILTINIT_DCCOMPINITVALQ, 0)", "title": "" }, { "docid": "8f641587ec8cda6fdeabab99d8ee7b1b", "score": "0.44401404", "text": "def set_waveform(\r\n self,\r\n waveform_index,\r\n raw_values=None,\r\n values=None,\r\n min_value=-1.0,\r\n max_value=1.0,\r\n value_count=8192):\r\n if waveform_index < 1:\r\n raise UnknownWaveformError('waveform_index < 1')\r\n\r\n if raw_values:\r\n if values is not None:\r\n raise RawValueConflictError(\r\n 'Please do not provide both values and raw_values')\r\n else:\r\n raw_values = list(_convert_values_to_raw_values(\r\n values, min_value, max_value))\r\n\r\n if len(raw_values) != value_count:\r\n raise ValueCountError(\r\n 'Unexpected value array length. 
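The `rms_spectrum_test` passage above reduces a spectrogram to equal-temperament bands by snapping each ET frequency to its nearest FFT bin. The mapping on its own, assuming a plain rFFT bin grid in place of the passage's `LinearFrequencySpectrum._fftfrqs`:

```python
import numpy as np

fs, nfft = 32000, 8192
fft_freqs = np.arange(nfft // 2 + 1) * fs / nfft      # rFFT bin centres

tuning_f0 = 110.0
eq_freqs = tuning_f0 * 2 ** np.arange(0, 5, 1 / 12)   # 5 octaves in semitones
eq_bins = np.array([np.argmin(np.abs(fft_freqs - f)) for f in eq_freqs])

print(list(zip(eq_freqs[:3].round(1), fft_freqs[eq_bins[:3]].round(1))))
# [(110.0, 109.4), (116.5, 117.2), (123.5, 125.0)] at ~3.9 Hz bin width
```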
expected %d, got %d' %\r\n (value_count, len(raw_values)))\r\n\r\n for c in (0, 1):\r\n if self.is_serial and self.get(c, 'wave') == 'arb%u' % waveform_index:\r\n raise ChannelActiveError(\r\n 'Can not update arb%u because it is active on channel %u' %\r\n (waveform_index, c))\r\n\r\n data = []\r\n for v in raw_values:\r\n data.append(v & 255) # lower 8 bits\r\n data.append((v >> 8) & 63) # upper 6 bits\r\n\r\n response = self.send('DDS_WAVE%u' % waveform_index)\r\n if self.is_serial and response != 'W':\r\n raise CommandNotAcknowledgedError('DDS_WAVE command was not acknowledged')\r\n\r\n if self.is_serial:\r\n self.port.write(bytearray(data))\r\n else:\r\n for i in range(0, len(data), 16):\r\n self.port.write(''.join('%02X' % d for d in data[i:i+16]))\r\n self.port.write('\\n')\r\n response = self._recv('(Wave Data)').strip()\r\n if self.is_serial and response != 'HN':\r\n raise CommandNotAcknowledgedError('DDS_WAVE data was not accepted')", "title": "" }, { "docid": "6d9f4ad7997216c4b8e50fa94562f81e", "score": "0.4436732", "text": "def wrf_caustic(_di,_df,_pts,_wfr,_optBL,_filename, _inPol=6, _inIntType = 0, _inDepType = 7, _inE = None, _inX = 0, _inY = 0):\n print()\n if _inE is None:\n _inE = _wfr.mesh.eStart\n\n delta_Z = numpy.linspace(_di, _df, num=_pts) #\n\n z0 = _optBL.arOpt[-1].L\n\n if (z0+_df) < 0:\n print(\">>>> Error: trying to start calculation before the last optical element. Check simulation range.\")\n sys.exit()\n\n for k in range(_pts):\n print(\">>>> Caustics: point %d\"%(k+1)+\" out of %d\"%_pts)\n\n wfrp = deepcopy(_wfr)\n position = z0 + delta_Z[k]\n\n _optBL.arOpt[-1] = SRWLOptD(position)\n\n srwl.PropagElecField(wfrp,_optBL)\n\n # -vs x (horizontal position or angle);\n if _inDepType == 1:\n if k == 0:\n ar_xi = array('f', [0] * _pts)\n ar_xf = array('f', [0] * _pts)\n Cst_vs_x = NaN((wfrp.mesh.nx, _pts), 'f')\n\n arI = array('f', [0] * wfrp.mesh.nx)\n\n srwl.CalcIntFromElecField(arI, wfrp, _inPol, _inIntType, 1, _inE, _inX, _inY)\n\n arI = numpy.array(arI)\n Cst_vs_x[:, k] = arI\n\n ar_xi[k] = wfrp.mesh.xStart\n ar_xf[k] = wfrp.mesh.xFin\n\n # -vs y (vertical position or angle);\n if _inDepType == 2:\n if k == 0:\n ar_yi = array('f', [0] * _pts)\n ar_yf = array('f', [0] * _pts)\n Cst_vs_y = NaN((wfrp.mesh.ny,_pts), 'f')\n\n arI = array('f', [0] * wfrp.mesh.ny)\n\n srwl.CalcIntFromElecField(arI, wfrp, _inPol, _inIntType, 2, _inE, _inX, _inY)\n arI = numpy.array(arI)\n Cst_vs_y[:,k] = arI\n\n ar_yi[k] = wfrp.mesh.yStart\n ar_yf[k] = wfrp.mesh.yFin\n\n # -vs x&y (horizontal and vertical positions or angles);\n if _inDepType == 3:\n subgroupname = \"wfr_%d\"%k\n\n arI = array('f', [0] * wfrp.mesh.nx * wfrp.mesh.ny)\n\n srwl.CalcIntFromElecField(arI, wfrp, _inPol, _inIntType, 3, _inE, _inX, _inY)\n\n arI = numpy.array(arI)\n arI = arI.reshape((wfrp.mesh.ny, wfrp.mesh.nx)).T\n\n wfr_mesh = [wfrp.mesh.eStart, wfrp.mesh.xStart, wfrp.mesh.xFin, wfrp.mesh.nx, wfrp.mesh.yStart,wfrp.mesh.yFin, wfrp.mesh.ny,_di,_df,_pts]\n if k == 0:\n _save_caustic_2_hdf5(arI,wfr_mesh,_filename,subgroupname,_overwrite=False)\n else:\n _save_caustic_2_hdf5(arI, wfr_mesh, _filename, subgroupname)\n\n # -vs x (horizontal position or angle) & -vs y (vertical position or angle) - DEFAULT;\n if _inDepType == 7:\n # -vs x\n if k == 0:\n ar_xi = array('f', [0] * _pts)\n ar_xf = array('f', [0] * _pts)\n Cst_vs_x = NaN((wfrp.mesh.nx,_pts), 'f')\n\n arI = array('f', [0] * wfrp.mesh.nx)\n\n srwl.CalcIntFromElecField(arI, wfrp, _inPol, _inIntType, 1, _inE, _inX, _inY)\n\n arI = numpy.array(arI)\n 
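The `set_waveform` passage above relies on `_convert_values_to_raw_values`, which is not shown. A hypothetical reconstruction consistent with the 14-bit packing the passage performs (low 8 bits of each sample, then its upper 6 bits):

```python
def convert_values_to_raw_values(values, min_value=-1.0, max_value=1.0):
    # Hypothetical helper: map floats in [min_value, max_value]
    # onto the 14-bit DAC range 0..16383.
    span = max_value - min_value
    for v in values:
        clipped = min(max(v, min_value), max_value)
        yield int(round((clipped - min_value) / span * 16383))

def pack_raw_values(raw_values):
    # same byte layout as the passage: low 8 bits, then upper 6 bits
    data = []
    for v in raw_values:
        data.append(v & 255)
        data.append((v >> 8) & 63)
    return bytearray(data)

print(list(pack_raw_values(convert_values_to_raw_values([-1.0, 0.0, 1.0]))))
# -> [0, 0, 0, 32, 255, 63]
```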
Cst_vs_x[:,k] = arI\n\n ar_xi[k] = wfrp.mesh.xStart\n ar_xf[k] = wfrp.mesh.xFin\n\n # -vs y\n if k == 0:\n ar_yi = array('f', [0] * _pts)\n ar_yf = array('f', [0] * _pts)\n Cst_vs_y = NaN((wfrp.mesh.ny,_pts), 'f')\n\n arI = array('f', [0] * wfrp.mesh.ny)\n\n srwl.CalcIntFromElecField(arI, wfrp, _inPol, _inIntType, 2, _inE, _inX, _inY)\n\n arI = numpy.array(arI)\n Cst_vs_y[:,k] = arI\n\n ar_yi[k] = wfrp.mesh.yStart\n ar_yf[k] = wfrp.mesh.yFin\n\n # -vs x (horizontal position or angle) - COLLAPSED;\n if _inDepType == 8:\n if k == 0:\n ar_xi = array('f', [0] * _pts)\n ar_xf = array('f', [0] * _pts)\n Cst_vs_x = NaN((wfrp.mesh.nx, _pts), 'f')\n\n arI = array('f', [0] * wfrp.mesh.nx * wfrp.mesh.ny)\n\n srwl.CalcIntFromElecField(arI, wfrp, _inPol, _inIntType, 3, _inE, _inX, _inY)\n\n arI = numpy.array(arI)\n arI = arI.reshape((wfrp.mesh.ny, wfrp.mesh.nx)).T\n\n for x in range(wfrp.mesh.nx):\n Cst_vs_x[x,k] = numpy.sum(arI[x,:])/wfrp.mesh.ny\n\n ar_xi[k] = wfrp.mesh.xStart\n ar_xf[k] = wfrp.mesh.xFin\n\n # -vs y (vertical position or angle) - COLLAPSED;\n if _inDepType == 9:\n if k == 0:\n ar_yi = array('f', [0] * _pts)\n ar_yf = array('f', [0] * _pts)\n Cst_vs_y = NaN((wfrp.mesh.ny,_pts), 'f')\n\n arI = array('f', [0] * wfrp.mesh.nx * wfrp.mesh.ny)\n\n srwl.CalcIntFromElecField(arI, wfrp, _inPol, _inIntType, 3, _inE, _inX, _inY)\n\n arI = numpy.array(arI)\n arI = arI.reshape((wfrp.mesh.ny, wfrp.mesh.nx)).T\n\n for y in range(wfrp.mesh.ny):\n Cst_vs_y[y,k] = numpy.sum(arI[:,y])/wfrp.mesh.nx\n\n ar_yi[k] = wfrp.mesh.yStart\n ar_yf[k] = wfrp.mesh.yFin\n\n if _inDepType == 10: # -vs x (horizontal position or angle) & -vs y (vertical position or angle) - COLLAPSED;\n if k == 0:\n ar_xi = array('f', [0] * _pts)\n ar_xf = array('f', [0] * _pts)\n ar_yi = array('f', [0] * _pts)\n ar_yf = array('f', [0] * _pts)\n\n Cst_vs_x = NaN((wfrp.mesh.nx,_pts), 'f')\n Cst_vs_y = NaN((wfrp.mesh.ny,_pts), 'f')\n\n arI = array('f', [0]*wfrp.mesh.nx*wfrp.mesh.ny)\n\n srwl.CalcIntFromElecField(arI, wfrp, _inPol, _inIntType, 3, _inE, _inX, _inY)\n\n arI = numpy.array(arI)\n arI = arI.reshape((wfrp.mesh.ny, wfrp.mesh.nx)).T\n\n for x in range(wfrp.mesh.nx):\n Cst_vs_x[x,k] = numpy.sum(arI[x,:])/wfrp.mesh.ny\n\n for y in range(wfrp.mesh.ny):\n Cst_vs_y[y,k] = numpy.sum(arI[:,y])/wfrp.mesh.nx\n\n ar_xi[k] = wfrp.mesh.xStart\n ar_xf[k] = wfrp.mesh.xFin\n\n ar_yi[k] = wfrp.mesh.yStart\n ar_yf[k] = wfrp.mesh.yFin\n\n print(\"\\n>>>> Caustics: post processing. 
It takes time.\")\n\n # -vs x (horizontal position or angle)\n if (_inDepType == 1) or (_inDepType == 8):\n X = numpy.linspace(numpy.amax(ar_xi),numpy.amin(ar_xf),wfrp.mesh.nx)\n\n Cst_vs_xn = NaN((wfrp.mesh.nx, _pts), 'f')\n\n for k in range(_pts):\n ar = numpy.linspace(ar_xi[k], ar_xf[k], wfrp.mesh.nx)\n\n for x in range(wfrp.mesh.nx):\n Cst_vs_xn[x,k] = interp_1d_var(X[x], ar, Cst_vs_x[:,k], _ord=3)\n\n wfr_mesh = [wfrp.mesh.eStart, numpy.amax(ar_xi), numpy.amin(ar_xf), wfrp.mesh.nx, _di, _df, _pts]\n\n _save_caustic_2_hdf5(Cst_vs_xn, wfr_mesh, _filename, 'Horizontal',_overwrite=False)\n\n # -vs y (vertical position or angle)\n if (_inDepType == 2) or (_inDepType == 9):\n Y = numpy.linspace(numpy.amax(ar_yi),numpy.amin(ar_yf),wfrp.mesh.ny)\n\n Cst_vs_yn = NaN((wfrp.mesh.ny, _pts), 'f')\n\n for k in range(_pts):\n ar = numpy.linspace(ar_yi[k], ar_yf[k], wfrp.mesh.ny)\n\n for y in range(wfrp.mesh.ny):\n Cst_vs_yn[y,k] = interp_1d_var(Y[y], ar, Cst_vs_y[:,k], _ord=3)\n\n wfr_mesh = [wfrp.mesh.eStart, numpy.amax(ar_yi), numpy.amin(ar_yf), wfrp.mesh.ny, _di, _df, _pts]\n\n _save_caustic_2_hdf5(Cst_vs_yn, wfr_mesh, _filename, 'Vertical',_overwrite=False)\n\n # -vs x (horizontal position or angle) & -vs y (vertical position or angle)\n if (_inDepType == 7) or (_inDepType == 10):\n X = numpy.linspace(numpy.amax(ar_xi),numpy.amin(ar_xf),wfrp.mesh.nx)\n Y = numpy.linspace(numpy.amax(ar_yi),numpy.amin(ar_yf),wfrp.mesh.ny)\n\n Cst_vs_xn = NaN((wfrp.mesh.nx, _pts), 'f')\n Cst_vs_yn = NaN((wfrp.mesh.ny, _pts), 'f')\n\n for k in range(_pts):\n ar = numpy.linspace(ar_xi[k], ar_xf[k], wfrp.mesh.nx)\n for x in range(wfrp.mesh.nx):\n Cst_vs_xn[x,k] = interp_1d_var(X[x], ar, Cst_vs_x[:,k], _ord=3)\n\n ar = numpy.linspace(ar_yi[k], ar_yf[k], wfrp.mesh.ny)\n for y in range(wfrp.mesh.ny):\n Cst_vs_yn[y,k] = interp_1d_var(Y[y], ar, Cst_vs_y[:,k], _ord=3)\n\n wfr_mesh = [wfrp.mesh.eStart,numpy.amax(ar_xi),numpy.amin(ar_xf),wfrp.mesh.nx,_di,_df,_pts]\n _save_caustic_2_hdf5(Cst_vs_xn,wfr_mesh,_filename,'Horizontal',_overwrite=False)\n\n wfr_mesh = [wfrp.mesh.eStart,numpy.amax(ar_yi),numpy.amin(ar_yf),wfrp.mesh.ny,_di,_df,_pts]\n _save_caustic_2_hdf5(Cst_vs_yn,wfr_mesh,_filename,'Vertical',_overwrite=True)\n\n print(\"\\n>>>> Caustics: calculation finished. 
Data saved to file.\")", "title": "" }, { "docid": "0e07079591215c769b1ac653cf1b99e4", "score": "0.44292414", "text": "def calc_osc_accels(self, osc_freqs, osc_damping=0.05, trans_func=[]):\n if len(trans_func):\n tf = np.asarray(trans_func)\n else:\n tf = np.ones_like(self.freqs)\n\n resp = np.array([\n self.calc_peak(\n tf * calc_sdof_tf(self.freqs, of, osc_damping),\n osc_freq=of,\n osc_damping=osc_damping,\n site_tf=trans_func) for of in osc_freqs\n ])\n\n of = 100\n peak, pf = self.peak_calculator(\n self._duration, self._freqs, self._fourier_amps *\n np.abs(calc_sdof_tf(self.freqs, of, osc_damping)))\n\n return resp", "title": "" }, { "docid": "588b18c8c4fc54affbab364997763fb1", "score": "0.4423978", "text": "def add_data(self, FITSrec, toneFITSrec, cfg_key):\n scans = self.collector.fft_meta.keys()\n scans.sort()\n numscans = len(scans) # scans observed\n self.logger.debug(\"add_data: %d scans: %s\", numscans, scans)\n \n # both IFs have the same subchannels so use IF 1.\n subchannels = self.collector.wvsr_cfg[cfg_key][1]['subchannels']\n self.logger.debug(\"add_data: subchannels: %s\", subchannels)\n anysubch = subchannels[0] # any subchannel\n # create frame to return antenna RA and dec to J2000\n fk5_2000 = FK5(equinox=Time(2000, format='jyear', scale='utc'))\n \n # receiver providing the signal to the spectrometer\n rx_key = self.collector.equip[cfg_key][1]['Receiver'].keys()[0] # only one\n rx = self.collector.equip[cfg_key][1]['Receiver'][rx_key]\n self.logger.debug(\"add_data: receiver is %s\", rx)\n \n # original number of channels\n num_chan = self.collector.equip[cfg_key][1]['Backend'].num_chan\n\n bad_tones = []\n #data_row_index = 0 # one row for every scan and every cycle (subchannel)\n #tone_row_index = 0 #\n have_tones = False\n #IFpower = {}\n # fill up the rows scan-by-scan\n for scan in scans:\n # dataset header is keyed on the index of the scan in the set of scans\n try:\n scan_index = scans.index(scan)\n except ValueError,details:\n self.logger.warning(\"add _data: scan %s not found; skipped\",\n scan)\n continue\n # use date of first record; see doc string for explanation of extra index\n year, month, day = calendar_date(self.collector.year, self.collector.doy)\n date_obs = \"%4d/%02d/%02d\" % (year, month, day)\n fyear = self.collector.year + self.collector.doy/365.25\n self.logger.debug(\"add_data: for scan %d, %s is J%f\", scan, date_obs, fyear)\n #subch_tone_idx = 0 # collect tone data from both subchannels\n # add a CYCLE row for every WVSR subchannel\n for subch in subchannels:\n sub_idx = subchannels.index(subch)\n try:\n datafile = self.collector.scaninfo[scan]['subch '+str(sub_idx+1)]\n except KeyError, details:\n self.logger.warning(\"add_data: could not find subch %d for scan %d\",\n sub_idx+1, scan)\n continue\n # this returns a structured array with 131072 spectrum channels\n thisdata = read_FFT_file(fftdir+datafile)\n # check on some reasons for discarding these data\n if type(thisdata) != numpy.ndarray:\n # bad data file\n # this is probably the end of the recording\n self.logger.warning(\n \"add_data: read_FFT_file return not a numpy array for scan %d %s\",\n scan, subch)\n continue\n cycle = sub_idx + 1\n data_row_index = get_row(\"SINGLE DISH\", scans, scan=scan,\n num_cycles=len(subchannels), cycle=cycle)\n #self.logger.debug(\n # \"add_data: processing scan %d %s data row %d tone row %d\",\n # scan, subch, data_row_index, tone_row_index)\n self.logger.debug(\"add_data: processing scan %d subch %s data row %d\",\n scan, subch, 
data_row_index)\n FITSrec[data_row_index]['SCAN'] = scan # int\n FITSrec[data_row_index]['DATE-OBS'] = date_obs \n # UNIX time at midnight\n midnight = time.mktime(dateutil.parser.parse(date_obs).timetuple())\n # each subchannel has its own cycle\n FITSrec[data_row_index]['CYCLE'] = cycle\n self.logger.debug(\"add_data: CYCLE = %d\", \n FITSrec[data_row_index]['CYCLE'])\n # In [26]: data.dtype.names\n # Out[26]: \n # ('freq', 'IF1-ps', 'IF2-ps', 'IF1-phase', 'IF2-phase',\n # 'I', 'Q', 'U', 'V', 'P',\n # 'count', 'index')\n if self.collector.scaninfo[scan] == {}:\n # skip this scan\n self.logger.warning(\"add_data: no scan info for %d\", scan)\n continue\n try:\n starttime = self.collector.scaninfo[scan]['start']\n except KeyError:\n # incomplete scan info\n continue\n # process the data\n endtime = self.collector.scaninfo[scan]['end']\n self.logger.debug(\"add_data: for scan %d subch %s between %s and %s\",\n scan, subch, starttime, endtime)\n startUXtime = datetime_to_UnixTime(starttime)\n endUXtime = datetime_to_UnixTime(endtime)\n # IFs used for this scan? One (single pol) or two (both pols)\n if numpy.any(thisdata['IF2-ps'] != 0):\n IFs = ['IF1', 'IF2']\n else:\n IFs = ['IF1']\n # put data in row\n FITSrec[data_row_index]['UNIXtime'] = startUXtime\n if startUXtime == 0.0:\n FITSrec[data_row_index]['CYCLE'] = 0\n FITSrec[data_row_index]['TIME'] = \\\n FITSrec[data_row_index]['UNIXtime']-midnight\n if self.collector.scaninfo[scan]['source'][-4:] == \"-ref\":\n FITSrec[data_row_index]['OBJECT'] = \\\n self.collector.scaninfo[scan]['source'][:-4]\n FITSrec[data_row_index]['SIG'] = False\n else:\n FITSrec[data_row_index]['OBJECT'] = \\\n self.collector.scaninfo[scan]['source']\n FITSrec[data_row_index]['SIG'] = True\n self.logger.debug(\"add_data: source is %s\", \n FITSrec[data_row_index]['OBJECT'])\n response = self.logserver.get_azel(startUXtime, endUXtime)\n self.logger.debug(\"add_data: response = %s\", response)\n if response:\n az,el = response\n FITSrec[data_row_index]['AZIMUTH'] = az\n FITSrec[data_row_index]['ELEVATIO'] = el\n else:\n FITSrec[data_row_index]['AZIMUTH'] = numpy.nan\n FITSrec[data_row_index]['ELEVATIO'] = numpy.nan\n FITSrec[data_row_index]['OBSMODE'] = obsmode\n # same exposure for all channels\n FITSrec[data_row_index]['EXPOSURE'] = thisdata[0]['count']\n \n # same frequency and bandwidth for both IFs so use IF 1\n obsfreq = self.collector.wvsr_cfg[cfg_key][1]['rf_to_if_lo']*1e6 \\\n + self.collector.wvsr_cfg[cfg_key][1][subch]['sfro']\n\n FITSrec[data_row_index]['BANDWIDT'] = \\\n self.collector.wvsr_cfg[cfg_key][1]['chan_id 1']['bandwidth']\n # add the data to the columns of this row\n FITSrec[data_row_index]['OBSFREQ'] = obsfreq\n FITSrec[data_row_index]['RESTFREQ'] = obsfreq # is this always true?\n self.logger.debug(\"add_data: OBJECT is '%s'\",\n FITSrec[data_row_index]['OBJECT'])\n sourcename = FITSrec[data_row_index]['OBJECT'].replace('_',' ')\n self.logger.debug(\"add_data: OBJECT is '%s'\",\n FITSrec[data_row_index]['OBJECT'])\n FITSrec[data_row_index]['VELOCITY'] = \\\n self.collector.sources[sourcename]['Vlsr']\n FITSrec[data_row_index]['VELDEF'] = veldef\n weather = self.logserver.get_weather(startUXtime)\n self.logger.debug(\"add_data: weather at %s is %s\", startUXtime, weather)\n if weather:\n FITSrec[data_row_index]['TAMBIENT'] = weather[0]\n FITSrec[data_row_index]['PRESSURE'] = weather[1]\n FITSrec[data_row_index]['HUMIDITY'] = weather[2]\n FITSrec[data_row_index]['WINDSPEE'] = weather[3]\n FITSrec[data_row_index]['WINDDIRE'] = weather[4]\n 
else:\n self.logger.debug(\"add_data: weather not available for %f\", startUXtime)\n FITSrec[data_row_index]['TAMBIENT'] = numpy.nan\n FITSrec[data_row_index]['PRESSURE'] = numpy.nan\n FITSrec[data_row_index]['HUMIDITY'] = numpy.nan\n FITSrec[data_row_index]['WINDSPEE'] = numpy.nan\n FITSrec[data_row_index]['WINDDIRE'] = numpy.nan\n # GBT SDFITS wants SIDEBAND\n if rx['IFmode'] == 'U':\n FITSrec[data_row_index]['SIDEBAND'] = +1\n elif rx['IFmode'] == 'L':\n FITSrec[data_row_index]['SIDEBAND'] = -1\n else:\n self.logger.error(\"add_data: IF mode %s is invalid; default to USB\",\n rx['IFmode'])\n FITSrec[data_row_index]['SIDEBAND'] = +1\n \n datacubeshape = FITSrec[data_row_index]['DATA'].shape\n # the frequency axis is first in FITS/FORTRAN order and last (of four)\n # in PYTHON/C order\n num_Stokes_chan = datacubeshape[3]\n \n # the sign of CDELT1 depends on the sideband of the last SSB mixer in\n # the chain\n \n # second and third data axes (coordinates)\n RA, dec = self.logserver.get_RAdec(startUXtime)\n self.logger.debug(\"add_data: apparent RA,dec = %f,%f\", RA, dec)\n c = SkyCoord(RA, dec, unit=(u.deg, u.deg),\n frame=FK5(equinox=Time('J'+str(fyear), scale='utc')))\n self.logger.debug(\"add_data: RA,dec = %f,%f\", c.ra.hour, c.dec.deg)\n c2000 = c.transform_to(fk5_2000)\n self.logger.debug(\"add_data: precessed RA,dec = %f,%f\",\n c2000.ra.hour, c2000.dec.deg)\n FITSrec[data_row_index]['CRVAL2'] = c2000.ra.hour # hours\n FITSrec[data_row_index]['CRVAL3'] = c2000.dec.deg # deg\n FITSrec[data_row_index]['EQUINOX'] = 2000\n # get the radial velocity of the LSR\n FITSrec[data_row_index]['VFRAME'] = \\\n V_LSR(c2000.ra.hour, c2000.dec.deg, self.tel.number, starttime)\n FITSrec[data_row_index]['RVSYS'] = \\\n FITSrec[data_row_index]['VELOCITY'] \\\n + FITSrec[data_row_index]['VFRAME']\n \n # fourth data axis (polarization)\n FITSrec[data_row_index]['CRVAL4'] = -1\n FITSrec[data_row_index]['CDELT4'] = -1 # for I,Q,U,V (-1,-2,-3,-4)\n \n # initialize power averages\n #IFpower[data_row_index] = {}\n\n # fit the tones to the data using the original resolution\n bandwidth = FITSrec[data_row_index]['BANDWIDT']\n tone_offsets, tone_chnls = tone_chnl_nums(num_chan, obsfreq, bandwidth)\n self.logger.debug(\"add_data: tone offsets: %s\", tone_offsets)\n self.logger.debug(\"add_data: tone channels: %s\", tone_chnls)\n tone_indices = list(tone_chnls) # define the channels to be selected\n num_tones = len(tone_indices)\n offset, center_tone = math.modf(obsfreq/1e6) # tones every MHz\n \n # the objective is to fit the position of one (e.g. central) tone\n # and one stdev with all the other tones at a fixed distance from the\n # central tone and one stdev for all tones. The individual amplitudes\n # may vary. 
'multigauss.other_pars' has the fixed parameters.\n for IF in IFs:\n self.logger.debug(\"add_data: processing %s\", IF)\n IFidx = IFs.index(IF)\n # this is the full spectrum IF power\n IFpwr = thisdata[IF+\"-ps\"]\n #IFpower[data_row_index][IF] = IFpwr\n # a rail is a set of evenly spaced tones\n rails = self.check_tones(thisdata, IF, threshold=30)\n self.logger.debug(\"add_data: %s has %d tone rails: %s\",\n IF, len(rails), rails)\n if len(rails) > 1:\n # bad tone rails present; skip this dataset\n FITSrec[data_row_index]['CYCLE'] = 0\n bad_tones.append(scan)\n self.logger.warning(\n \"add_data: scan %d subch %s row %d has extra tones\",\n scan, subch, data_row_index) \n continue\n elif len(rails) == 0:\n # no tones; make compressed IF spectra\n newspec, newrefval, newrefpix, newdelta = \\\n reduce_spectrum_channels(IFpwr, 0, 0, 0, num_chan=1024)\n FITSrec[data_row_index]['IFSPECTR'][IFidx, 0, 0,:] = newspec\n else:\n have_tones = True\n # accept only one rail\n toneFITSrec, newspec = self.add_tone_data(thisdata,\n collector, cfg_key, toneFITSrec, scan,\n subch, IFidx, IFs, midnight, IFpwr)\n # newspec is the IF spectrum compressed to 1024 channels\n FITSrec[data_row_index]['IFSPECTR'][IFidx, 0, 0,:] = newspec\n \n # compute the average power as proxy for TSYS\n FITSrec[data_row_index]['TSYS'][IFidx,0,0,0] = IFpwr.mean()\n FITSrec.columns['TSYS'].unit = \"count\"\n\n # the data in dataset is keyed on scan number\n refval = FITSrec[data_row_index]['OBSFREQ']\n refpix = num_chan/2\n delta = self.bandwidth/num_chan\n self.logger.debug(\"add_data: loading DATA\")\n I, newrefval, newrefpix, newdelta = \\\n reduce_spectrum_channels(thisdata['I'], refval, refpix, delta,\n num_chan=num_Stokes_chan)\n Q, newrefval, newrefpix, newdelta = \\\n reduce_spectrum_channels(thisdata['Q'], refval, refpix, delta,\n num_chan=num_Stokes_chan)\n U, newrefval, newrefpix, newdelta = \\\n reduce_spectrum_channels(thisdata['U'], refval, refpix, delta,\n num_chan=num_Stokes_chan)\n V, newrefval, newrefpix, newdelta = \\\n reduce_spectrum_channels(thisdata['V'], refval, refpix, delta,\n num_chan=num_Stokes_chan)\n FITSrec[data_row_index]['DATA'][0, 0, 0,:] = I\n FITSrec[data_row_index]['DATA'][1, 0, 0,:] = Q\n FITSrec[data_row_index]['DATA'][2, 0, 0,:] = U\n FITSrec[data_row_index]['DATA'][3, 0, 0,:] = V\n FITSrec[data_row_index]['CRVAL1'] = newrefval + delta/2\n FITSrec[data_row_index]['CRPIX1'] = newrefpix\n FITSrec[data_row_index]['CDELT1'] = newdelta\n self.logger.info(\"add_data: finished row %d scan %d cycle %d\",\n data_row_index, FITSrec[data_row_index]['SCAN'],\n FITSrec[data_row_index]['CYCLE'])\n #data_row_index += 1\n # end subch loop\n # end scan loop\n if unique(bad_tones):\n self.exthead.add_comment(\"bad tones in scans %s\" % str(unique(bad_tones)))\n if have_tones:\n return FITSrec, toneFITSrec\n else:\n return FITSrec, None", "title": "" }, { "docid": "4a457b54b86fafe3a154df9ca7bdaf95", "score": "0.4413369", "text": "def ffcalcfftqual(a, freq=None):\r\n if freq==None: freq=32000\r\n fft=sc.fftpack.fft(a)\r\n corr=sc.fftpack.ifft(fft*fft.conjugate())\r\n #corr=corr[:(len(corr)/4)]\r\n dfff=np.diff(corr)\r\n dat=np.diff(np.where(dfff>0,1,0))\r\n if -1 not in list(dat): out=(float(freq),float(0))\r\n else:\r\n first=(((list(dat)).index(-1)))\r\n slope=(dfff[(first+1)]-dfff[first])\r\n out=(slope*first-dfff[first])/slope\r\n out=(freq/out,corr[first])\r\n return out", "title": "" }, { "docid": "0e350ed3df40e7169873001aae4ca169", "score": "0.4412964", "text": "def ffcalcfft(a, freq=None):\r\n if 
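The `ffcalcfftqual` passage just above (and `ffcalcfft`, which follows) estimate pitch from the FFT-based autocorrelation. The core Wiener-Khinchin step in isolation, on a known 440 Hz tone, with a simpler peak pick than the passages' interpolated zero-crossing:

```python
import numpy as np
import scipy.fftpack as sc_fft

fs = 32000
t = np.arange(2048) / fs
a = np.sin(2 * np.pi * 440.0 * t)            # 440 Hz test tone

# Wiener-Khinchin: autocorrelation = IFFT(FFT * conj(FFT)), as in the passages
fft = sc_fft.fft(a)
corr = sc_fft.ifft(fft * fft.conjugate()).real
corr = corr[: len(corr) // 4]

# first local maximum after the zero-lag peak gives the period
d = np.diff(corr)
start = np.argmax(d > 0)                     # skip the initial decay
peak = start + np.argmax(corr[start:])
print(fs / peak)                             # ~440 Hz
```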
freq==None: freq=32000\r\n fft=sc.fftpack.fft(a)\r\n corr=sc.fftpack.ifft(fft*fft.conjugate())\r\n corr=corr[:(len(corr)/4)]\r\n dfff=np.diff(corr)\r\n dat=np.diff(np.where(dfff>0,1,0))\r\n if -1 not in list(dat): out=freq\r\n else:\r\n first=(((list(dat)).index(-1)))\r\n slope=(dfff[(first+1)]-dfff[first])\r\n out=(slope*first-dfff[first])/slope\r\n out=freq/out\r\n return out", "title": "" }, { "docid": "5afb89162e9c5b42e57cc528cbad20e3", "score": "0.44035128", "text": "def vocal_tract(formant_frequencies,f_sampling):\n global bw\n r = []\n theta = []\n ts = 1/f_sampling\n for i in formant_frequencies:\n r.append(np.exp(-pi*bw*ts)) #radius in z-plane\n theta.append(2*pi*i*ts) #angle in z-plane\n\n denom_coeffs = []\n num_coeffs = []\n convolved_a = 1\n for radius,angle in zip(r,theta):\n poles = [radius*exp(1j*angle),radius*exp(-1j*angle)]\n zeros = zeros_like(poles)\n b,a = zpk2tf(zeros,poles,k=1)\n num_coeffs.append(b)\n denom_coeffs.append(a)\n convolved_a = conv(convolved_a,a)\n\n denom_coeffs = zeros_like(convolved_a)\n denom_coeffs[0] = 1\n \n return denom_coeffs,convolved_a", "title": "" }, { "docid": "0d603a83e0c526491af56f740c59de9e", "score": "0.440345", "text": "def search_with_qffa(\n times,\n f0,\n f1,\n fdot=0,\n fddot=0,\n nbin=16,\n nprof=None,\n npfact=2,\n oversample=8,\n n=1,\n search_fdot=True,\n t0=None,\n t1=None,\n silent=False,\n):\n if nprof is None:\n # total_delta_phi = 2 == dnu * T\n # In a single sub interval\n # delta_phi = dnu * t\n # with t = T / nprof\n # so dnu T / nprof < 1 / nbin, and\n # nprof > total_delta_phi * nbin to get all the signal inside one bin\n # in a given sub-integration\n nprof = 4 * 2 * nbin * npfact\n\n times = copy.deepcopy(times)\n\n if t0 is None:\n t0 = times.min()\n if t1 is None:\n t1 = times.max()\n meantime = (t1 + t0) / 2\n times -= meantime\n\n maxerr = check_phase_error_after_casting_to_double(np.max(times), f1, fdot)\n if maxerr > 1 / nbin / 10:\n warnings.warn(\n f\"Maximum error on the phase expected when casting to \" f\"double: {maxerr}\"\n )\n warnings.warn(\n \"Casting to double produces non-negligible phase errors. 
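`vocal_tract` above places one conjugate pole pair per formant. A single-resonator version of the same z-plane construction, driven with noise to show the resonance (the bandwidth and sample rate are illustrative values; `bw` is a global in the passage):

```python
import numpy as np
from scipy.signal import zpk2tf, lfilter

fs = 16000.0
bw = 100.0          # formant bandwidth, Hz
f1 = 700.0          # formant frequency, Hz

r = np.exp(-np.pi * bw / fs)     # pole radius, as in the passage
theta = 2 * np.pi * f1 / fs      # pole angle
b, a = zpk2tf([], [r * np.exp(1j * theta), r * np.exp(-1j * theta)], k=1)

y = lfilter(b.real, a.real, np.random.randn(8192))
freqs = np.fft.rfftfreq(y.size, 1 / fs)
print(freqs[np.argmax(np.abs(np.fft.rfft(y)))])   # typically near 700 Hz
```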
\"\n \"Please use shorter light curves.\",\n AstropyUserWarning,\n )\n\n times = times.astype(np.double)\n\n length = t1 - t0\n\n frequency = (f0 + f1) / 2\n\n # Step: npfact * 1 / T\n\n step = 4 * npfact / length\n\n niter = int(np.rint((f1 - f0) / step)) + 2\n\n allvalues = list(range(-(niter // 2), niter // 2))\n if allvalues == []:\n allvalues = [0]\n\n all_fgrid = []\n all_fdotgrid = []\n all_stats = []\n\n local_show_progress = show_progress\n if silent:\n\n def local_show_progress(x):\n return x\n\n for ii, i in enumerate(local_show_progress(allvalues)):\n offset = step * i\n fdot_offset = 0\n\n mean_f = np.double(frequency + offset + 0.12 * step)\n mean_fdot = np.double(fdot + fdot_offset)\n mean_fddot = np.double(fddot)\n fgrid, fdotgrid, stats = search_with_qffa_step(\n times,\n mean_f,\n mean_fdot=mean_fdot,\n mean_fddot=mean_fddot,\n nbin=nbin,\n nprof=nprof,\n npfact=npfact,\n oversample=oversample,\n n=n,\n search_fdot=search_fdot,\n )\n\n if all_fgrid is None:\n all_fgrid = fgrid\n all_fdotgrid = fdotgrid\n all_stats = stats\n else:\n all_fgrid.append(fgrid)\n all_fdotgrid.append(fdotgrid)\n all_stats.append(stats)\n all_fgrid = np.vstack(all_fgrid)\n all_fdotgrid = np.vstack(all_fdotgrid)\n all_stats = np.vstack(all_stats)\n\n step = np.median(np.diff(all_fgrid[:, 0]))\n fdotstep = np.median(np.diff(all_fdotgrid[0]))\n if search_fdot:\n return (\n all_fgrid.T,\n all_fdotgrid.T,\n all_stats.T,\n step,\n fdotstep,\n length,\n )\n else:\n return all_fgrid.T[0], all_stats.T[0], step, length", "title": "" }, { "docid": "59fe27303525284dad2ecfcfa36fd13c", "score": "0.43991393", "text": "def cwt_tc_frequency(data, wavelet_func, scales, dt=1., axis=-1):\n # make sure that parmeters are arrays\n data = jnp.asarray(data)\n scales = jnp.asarray(scales)\n # number of data points for each data vector\n n = data.shape[axis]\n # next power of 2\n pn = next_pow_of_2(n)\n # compute the FFT of the data\n data_fft = jfft.fft(data, n=pn, axis=axis)\n # angular frequencies at which the Wavelet basis will be computed\n wk = jfft.fftfreq(pn, d=dt) * 2 * jnp.pi\n # sample wavelet at all the scales and normalise\n norm = ( 1 / dt) ** .5\n wavelet_freq = norm * wavelet_func(wk, scales)\n # take the conjugate\n wavelet_freq = jnp.conj(wavelet_freq)\n # Convert negative axis. 
Add one to account for\n # inclusion of scales axis above.\n axis = (axis % data.ndim) + 1\n # perform the convolution in frequency space\n slices = [slice(None)] + [None for _ in data.shape]\n slices[axis] = slice(None)\n slices = tuple(slices)\n out = jfft.ifft(data_fft[None] * wavelet_freq[slices],\n n=pn, axis=axis)\n slices = [slice(None) for _ in out.shape]\n slices[axis] = slice(None, n)\n slices = tuple(slices)\n if data.ndim == 1:\n return out[slices].squeeze()\n else:\n return out[slices]", "title": "" }, { "docid": "fdf12a0f35f170a6a2f193545a7493f1", "score": "0.43986154", "text": "def uhf_fixed_occ(atoms,occa, occb,**kwargs):\n\n from biorthogonal import biorthogonalize,pad_out\n\n ConvCriteria = kwargs.get('ConvCriteria',settings.ConvergenceCriteria)\n MaxIter = kwargs.get('MaxIter',settings.MaxIters)\n DoAveraging = kwargs.get('DoAveraging',settings.Averaging)\n averaging = kwargs.get('averaging',settings.MixingFraction)\n ETemp = kwargs.get('ETemp',settings.ElectronTemperature)\n\n bfs = getbasis(atoms,**kwargs)\n\n S,h,Ints = getints(bfs,atoms,**kwargs)\n\n nel = atoms.get_nel()\n\n nalpha,nbeta = atoms.get_alphabeta() #pass in kwargs for multiplicity\n\n orbsa = kwargs.get('orbsa')\n orbsb = kwargs.get('orbsb')\n if (orbsa == None or orbsb == None):\n orbe,orbs = geigh(h,S)\n orbea = orbeb = orbe\n orbsa = orbsb = orbs\n \n #print \"A Trial Orbital Energies:\\n\", orbea\n\n print \"A Trial Orbitals:\\n\"\n pad_out(orbsa)\n\n #print \"B Trial Orbital Energies:\\n\",orbeb\n\n print \"B Trial Orbitals:\\n\"\n pad_out(orbsb)\n \n enuke = atoms.get_enuke()\n eold = 0.\n\n for i in xrange(MaxIter):\n print \"SCF Iteration:\",i,\"Starting Energy:\",eold\n #save the starting orbitals\n oldorbs_a=orbsa\n oldorbs_b=orbsb\n\n Da = mk_auger_dens(orbsa,occa)\n Db = mk_auger_dens(orbsb,occb)\n #Da_std = mkdens(orbsa,0,nalpha)\n #Db_std = mkdens(orbsb,0,nbeta)\n #pad_out(Da - Da_std ) #use to test mk_aug_dens with ground state occupations\n #pad_out(Db - Db_std )\n \n \n Ja = getJ(Ints,Da)\n Jb = getJ(Ints,Db)\n Ka = getK(Ints,Da)\n Kb = getK(Ints,Db)\n Fa = h+Ja+Jb-Ka\n Fb = h+Ja+Jb-Kb\n\n orbea,orbsa = geigh(Fa,S)\n orbeb,orbsb = geigh(Fb,S)\n \n #save the new orbitals\n neworbs_a=orbsa\n neworbs_b=orbsb\n \n #now we biorthogonalize the new orbitals to the old ones\n #to setup occupation arrays for the next scf cycle\n orbsa = biorthogonalize(neworbs_a,oldorbs_a,S,nalpha,occa)\n orbsb = biorthogonalize(neworbs_b,oldorbs_b,S,nbeta,occb)\n \n energya = get_energy(h,Fa,Da)\n energyb = get_energy(h,Fb,Db)\n energy = (energya+energyb)/2+enuke\n Dab = Da+Db\n Eone = trace2(Dab,h)\n Ej = 0.5*trace2(Dab,Ja+Jb)\n Ek = -0.5*(trace2(Da,Ka)+trace2(Db,Kb))\n \n #print \"%d %f %f %f %f\" % (i,energy,Eone,Ej,Ek)\n \n logging.debug(\"%d %f %f %f %f\" % (i,energy,Eone,Ej,Ek))\n if abs(energy-eold) < ConvCriteria: break\n eold = energy\n if i==(MaxIter-1):\n print \"Warning: Reached maximum number of SCF cycles may want to rerun calculation with more SCF cycles\"\n logger.info(\"Final UHF energy for system %s is %f\" % (atoms.name,energy))\n return energy,(orbea,orbeb),(orbsa,orbsb)", "title": "" }, { "docid": "f0260561088515179bb58668290fc111", "score": "0.43964058", "text": "def freqresp(self, kv, wake_prop_settings=None):\n\n MS = self.MS\n K = self.K\n K_star = self.K_star\n\n Nk = len(kv)\n kvdt = kv * self.dt\n zv = np.cos(kvdt) + 1.j * np.sin(kvdt)\n Yfreq = np.empty((self.outputs, self.inputs, Nk,), dtype=np.complex_)\n\n ### loop frequencies\n for kk in range(Nk):\n\n ### build Cw complex\n 
Cw_cpx = self.get_Cw_cpx(zv[kk], settings=wake_prop_settings)\n\n # get bound state freq response\n if self.remove_predictor:\n Ygamma = np.linalg.solve(\n self.A0 + libsp.dot(\n self.A0W, Cw_cpx, type_out=libsp.csc_matrix), self.Bss)\n else:\n Ygamma = zv[kk] ** (-1) * \\\n np.linalg.solve(\n self.A0 + libsp.dot(\n self.A0W, Cw_cpx, type_out=libsp.csc_matrix), self.Bss)\n Ygamma_star = Cw_cpx.dot(Ygamma)\n\n # determine factor for delta of bound circulation\n if self.integr_order == 0:\n dfact = (1.j * kv[kk]) * self.dt\n elif self.integr_order == 1:\n dfact = (1. - 1. / zv[kk])\n elif self.integr_order == 2:\n dfact = .5 * (3. - 4. / zv[kk] + 1. / zv[kk] ** 2)\n else:\n raise NameError('Specify valid integration order')\n\n Yfreq[:, :, kk] = np.dot(self.Css[:, :K], Ygamma) + \\\n np.dot(self.Css[:, K:K + K_star], Ygamma_star) + \\\n np.dot(self.Css[:, -K:], dfact * Ygamma) + \\\n self.Dss\n\n return Yfreq", "title": "" }, { "docid": "bc7fd6db4c6ad31914d1cbc61b3a44f0", "score": "0.43925706", "text": "def get_frequency(self, index):\n return self.fmin + self.df * index", "title": "" }, { "docid": "0818ba6a68c7b4ef441852a078be2fe1", "score": "0.43898627", "text": "def ffprofile_corr(a, Fs = 32000, window=512, energy_per_sample_thresh = 0.02, plot = False):\r\n a = np.divide(a, np.max(np.abs(a)))\r\n ff_est_i = ffcalc_jk(a)[0]\r\n ff_est = float(ff_est_i)\r\n f_range = ff_est * 0.1\r\n f_contour = np.zeros(len(a))\r\n energy = np.zeros(len(a))\r\n for x in range(len(a)):\r\n data = ffcalc_jk(a[x:x+window-1], fmin = ff_est - f_range, fmax = ff_est + f_range)\r\n f_contour[x] = data[0]\r\n energy[x] = data[1] / window\r\n if energy[x] > energy_per_sample_thresh:\r\n ff_est = f_contour[x]\r\n f_range = 100\r\n else:\r\n ff_est = float(ff_est_i)\r\n f_range = ff_est * 0.1\r\n\r\n t0= np.divide(np.arange(0, len(f_contour), dtype = float), Fs)\r\n idxs = np.arange(0,len(f_contour))\r\n idx_i = idxs[energy>energy_per_sample_thresh][0]\r\n idx_t = idxs[energy>energy_per_sample_thresh][-1]\r\n f_contour = np.interp(t0[idx_i:idx_t], t0[energy>energy_per_sample_thresh], f_contour[energy>energy_per_sample_thresh])\r\n t = t0[idx_i:idx_t]\r\n\r\n if plot:\r\n fig = plt.figure()\r\n ax = fig.add_subplot(2,1,1)\r\n ax.specgram(a, Fs=Fs)\r\n ax.plot(t, f_contour,'k')\r\n plt.xlim([0, t[-1]])\r\n ax = fig.add_subplot(2,1,2)\r\n plt.plot(t0, energy)\r\n plt.xlim([0, t[-1]])\r\n # ax.plot([t[0], t[-1]], [ave_ff_freq, ave_ff_freq],'-b')\r\n # ax.plot([t[0], t[-1]], [ave_ff_freq*float(1+float(percent_boundry)/100), ave_ff_freq*float(1+float(percent_boundry)/100)],'b')\r\n # ax.plot([t[0], t[-1]], [ave_ff_freq*float(1-float(percent_boundry)/100), ave_ff_freq*float(1-float(percent_boundry)/100)],'b')\r\n plt.show()\r\n return (t, f_contour)", "title": "" }, { "docid": "3ab5b9b6ee9a10968e42def3b514f824", "score": "0.43841848", "text": "def featurize(self, struct, idx):\n cevals = []\n self.lgf.setup_structure(structure=struct)\n se = self.lgf.compute_structure_environments(\n only_indices=[idx],\n maximum_distance_factor=self.max_dist_fac)\n for ce in self.cetypes:\n try:\n tmp = se.get_csms(idx, ce)\n tmp = tmp[0]['symmetry_measure'] if len(tmp) != 0 \\\n else self.max_csm\n tmp = tmp if tmp < self.max_csm else self.max_csm\n cevals.append(1 - tmp / self.max_csm)\n except IndexError:\n cevals.append(0)\n return np.array(cevals)", "title": "" }, { "docid": "fd349de1cc4bd651f6258ab3b8949769", "score": "0.43743673", "text": "def fd_waveform(\n samples, approximant, delta_f, f_low, f_high, f_ref=20., project=None,\n 
ind=0, longAscNodes=0., eccentricity=0., LAL_parameters=None,\n mode_array=None, pycbc=False, flen=None\n):\n from gwpy.frequencyseries import FrequencySeries\n\n waveform_args, _samples = _waveform_args(\n samples, f_ref=f_ref, ind=ind, longAscNodes=longAscNodes,\n eccentricity=eccentricity\n )\n approx = _lal_approximant_from_string(approximant)\n if mode_array is not None:\n LAL_parameters = _insert_mode_array(\n mode_array, LAL_parameters=LAL_parameters\n )\n hp, hc = lalsim.SimInspiralChooseFDWaveform(\n *waveform_args, delta_f, f_low, f_high, f_ref, LAL_parameters, approx\n )\n hp = FrequencySeries(hp.data.data, df=hp.deltaF, f0=0.)\n hc = FrequencySeries(hc.data.data, df=hc.deltaF, f0=0.)\n if pycbc:\n hp, hc = hp.to_pycbc(), hc.to_pycbc()\n if flen is not None:\n hp.resize(flen)\n hc.resize(flen)\n if project is None:\n return {\"h_plus\": hp, \"h_cross\": hc}\n ht = _project_waveform(\n project, hp, hc, _samples[\"ra\"], _samples[\"dec\"], _samples[\"psi\"],\n _samples[\"geocent_time\"]\n )\n return ht", "title": "" }, { "docid": "281cf43ecd964d4dcb8985ff77da2dfb", "score": "0.43460166", "text": "def get_calfact(dataI, dataQ, A_masq, fmod=1, mod_factor=0.5, wsample=[], docalib=True):\n ndet, nint, nptint = dataI.shape\n\n calfact = np.zeros((ndet, nint), np.float32)\n Icc, Qcc = np.zeros((ndet, nint), np.float32), np.zeros((ndet, nint), np.float32)\n P0 = np.zeros((ndet, nint), np.float32)\n R0 = np.zeros((ndet, nint), np.float32)\n interferograms = np.zeros_like(dataI)\n\n for iint in range(nint): # single interferogram\n\n Icurrent = dataI[:, iint, :]\n Qcurrent = dataQ[:, iint, :]\n A_masqcurrent = A_masq[iint, :]\n\n l1 = A_masqcurrent == 3 # A_masq is the flag for calibration, values:-> 3: lower data\n l2 = A_masqcurrent == 1 # 1: higher data\n l3 = A_masqcurrent == 0 # 0: normal data\n\n # Make sure we have no issues with l1 and l2\n l1 = binary_erosion(l1, iterations=2)\n l2 = binary_erosion(l2, iterations=2)\n\n # remove first point in l3\n l3[:6] = False\n\n # Check for cases with missing data in one of the modulation (all flagged)\n if np.all(~l1) or np.all(~l2) or np.all(~l3):\n get_calfact._log.warning(\"Interferogram {} could not be calibrated\".format(iint))\n continue\n\n x1 = np.median(Icurrent[:, l1], axis=1)\n y1 = np.median(Qcurrent[:, l1], axis=1)\n x2 = np.median(Icurrent[:, l2], axis=1)\n y2 = np.median(Qcurrent[:, l2], axis=1)\n x3 = np.median(Icurrent[:, l3], axis=1)\n y3 = np.median(Qcurrent[:, l3], axis=1)\n\n # Fit circle\n den = 2.0 * (x1 * (y2 - y3) + x2 * (y3 - y1) + x3 * (y1 - y2))\n Ic = (x1 * x1 + y1 * y1) * (y2 - y3) + (x2 * x2 + y2 * y2) * (y3 - y1) + (x3 * x3 + y3 * y3) * (y1 - y2)\n Qc = (x1 * x1 + y1 * y1) * (x3 - x2) + (x2 * x2 + y2 * y2) * (x1 - x3) + (x3 * x3 + y3 * y3) * (x2 - x1)\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n Ic /= den\n Qc /= den\n\n # Filter\n nfilt = 9\n if iint < nfilt:\n epsi = np.zeros(ndet) + 1.0 / np.double(iint + 1)\n else:\n epsi = np.zeros(ndet) + np.double(1.0 / nfilt)\n # epsi=1.0\n valIQ = (Ic * Ic) + (Qc * Qc)\n # CHECK : This will take the last element for the first interferogram\n dist = (Icc[:, iint - 1] - Ic) * (Icc[:, iint - 1] - Ic) + (Qcc[:, iint - 1] - Qc) * (Qcc[:, iint - 1] - Qc)\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n epsi[dist > 0.05 * valIQ] = 1.0\n\n # TODO: This could be vectorized\n if iint > 0:\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n Ic = Ic * epsi + (1 - epsi) * Icc[:, iint - 1]\n Qc = Qc * epsi + 
(1 - epsi) * Qcc[:, iint - 1]\n\n Icc[:, iint] = Ic\n Qcc[:, iint] = Qc\n\n # Comupute circle radius and zero angle\n # TODO: Not used ??\n # rc = np.sqrt((x3 - Ic) * (x3 - Ic) + (y3 - Qc) * (y3 - Qc))\n P0[:, iint] = np.arctan2(Ic, Qc)\n\n # compute angle difference between two modulation points\n r0 = np.arctan2(Ic - x3, Qc - y3)\n R0[:, iint] = r0\n\n r1 = np.arctan2(Ic - x1, Qc - y1)\n r2 = np.arctan2(Ic - x2, Qc - y2)\n diffangle = angle0(r2 - r1)\n\n # Get calibration factor\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n diffangle[np.abs(diffangle) < 0.001] = 1.0\n\n calcoeff = 2 / diffangle\n calfact[:, iint] = calcoeff * fmod * mod_factor\n\n # r = np.arctan2(Icc[:,iint]-Icurrent,np.transpose(Qcc[:,iint])-Qcurrent)\n r = np.arctan2(Icc[:, iint][:, np.newaxis] - Icurrent, Qcc[:, iint][:, np.newaxis] - Qcurrent)\n\n # r = angleto0(np.arctan2((Icc[:,iint]-Icurrent.transpose()),\\\n # (Qcc[:,iint]-Qcurrent.transpose())) - r0).transpose()\n ra = angle0(r - r0[:, np.newaxis])\n\n if docalib:\n interferograms[:, iint, :] = calfact[:, iint][:, np.newaxis] * ra\n else:\n interferograms[:, iint, :] = ra\n\n # selection mask for normal data\n mask = A_masq == 0\n mask[:, :6] = False\n interferograms = np.ma.array(interferograms, mask=np.tile(~mask.flatten(), ndet), fill_value=0)\n\n # Mask nans if present\n nan_itg = np.isnan(interferograms.data)\n if np.any(nan_itg):\n interferograms.mask |= nan_itg\n\n output = {\n \"Icc\": Icc,\n \"Qcc\": Qcc,\n \"P0\": P0,\n \"R0\": R0,\n \"calfact\": calfact,\n \"continuum\": compute_continuum(R0, P0, calfact),\n \"interferograms\": interferograms,\n }\n\n return output", "title": "" }, { "docid": "cb86b2c4e3368a168cde66742f6c39b6", "score": "0.43405655", "text": "def ssfrf(A, B, C, D, omega_low, omega_high, in_index, out_index,\n num_freqs=1000):\n # A, B, C, D = ctrl.ssdata(sys)\n if 0 < in_index < (B.shape[1] + 1) and 0 < out_index < (C.shape[0] + 1):\n sa = A.shape[0]\n omega = np.linspace(omega_low, omega_high, num_freqs)\n H = omega * 1j\n i = 0\n for i, w in enumerate(omega):\n H[i] = ([email protected](w * 1j * np.eye(sa) - A, B) + D)[out_index - 1,\n in_index - 1]\n else:\n raise ValueError(\n 'Input {} or output {} infeasible.'.format(in_index, out_index))\n return omega.reshape(1, -1), H.reshape(1, -1)", "title": "" }, { "docid": "f90919497b5da89fe1b5282011dd412f", "score": "0.43272364", "text": "def cqt_algorithm(signal:np.ndarray, sampling_f:int=48_000,\n f_range:list = (24_000, 20), tones=[]):\n if isinstance(tones, np.ndarray):\n f_range = (tones[-1], tones[0])\n bins_per_octave = 30\n else:\n bins_per_octave = 45\n n_octaves = int(np.log2(f_range[0]/f_range[1]))\n n_bins = bins_per_octave*n_octaves\n hop_length = 2**(n_octaves-1)\n spec_2d = np.abs(librosa.cqt(signal, sampling_f, hop_length = hop_length,\n fmin = f_range[1], n_bins = n_bins,\n bins_per_octave = bins_per_octave,\n filter_scale = 0.8))\n freqs = librosa.cqt_frequencies(fmin = 20, n_bins = n_bins,\n bins_per_octave = bins_per_octave)\n times = hop_length/sampling_f*np.arange(len(spec_2d[0]))\n return freqs, times, spec_2d", "title": "" }, { "docid": "dfeb03690a6a351a46dde6c9af6a4be9", "score": "0.4323965", "text": "def aftan(self, prephdir, sps = 1., channel = 'Z', outdir = None, inftan = pyaftan.InputFtanParam(),\\\n basic1 = True, basic2 = True, pmf1 = True, pmf2 = True, verbose = False, f77 = True, pfx = 'DISP',\n walltimeinhours = None, walltimetol = 2000., startind = 1):\n print ('[%s] [AFTAN] start aftan analysis' 
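The calibration in `get_calfact` above starts from the classic three-point circle fit in the (I, Q) plane. That determinant formula on its own, checked against a known circle:

```python
import numpy as np

def circumcenter(x1, y1, x2, y2, x3, y3):
    # same determinant formula as the passage (vectorised per detector there)
    den = 2.0 * (x1 * (y2 - y3) + x2 * (y3 - y1) + x3 * (y1 - y2))
    Ic = ((x1**2 + y1**2) * (y2 - y3) + (x2**2 + y2**2) * (y3 - y1)
          + (x3**2 + y3**2) * (y1 - y2)) / den
    Qc = ((x1**2 + y1**2) * (x3 - x2) + (x2**2 + y2**2) * (x1 - x3)
          + (x3**2 + y3**2) * (x2 - x1)) / den
    return Ic, Qc

# three points on a circle of radius 2 centred at (1, -1)
ang = np.array([0.3, 1.7, 4.0])
px, py = 1 + 2 * np.cos(ang), -1 + 2 * np.sin(ang)
print(circumcenter(px[0], py[0], px[1], py[1], px[2], py[2]))  # ~(1.0, -1.0)
```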
%datetime.now().isoformat().split('.')[0])\n if walltimeinhours != None:\n walltime = walltimeinhours*3600.\n else:\n walltime = 1e10\n stime4compute = timeit.default_timer()\n try:\n print (self.cat)\n except AttributeError:\n self.copy_catalog()\n if len(self.cat) >= 10000:\n raise ValueError ('number of events is larger than 10000')\n # Loop over stations\n Nsta = len(self.waveforms.list())\n ista = startind-1\n for staid in (self.waveforms.list())[(startind-1):]:\n etime4compute = timeit.default_timer()\n if etime4compute - stime4compute > walltime - walltimetol:\n print ('================================== End computation due to walltime ======================================')\n print ('start from '+str(ista+1)+' next run!')\n break\n netcode, stacode = staid.split('.')\n ista += 1\n print ('[%s] [AFTAN] Station ID: %s %d/%d' %(datetime.now().isoformat().split('.')[0], \\\n staid, ista, Nsta))\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n tmppos = self.waveforms[staid].coordinates\n stla = tmppos['latitude']\n stlo = tmppos['longitude']\n stz = tmppos['elevation_in_m']\n outstr = ''\n taglst = self.waveforms[staid].get_waveform_tags()\n if len(taglst) == 0:\n print ('!!! No data for station: '+ staid)\n continue\n # Loop over tags(events)\n ievent = 0\n Ndata = 0\n for event in self.cat:\n ievent += 1\n evid = 'E%04d' % (ievent) # evid, \n pmag = event.preferred_magnitude()\n magnitude = pmag.mag\n Mtype = pmag.magnitude_type\n event_descrip = event.event_descriptions[0].text+', '+event.event_descriptions[0].type\n porigin = event.preferred_origin()\n otime = porigin.time\n timestr = otime.isoformat()\n evlo = porigin.longitude\n evla = porigin.latitude\n try:\n evdp = porigin.depth/1000.\n except:\n continue\n event_id = event.resource_id.id.split('=')[-1]\n timestr = otime.isoformat()\n oyear = otime.year\n omonth = otime.month\n oday = otime.day\n ohour = otime.hour\n omin = otime.minute\n osec = otime.second\n label = '%d_%d_%d_%d_%d_%d' %(oyear, omonth, oday, ohour, omin, osec)\n tag = 'surf_' + label\n if not tag in taglst:\n continue\n staid_aux = netcode+'_'+stacode+'_'+channel\n try:\n if tag in self.auxiliary_data['DISPbasic1'].list():\n if staid_aux in self.auxiliary_data['DISPbasic1'][tag].list():\n continue\n except:\n pass\n dist, az, baz = obspy.geodetics.gps2dist_azimuth(evla, evlo, stla, stlo) # distance is in m\n dist = dist/1000.\n if baz < 0:\n baz += 360.\n #-------------------\n # get waveform data\n #-------------------\n try:\n inST = self.waveforms[staid][tag].select(component = channel)\n except KeyError:\n continue\n if len(inST) == 0:\n continue\n else:\n if len(inST) > 1:\n print ('!!! WARNING: more traces stored: '+tag+' station: ' + staid )\n tr = inST[0]\n # resample\n target_dt = 1./sps\n dt = tr.stats.delta\n if abs(dt - target_dt)> (min(dt, target_dt))/100.:\n factor = np.round(target_dt/dt)\n if abs(factor*dt - target_dt) < min(dt, target_dt/1000.):\n dt = target_dt/factor\n tr.stats.delta = dt\n else:\n print(target_dt, dt)\n raise ValueError('CHECK!' 
+ staid)\n tr.filter(type = 'lowpass', freq = sps/2., zerophase = True) # prefilter\n tr.decimate(factor = int(factor), no_filter = True)\n # # # try:\n # # # tr.filter(type = 'lowpass', freq = sps/2., zerophase = True) # prefilter\n # # # tr.decimate(factor = int(factor), no_filter = True)\n # # # except:\n # # # continue\n else:\n tr.stats.delta = target_dt\n stime = tr.stats.starttime\n etime = tr.stats.endtime\n tr.stats.sac = {}\n tr.stats.sac['dist']= dist\n tr.stats.sac['b'] = stime - otime\n tr.stats.sac['e'] = etime - otime\n aftanTr = pyaftan.aftantrace(tr.data, tr.stats)\n phvelname = prephdir + \"/%s.%s.pre\" %(evid, staid)\n if not os.path.isfile(phvelname):\n print ('*** WARNING: '+ phvelname+' not exists!')\n continue\n if f77:\n aftanTr.aftanf77(pmf=inftan.pmf, piover4=inftan.piover4, vmin=inftan.vmin, vmax=inftan.vmax, tmin=inftan.tmin, tmax=inftan.tmax,\n tresh=inftan.tresh, ffact=inftan.ffact, taperl=inftan.taperl, snr=inftan.snr, fmatch=inftan.fmatch, nfin=inftan.nfin,\n npoints=inftan.npoints, perc=inftan.perc, phvelname=phvelname)\n else:\n aftanTr.aftan(pmf=inftan.pmf, piover4=inftan.piover4, vmin=inftan.vmin, vmax=inftan.vmax, tmin=inftan.tmin, tmax=inftan.tmax,\n tresh=inftan.tresh, ffact=inftan.ffact, taperl=inftan.taperl, snr=inftan.snr, fmatch=inftan.fmatch, nfin=inftan.nfin,\n npoints=inftan.npoints, perc=inftan.perc, phvelname=phvelname)\n aftanTr.get_snr(ffact = inftan.ffact) # SNR analysis\n staid_aux = tag+'/'+netcode+'_'+stacode+'_'+channel\n #-----------------------------------\n # save aftan results to ASDF dataset\n #-----------------------------------\n if basic1:\n parameters = {'Tc': 0, 'To': 1, 'U': 2, 'C': 3, 'ampdb': 4, 'dis': 5, 'snrdb': 6, 'mhw': 7, 'amp': 8, 'Np': aftanTr.ftanparam.nfout1_1}\n self.add_auxiliary_data(data = aftanTr.ftanparam.arr1_1, data_type = 'DISPbasic1',\\\n path = staid_aux, parameters = parameters)\n if basic2:\n parameters = {'Tc': 0, 'To': 1, 'U': 2, 'C': 3, 'ampdb': 4, 'snrdb': 5, 'mhw': 6, 'amp': 7, 'Np': aftanTr.ftanparam.nfout2_1}\n self.add_auxiliary_data(data = aftanTr.ftanparam.arr2_1, data_type = 'DISPbasic2',\\\n path = staid_aux, parameters = parameters)\n if inftan.pmf:\n if pmf1:\n parameters = {'Tc': 0, 'To': 1, 'U': 2, 'C': 3, 'ampdb': 4, 'dis': 5, 'snrdb': 6, 'mhw': 7, 'amp': 8, 'Np': aftanTr.ftanparam.nfout1_2}\n self.add_auxiliary_data(data = aftanTr.ftanparam.arr1_2, data_type = 'DISPpmf1',\\\n path = staid_aux, parameters = parameters)\n if pmf2:\n parameters = {'Tc': 0, 'To': 1, 'U': 2, 'C': 3, 'ampdb': 4, 'snrdb': 5, 'mhw': 6, 'amp': 7, 'snr':8, 'Np': aftanTr.ftanparam.nfout2_2}\n self.add_auxiliary_data(data = aftanTr.ftanparam.arr2_2, data_type = 'DISPpmf2',\\\n path = staid_aux, parameters = parameters)\n if outdir is not None:\n if not os.path.isdir(outdir+'/'+pfx+'/'+tag):\n os.makedirs(outdir+'/'+pfx+'/'+tag)\n foutPR = outdir+'/'+pfx+'/'+tag+'/'+ staid+'_'+channel+'.SAC'\n aftanTr.ftanparam.writeDISP(foutPR)\n Ndata += 1\n outstr += otime.date.isoformat()\n outstr += ' '\n print('--- %4d traces processed' %Ndata)\n if verbose:\n print('EVENT DATE: '+outstr)\n print('-----------------------------------------------------------------------------------------------------------')\n print ('[%s] [AFTAN] all done' %datetime.now().isoformat().split('.')[0])\n return", "title": "" }, { "docid": "82bd0f8ea17433dc7b8374189103d1b7", "score": "0.43182787", "text": "def _rain_coeff(freqHz: float, polarization: Union[str, float], elevation: float):\n freqHz = np.asarray(freqHz)\n assert (\n (1e9 <= freqHz) & 
(freqHz < 1e16)\n ).all(), \"Model validity bounds: 1-1000 GHz\" # type: ignore\n\n if polarization == \"v\":\n polarization = 90.0\n elif polarization == \"h\":\n polarization = 0.0\n elif isinstance(polarization, (int, float)) and 0.0 <= polarization <= 90.0:\n elevation = np.radians(elevation)\n polarization = np.radians(elevation)\n else:\n raise ValueError(f\"Unknown polarization {polarization}\")\n\n if np.isclose(polarization, 0.0):\n # Table 1\n ak = (-5.33980, -0.35351, -0.23789, -0.94158)\n bk = (-0.10008, 1.26970, 0.86036, 0.64552)\n ck = (1.13098, 0.45400, 0.15354, 0.16817)\n mk = -0.18961\n Ck = 0.71147\n\n # Table 3\n aa = (-0.14318, 0.29591, 0.32177, -5.37610, 16.1721)\n ba = (1.82442, 0.77564, 0.63773, -0.96230, -3.29980)\n ca = (-0.55187, 0.19822, 0.13164, 1.47828, 3.43990)\n ma = 0.67849\n Ca = -1.95537\n elif np.isclose(polarization, 90.0):\n # Table 2\n ak = (-3.80595, -3.44965, -0.39902, 0.50167)\n bk = (0.56934, -0.22911, 0.73042, 1.07319)\n ck = (0.81061, 0.51059, 0.11899, 0.27195)\n mk = -0.16398\n Ck = 0.63297\n\n # Table 4\n aa = (-0.07771, 0.56727, -0.20238, -48.2991, 48.5833)\n ba = (2.33840, 0.95545, 1.14520, 0.791669, 0.791459)\n ca = (-0.76284, 0.54039, 0.26809, 0.116226, 0.116479)\n ma = -0.053739\n Ca = 0.83433\n else:\n # %% elliptical polarization\n av, kv = _rain_coeff(freqHz, \"v\", elevation)\n ah, kh = _rain_coeff(freqHz, \"h\", elevation)\n\n assert isinstance(polarization, (float, int))\n # Equation 4\n k = (kh + kv + (kh - kv) * np.cos(elevation) ** 2 * np.cos(2.0 * polarization)) / 2.0\n # Equation 5\n a = (\n kh * ah\n + kv * av\n + (kh * ah - kv * av) * np.cos(elevation) ** 2 * np.cos(2 * polarization)\n ) / (2.0 * k)\n\n return a, k\n # %%\n logF = np.log10(freqHz / 1e9)\n # %% compute k (Equation 2)\n logk = mk * logF + Ck\n for j in range(4):\n logk += ak[j] * np.exp(-((logF - bk[j]) / ck[j]) ** 2)\n\n k = 10.0 ** logk\n # %% compute alpha==a (Equation 3)\n a = ma * logF + Ca\n for j in range(5):\n a += aa[j] * np.exp(-((logF - ba[j]) / ca[j]) ** 2)\n\n return a, k", "title": "" }, { "docid": "9ac1013ef45a01eb02ad55a7f60cad55", "score": "0.43135294", "text": "def model_data(self,wav,dt,t0,minf,maxf,vel,ref,jf=1,nrmax=3,eps=0.,dtmax=5e-05,time=True,\n ntx=0,nty=0,px=0,py=0,nthrds=1,sverb=True,wverb=False) -> np.ndarray:\n # Save wavelet temporal parameters\n nt = wav.shape[0]; it0 = int(t0/dt)\n\n # Create the input frequency domain source and get original frequency axis\n self.__nwo,self.__ow,self.__dw,wfft = self.fft1(wav,dt,minf=minf,maxf=maxf)\n wfftd = wfft[::jf]\n self.__nwc = wfftd.shape[0] # Get the number of frequencies to compute\n self.__dwc = jf*self.__dw\n\n if(sverb or wverb): print(\"Frequency axis: nw=%d ow=%f dw=%f\"%(self.__nwc,self.__ow,self.__dwc))\n\n # Single square root object\n ssf = ssr3(self.__nx ,self.__ny,self.__nz , # Spatial Sizes\n self.__dx ,self.__dy,self.__dz , # Spatial Samplings\n self.__nwc,self.__ow,self.__dwc,eps, # Frequency axis\n ntx,nty,px,py, # Taper and padding\n dtmax,nrmax,nthrds) # Reference velocities\n\n # Compute slowness and reference slownesses\n slo = 1/vel\n ssf.set_slows(slo)\n\n # Allocate output data (surface wavefield)\n datw = np.zeros([self.__nexp,self.__nwc,self.__ny,self.__nx],dtype='complex64')\n\n # Allocate the source for one shot\n sou = np.zeros([self.__nwc,self.__ny,self.__nx],dtype='complex64')\n\n # Loop over sources\n k = 0\n for icrd in progressbar(self.__scoords,\"nexp:\",verb=sverb):\n # Get the source coordinates\n sy = icrd[0]; sx = icrd[1]\n # Create the source for 
this shot\n sou[:] = 0.0\n sou[:,sy,sx] = wfftd[:]\n # Downward continuation\n ssf.modallw(ref,sou,datw[k],wverb)\n k += 1\n\n # Reshape output data\n datwr = datw.reshape([self.__nsy,self.__nsx,self.__nwc,self.__ny,self.__nx])\n\n if(time):\n # Inverse fourier transform\n datt = self.data_f2t(datwr,self.__nwo,self.__ow,self.__dwc,nt,it0)\n return datt\n else:\n return datwr", "title": "" }, { "docid": "2f00662182a94cc23b9d5601baf856af", "score": "0.4312005", "text": "def wavecalib(band,profile,method=True):\n band=band[0:4]\n nw=profile.shape[0]\n \n if method:\n if band == '6562':\n line=np.array([6561.097,6564.206])\n lamb0=6562.817\n dldw=0.019182\n elif band == '8542':\n line=np.array([8540.817,8546.222])\n lamb0=8542.090\n dldw=-0.026252\n elif band == '5890':\n line=np.array([5889.951,5892.898])\n lamb0=5889.9509\n dldw=0.016847\n elif band == '5434':\n line=np.array([5434.524,5436.596])\n lamb0=5434.5235\n dldw=-0.016847\n else:\n raise ValueError(\"The wavelength band value is not allowable.\\n\"+\n \"Please select the wavelenth \"+\n \"among '6562','8542','5890','5434'\")\n else:\n if band == '6562':\n line=np.array([6562.817,6559.580])\n lamb0=6562.817\n dldw=0.019182\n elif band == '8542':\n line=np.array([8542.089,8537.930])\n lamb0=8542.090\n dldw=-0.026252\n else:\n raise ValueError(\"The wavelength band value is not allowable.\\n\"\n \"Please select the wavelenth \"\n \"among '6562','8542','5890','5434'\")\n \n w=np.arange(nw)\n wl=np.zeros(2)\n wc=profile[20:nw-20].argmin()+20\n lamb=(w-wc)*dldw+lamb0\n \n for i in range(2):\n mask=np.abs(lamb-line[i]) <= 0.3\n wtmp=w[mask]\n ptmp=np.convolve(profile[mask],[-1,2,-1],'same')\n mask2=ptmp[1:-1].argmin()+1\n try:\n wtmp=wtmp[mask2-3:mask2+4]\n ptmp=ptmp[mask2-3:mask2+4]\n except:\n raise ValueError('Fail to wavelength calibration\\n'\n 'please change the method %s to %s' %(repr(method), repr(not method)))\n c=np.polyfit(wtmp-np.median(wtmp),ptmp,2)\n wl[i]=np.median(wtmp)-c[1]/(2*c[0]) #local minimum of the profile\n \n dldw=(line[1]-line[0])/(wl[1]-wl[0])\n wc=wl[0]-(line[0]-lamb0)/dldw\n wavelength=(w-wc)*dldw\n \n return wavelength", "title": "" }, { "docid": "548847585dfcef036e3f514ef256d480", "score": "0.4308171", "text": "def transform(self, waveform):\n # For now use NFFT of 256 to get appropriately wide freq bands, then\n # downsample in time\n Pxx, freqs, t = mlab.specgram(waveform, NFFT=self.NFFT, \n noverlap=self.noverlap, Fs=self.Fs, detrend=self.detrend, \n **self.specgram_kwargs)\n \n # Apply the normalization\n Pxx = Pxx * np.tile(freqs[:, np.newaxis] ** self.normalization, \n (1, Pxx.shape[1]))\n\n # strip out unused frequencies\n if self.max_freq is not None:\n Pxx = Pxx[freqs < self.max_freq, :]\n freqs = freqs[freqs < self.max_freq]\n if self.min_freq is not None:\n Pxx = Pxx[freqs > self.min_freq, :]\n freqs = freqs[freqs > self.min_freq]\n\n # Rebin in size \"downsample_ratio\". 
If last bin is not full, discard.\n Pxx_rebinned = []\n t_rebinned = []\n for n in range(0, len(t) - self.downsample_ratio + 1, \n self.downsample_ratio):\n Pxx_rebinned.append(\n np.median(Pxx[:, n:n+self.downsample_ratio], axis=1).flatten())\n t_rebinned.append(\n np.mean(t[n:n+self.downsample_ratio]))\n\n # Convert to arrays\n Pxx_rebinned_a = np.transpose(np.array(Pxx_rebinned))\n t_rebinned_a = np.array(t_rebinned)\n\n # log it and deal with infs\n Pxx_rebinned_a_log = -np.inf * np.ones_like(Pxx_rebinned_a)\n Pxx_rebinned_a_log[np.nonzero(Pxx_rebinned_a)] = \\\n 10 * np.log10(Pxx_rebinned_a[np.nonzero(Pxx_rebinned_a)])\n\n\n self.freqs = freqs\n self.t = t_rebinned_a\n return Pxx_rebinned_a_log, freqs, t_rebinned_a", "title": "" }, { "docid": "d9da4b5c1c0a89648f1d3f0c19c46b1a", "score": "0.43029743", "text": "def correlate(self, scan_bolus):\n retval = []\n if scan_bolus[0] == \"gps\":\n self.state = scan_bolus[1]\n elif scan_bolus[0] != \"gsm_modem_channel\":\n print(\"CgiCorrelator: Unsupported scan type: %s\" % scan_bolus[0])\n pass\n else:\n channel = scan_bolus[1]\n if channel[\"mcc\"] in [\"\", None]:\n return retval # We don't correlate incomplete CGIs...\n # Here's the feed comparison part:\n channel[\"feed_info\"] = self.get_feed_info(channel[\"mcc\"],\n channel[\"mnc\"],\n channel[\"lac\"],\n channel[\"cellid\"])\n chan, here = CgiCorrelator.build_chan_here(channel, self.state)\n channel[\"distance\"] = Utility.calculate_distance(chan[\"lon\"],\n chan[\"lat\"],\n here[\"lon\"],\n here[\"lat\"])\n # In the event we have incomplete information, bypass comparison.\n skip_feed_comparison = CgiCorrelator.should_skip_feed(channel)\n if skip_feed_comparison is False:\n if channel[\"mcc\"] not in self.mcc_list:\n msg = (\"MCC %s should not be observed by this sensor. 
ARFCN: %s CGI: %s Cell Priority: %s\" % # NOQA\n (channel[\"mcc\"], channel[\"arfcn\"], channel[\"cgi_str\"], channel[\"cell\"])) # NOQA\n retval.append(self.alerts.build_alert(130, msg))\n feed_comparison_results = self.feed_comparison(channel)\n for feed_alert in feed_comparison_results:\n retval.append(feed_alert)\n return retval", "title": "" }, { "docid": "b0a698f7a3bbfa0f4e3a6390525434e6", "score": "0.4297724", "text": "def frequenciesAnalyse(self, frequencies,nLoops=1.,intervalMode=-1,maxBit=1,fast=False,preAcquire=True):\n if preAcquire:\n print \"pre-acquire\"\n self.parent.AcquireTransfer(voltages=True, wantedChannels=3, transferAverage=False, getHorPos=True, getTimeStamps=False,nLoops=1)\n self.lenArray =4\n #Acquire\n #self.AcquireTransferV4(voltages=True, wantedChannels=15, transferAverage=False, getHorPos=True, getTimeStamps=False,nLoops=nLoops)\n self.parent.AcquireTransfer(voltages=True, wantedChannels=15, transferAverage=False, getHorPos=True, getTimeStamps=False,nLoops=nLoops)\n \n #Array for demodulation\n components=zeros((self.lenArray,2,self.parent.getLastWave() ['nbrSegmentArray'][0]))\n rotatedComponents=zeros((self.lenArray,2,self.parent.getLastWave()['nbrSegmentArray'][0]))\n \n #Array for clicks\n clicks=zeros((self.lenArray,self.parent.getLastWave()['nbrSegmentArray'][0]))\n \n #Array for probabilities\n probabilities=zeros(self.lenArray)\n\n results=dict()\n t0=time.time() \n #Demodulation\n for i in range(0,len(frequencies)):\n if frequencies[i][1]:\n\n index=frequencies[i][2]\n frequency=frequencies[i][0]\n\n hp=self.parent.getLastWave()['horPos']\n# hp[:]=0\n# (o1,o2,components[frequencies[i][2],0,:],components[frequencies[i][2],1,:],o3,o4)=self.demodulate2ChIQ(self.parent.getLastWave()['wave'][self.iChannel],self.parent.getLastWave()['wave'][self.qChannel],self.parent.getLastWave()['horPos'],self.parent.getLastWave()['nbrSegmentArray'][0],int(self.parent.getLastWave()['nbrSamplesPerSeg']),self.parent.getLastWave()['samplingTime'], frequency * 1E9,intervalMode=intervalMode)\n t=time.time()\n (o1,o2,components[frequencies[i][2],0,:],components[frequencies[i][2],1,:],o3,o4)=self.demodulate2ChIQ(self.parent.getLastWave()['wave'][self.iChannel],self.parent.getLastWave()['wave'][self.qChannel],hp,self.parent.getLastWave()['nbrSegmentArray'][0],int(self.parent.getLastWave()['nbrSamplesPerSeg']),self.parent.getLastWave()['samplingTime'], frequency*1e9 ,intervalMode=intervalMode)\n print \"demodulation time :\",(time.time()-t)\n t=time.time()\n length=c_long(self.parent.getLastWave()['nbrSegmentArray'][0])\n\n coX=zeros(self.parent.getLastWave()['nbrSegmentArray'][0],dtype=c_float)\n coY=zeros(self.parent.getLastWave()['nbrSegmentArray'][0],dtype=c_float)\n\n coX[:]=components[frequencies[i][2],0,:]\n coY[:]=components[frequencies[i][2],1,:]\n\n coU=zeros(self.parent.getLastWave()['nbrSegmentArray'][0],dtype=c_float)\n coV=zeros(self.parent.getLastWave()['nbrSegmentArray'][0],dtype=c_float)\n\n clicksf=zeros(self.parent.getLastWave()['nbrSegmentArray'][0],dtype=c_float)\n\n #co=zeros((2,self.parent.getLastWave()['nbrSegmentArray'][0]))\n #co[0,:]=components[frequencies[i][2],0,:]\n #co[1,:]=components[frequencies[i][2],1,:]\n\n #Rotate and count in C\n #(clicks[index,:],cop)=self.multiplexedBifurcationMapAdd(co,frequency) \n #Rotate and count in python\n #(clicks[index,:],cop)=self.multiplexedBifurcationMapAdd(co,frequency)\n\n [xOffset,yOffset,angle]=self.getCorrections(frequency)\n\n prob=zeros(1,dtype=c_float)\n\n\n 
self.easyMath.shiftRotate(coX.ctypes.data,coY.ctypes.data,coU.ctypes.data,coV.ctypes.data,c_float(xOffset),c_float(yOffset),c_float(angle),length)\n self.easyMath.aboveThreshold(coU.ctypes.data, clicksf.ctypes.data, c_float(0.), prob.ctypes.data, length)\n\n# cop=zeros((2,len(co[1])))\n# for h in range(0,len(co[1])):\n# cop[0,h]=(co[0,h]-self.Io)*cos(self.r)+(co[1,h]-self.Qo)*sin(self.r)\n# cop[1,h]=(co[0,h]-self.Io)*sin(self.r)-(co[1,h]-self.Qo)*cos(self.r)\n# if cop[0,h]>0:clicks[index,h]=1\n# rotatedComponents[frequencies[i][2],0,:]=cop[0,:]\n# rotatedComponents[frequencies[i][2],1,:]=cop[1,:]\n\n \n rotatedComponents[frequencies[i][2],0,:]=coU[:]\n rotatedComponents[frequencies[i][2],1,:]=coV[:]\n clicks[index,:]=clicksf[:]\n\n print \"rotation time :\"+str(time.time()-t)\n probabilities[frequencies[i][2]]=prob[0] \n results['b%i'%frequencies[i][2]]=probabilities[frequencies[i][2]] \n \n print 'total demodulation duration: %f sec' %(time.time()-t0)\n\n ##Calculate all probabilities\n\n nJBA=self.lenArray\n proba=zeros(2**nJBA,dtype=c_float)\n nbSegments=self.parent.getLastWave()['nbrSegmentArray'][0]\n t0=time.time()\n tempArray=zeros(nbSegments)\n for i in range(0,nJBA):\n for j in range(0,nbSegments):\n tempArray[j]+=clicks[i,j]*2**i\n for j in range(0,nbSegments):\n proba[tempArray[j]]+=1./nbSegments\n## self.easyMath.counter(clicks.ctypes.data,proba.ctypes.data, c_int(nJBA), c_long(nbSegments))\n probasInDict=dict()\n for v in range(0,2**nJBA):\n# probasInDict['p%i'%v]=mean([tempArray[i]==v else 0 for i in range(0,nbSegments) ])\n probasInDict['p%i'%v]=proba[v]\n\n\n\n if fast:\n return (results,dict(probasInDict.items()+results.items()))\n else:\n return (components,results,rotatedComponents,clicks,dict(probasInDict.items()+results.items()))", "title": "" }, { "docid": "caa177c8a640d3973a7ddd7e7e741451", "score": "0.4295183", "text": "def fft_sensors(data, fs):\n ff_data = []\n # FFT each sensor measurement\n for key in data:\n ff_freq = np.fft.rfftfreq(len(key), 1.0 / fs)\n ff_power = np.fft.rfft(key)\n ff_data.append(np.array([ff_freq, ff_power]))\n return ff_data", "title": "" }, { "docid": "4323ab40229180aa7fa51a0a7588c625", "score": "0.42950532", "text": "def pitch_tracking(self, original_audio, fs):\n self._original_audio = original_audio\n self._fs = fs\n if self.params.is_two_pass_nccf:\n (downsample_rate, downsampled_audio) = self._get_downsampled_audio(\n original_audio, fs,\n self.params.maximum_allowed_freq,\n self.params.is_run_filter)\n\n # calculate parameters for RAPT with input audio\n self._calculate_params(original_audio, fs, downsampled_audio, downsample_rate)\n\n # get F0 candidates using nccf\n nccf_results = self._run_nccf(original_audio, fs, downsampled_audio, downsample_rate)\n else:\n self._calculate_params(original_audio, fs)\n nccf_results = self._run_nccf(original_audio, fs)\n \n # dynamic programming - determine voicing state at each period candidate\n freq_estimate = self._get_freq_estimate(nccf_results[0], fs)\n\n # filter out high freq points\n for i, item in enumerate(freq_estimate):\n if item > 500.0:\n freq_estimate[i] = 0.0\n return nccf_results, freq_estimate", "title": "" }, { "docid": "75b2c5dc0fb16dfd83768a3c20fea68c", "score": "0.4295026", "text": "def _get_freq_estimate(self, nccf_results, sample_rate):\n results = []\n candidates = self._determine_state_per_frame(nccf_results, sample_rate)\n\n for candidate in candidates:\n if candidate > 0:\n results.append(sample_rate/candidate)\n else:\n results.append(0.0)\n\n return results", 
"title": "" }, { "docid": "8a0bb382aad3f99aeb59fa8fc0c3c03f", "score": "0.42924252", "text": "def calculate(self):\n if 'meas' not in self.data:\n return\n\n phases = sorted(self.data['meas'].keys())\n nr_corrs = len(self.devices['corr'].orbitcorr_psnames)\n\n # init ffwd array\n ffwd = _np.zeros((len(phases), nr_corrs))\n self.data['ffwd'] = ffwd\n\n # loop through different phases\n for i, phase in enumerate(phases):\n mdatum = self.data['meas'][phase]\n curr_deltas, *_ = \\\n self.calculate_at_phase(**mdatum)\n ffwd[i, :] = curr_deltas", "title": "" }, { "docid": "231af5b87d882fbba827ea9b2689efd5", "score": "0.4288185", "text": "def scan(interface, band, autotype, width):\n utils.log('Doing autochannel scan.')\n\n permitted_frequencies = get_permitted_frequencies(band, autotype, width)\n\n subprocess.call(('ip', 'link', 'set', interface, 'up'))\n\n # TODO(apenwarr): We really want to clear any old survey results first. But\n # there seems to be no iw command for that yet...\n #\n # TODO(apenwarr): This only scans each channel for 100ms. Ideally it should\n # scan for longer, to get a better activity sample. It would also be nice to\n # continue scanning in the background while hostapd is running, using 'iw\n # offchannel'. Retry this a few times if it fails, just in case there was a\n # scan already in progress started somewhere else (e.g. from waveguide).\n for _ in xrange(9):\n if utils.subprocess_quiet(('iw', 'dev', interface, 'scan', 'passive'),\n no_stdout=True) == 0:\n break\n time.sleep(0.5)\n\n # TODO(apenwarr): This algorithm doesn't deal with overlapping channels. Just\n # because channel 1 looks good doesn't mean we should use it; activity in\n # overlapping channels could destroy performance. In fact, overlapping\n # channel activity is much worse than activity on the main channel. Also, if\n # using 40 MHz or 80 MHz channel width, we should count activity in all the 20\n # MHz sub-channels separately, and choose the least-active sub-channel as the\n # primary.\n best_frequency = best_noise = best_ratio = frequency = None\n for tokens in utils.subprocess_line_tokens(\n ('iw', 'dev', interface, 'survey', 'dump')):\n # TODO(apenwarr): Randomize the order of channels. 
Otherwise when channels\n # are all about equally good, we would always choose exactly the same\n # channel, which might be bad in the case of hidden nodes.\n if len(tokens) >= 2 and tokens[0] == 'frequency:':\n frequency = tokens[1]\n noise = active = busy = None\n elif len(tokens) >= 2 and tokens[0] == 'noise:':\n noise = int(tokens[1])\n elif len(tokens) >= 4 and tokens[:3] == ('channel', 'active', 'time:'):\n active = int(tokens[3])\n elif len(tokens) >= 4 and tokens[:3] == ('channel', 'receive', 'time:'):\n busy = int(tokens[3])\n # TODO(rofrankel): busy or 1 might make more sense than busy + 1 here;\n # need to discuss with apenwarr@.\n ratio = (active + 1) * 1000 / (busy + 1)\n\n if frequency not in permitted_frequencies.split():\n continue\n\n # Some radios support both bands, but we only want to match channels on\n # the band we have chosen.\n if band[0] != frequency[0]:\n continue\n\n utils.log('freq=%s ratio=%s noise=%s', frequency, ratio, noise)\n\n if best_noise is None or best_noise - 15 > noise or best_ratio < ratio:\n best_frequency, best_ratio, best_noise = frequency, ratio, noise\n\n if not best_frequency:\n utils.log('Autoscan did not find any channel, picking random channel.')\n utils.log('Permitted frequencies: %s', permitted_frequencies)\n if not permitted_frequencies:\n utils.log('No default channel: type=%s band=%s width=%s',\n autotype, band, width)\n return None\n best_frequency = random.choice(permitted_frequencies.split())\n\n utils.log('autofreq=%s', best_frequency)\n\n for tokens in utils.subprocess_line_tokens(('iw', 'phy')):\n if len(tokens) >= 4 and tokens[2] == 'MHz':\n frequency = tokens[1]\n if frequency == best_frequency:\n channel = tokens[3].strip('[]')\n break\n\n if not channel:\n utils.log('No channel number matched freq=%s.', best_frequency)\n return None\n\n utils.log('autochannel=%s', channel)\n\n return channel", "title": "" }, { "docid": "34a167419633b445b2475d9ec98ebfc7", "score": "0.4281321", "text": "def calc():\n\n global xValues, potValues, startVals, vals, freqAmpl, freqAmplPlot, omegaPlot\n xValues = np.linspace(-3.0, +3.0, 5000)\n potValues = potErg(xValues)\n startVals = determineStartingValues(E) # starte im linken Minimum mit gegebener Energie E, düse am Angfang Richtung rechts\n vals = scipy.integrate.odeint(bewGl, y0=startVals, t=t) # integriere DGL\n freqAmpl = scipy.fft(vals[:, 0])\n freqAmplPlot = freqAmpl[0:len(freqAmpl)/2] # nur positive Freq plotten\n # berechne Fourier-Frequenzen zu positiven Frequenzen:\n omegaPlot = np.fft.fftfreq(len(t), np.max(t)/len(t))[0:len(t)/2]", "title": "" }, { "docid": "88424c98258e4e98f28a069d824517b9", "score": "0.42719942", "text": "def calculate(self, spin_id=None, scaling_matrix=None, verbosity=1, sim_index=None):\n\n # Test if the current pipe exists.\n check_pipe()\n\n # The spectrum types have not been set.\n if not hasattr(cdp, 'spectrum_type'):\n raise RelaxError(\"The spectrum types have not been set.\")\n\n # Test if the 2 spectra types 'ref' and 'sat' exist.\n if not 'ref' in list(cdp.spectrum_type.values()) or not 'sat' in list(cdp.spectrum_type.values()):\n raise RelaxError(\"The reference and saturated NOE spectra have not been loaded.\")\n\n # Loop over the spins.\n for spin in spin_loop():\n # Skip deselected spins.\n if not spin.select:\n continue\n\n # Average intensities and squared errors (if required).\n sat = 0.0\n sat_err2 = 0.0\n sat_count = 0\n ref = 0.0\n ref_err2 = 0.0\n ref_count = 0\n for id in cdp.spectrum_ids:\n # Sat spectra.\n if cdp.spectrum_type[id] == 
'sat':\n sat += spin.peak_intensity[id]\n sat_err2 += spin.peak_intensity_err[id]**2\n sat_count += 1\n\n # Ref spectra.\n if cdp.spectrum_type[id] == 'ref':\n ref += spin.peak_intensity[id]\n ref_err2 += spin.peak_intensity_err[id]**2\n ref_count += 1\n\n # Average the values and errors (variance averaging).\n sat = sat / sat_count\n sat_err2 = sat_err2 / sat_count\n ref = ref / ref_count\n ref_err2 = ref_err2 / ref_count\n\n # Calculate the NOE.\n spin.noe = sat / ref\n\n # Calculate the error.\n spin.noe_err = sqrt(sat_err2 * ref**2 + ref_err2 * sat**2) / ref**2", "title": "" }, { "docid": "d28ec351a4c13d6994c44d75e7a19449", "score": "0.42595628", "text": "def computemycwt (fd, signal):\n # N = signal.size\n fr = np.arange(1, 80) # np.linspace(1, 200, 400); # vector of frequencies\n w0 = 5 \n scales = fd*w0/fr/2/np.pi\n\n # J = 200 \n # scales = np.asarray([2**(i * 0.1) for i in range(J)])\n\n coef = mycwt(signal, scales)\n #t = np.arange (0, N, 1/fd, dtype = np.float64)\n return fr, coef", "title": "" }, { "docid": "bd17fa02a4dbdbc1712c5f672969e866", "score": "0.42588425", "text": "def calibrateTAC(self,data):\n # scan through data and get mean peak positions in a fairly crude search\n peakpos=[]\n N=len(data)\n x=np.arange(N)\n i=2\n while i<N-6:\n if data[i]==0 and data[i+5]==0:\n s=np.sum(data[i:i+6])\n if s>10:\n s=np.sum(data[i:i+6]*x[i:i+6])\n s=s/np.sum(data[i:i+6])\n peakpos.append(s)\n i=i+5\n else:\n i=i+1\n else:\n i=i+1\n \n #calculate tac calibration in channels/ns\n peakpos=np.array(peakpos)\n N=len(peakpos)//2\n d=AnalysisData()\n taccalstep=d.TAC_interval # was fixed 20 ns\n diff=0.0\n \n # from peakpos, avoid 'method of fools'\n for i in range(N):\n diff+=(peakpos[i+N]-peakpos[i])/N**2\n\n # from linregress\n tacslope, tacintercept,r,p,stderr=linregress(peakpos, np.arange(len(peakpos))*taccalstep)\n self.logger.info('mean peak spacing in TAC spectrum=%4.1f ch with calibrator setting %3.0f ns'%( diff,taccalstep))\n self.logger.info('TAC calibration=%5.3f ns/ch (%3.0f ns calibrator setting)'%(taccalstep/diff,taccalstep))\n self.logger.info('TAC calibration=%5.3f %s, %5.3f'%(tacslope,\" ns/ch (linregress)\",tacintercept))\n self.TACcalibration=(tacslope,tacintercept) # ns/ch\n self.calibration.TAC=tacslope\n \n return tacslope,tacintercept,peakpos", "title": "" }, { "docid": "d92bb6767d8b52af0ca9b275cd6005b3", "score": "0.42542297", "text": "def calibration_single(filename, channel, source, cut, ax=None, **kw):\n # load data\n ch1, ch2, ch3, tr1, tr2, tr3, c2, c3, ts = lab4.loadtxt(filename, unpack=True, usecols=(0, 1, 2, 4, 5, 6, 8, 9, 12))\n samples = np.array([ch1, ch2, ch3][channel - 1][[tr1, tr2, tr3][channel - 1] > 500], dtype=int)\n hist = np.bincount(samples)\n bins = np.arange(len(hist) + 1)\n # missing_codes = bins[(bins // 4) % 2 == 0]\n\n # fit\n # prepare data\n x = (bins[1:] + bins[:-1]) / 2\n y = hist\n dy = np.sqrt(y)\n # cut\n cut = (y > 0) & (x >= cut[0]) & (x <= cut[1])\n if np.sum(cut) > 3:\n x, y, dy = np.array([x, y, dy])[:, cut]\n # fit function\n def gauss(x, peak, mean, sigma):\n return peak * np.exp(-(x - mean)**2 / sigma**2)\n def fit_fun(x, *par):\n return gauss(x, *par) + gauss(x - 4, *par)\n # initial parameters\n p0 = (np.max(y) / 2, np.mean(x), (np.max(x) - np.min(x)) / 2)\n out = lab.fit_curve(fit_fun, x, y, dy=dy, p0=p0, **kw)\n else:\n out = None\n\n # plot\n if not (ax is None):\n # data\n lab4.bar(bins, hist, ax=ax, label=filename.split('/')[-1])\n # fit\n if not (out is None):\n xspace = np.linspace(np.min(x), np.max(x), 100)\n if 
out.success:\n ax.plot(xspace, fit_fun(xspace, *out.par), '-k', label='fit ' + source)\n else:\n ax.plot(xspace, fit_fun(xspace, *p0), '-r', label='p0 (fit fallito)')\n # decorations\n ax.set_xlabel('canale ADC')\n ax.set_ylabel('conteggio')\n ax.legend(loc='best')\n ax.set_yscale('log')\n \n return (sources[source],) + ((None, None) if out is None else tuple(out.upar[1:]))", "title": "" }, { "docid": "5f1c0a67bc240a8d280abc45c87fc9ba", "score": "0.42532638", "text": "def calculateFFT(self, duration, framerate, sample):\n\n fft_length = int(duration * framerate)\n # For the FFT to work much faster take the length that is a power of 2.\n fft_length = int(get_next_power_2(fft_length))\n FFT = numpy.fft.fft(sample, n=fft_length)\n\n ''' ADJUSTING THRESHOLD - HIGHEST SPECTRAL PEAK METHOD'''\n threshold = 0\n power_spectra = []\n frequency_bin_with_max_spectrum = 0\n for i in range(int(len(FFT) / 2)):\n power_spectrum = scipy.absolute(FFT[i]) * scipy.absolute(FFT[i])\n if power_spectrum > threshold:\n threshold = power_spectrum\n frequency_bin_with_max_spectrum = i\n power_spectra.append(power_spectrum)\n max_power_spectrum = threshold\n threshold *= 0.1\n\n binFrequencies = []\n magnitudes = []\n binResolution = float(framerate) / float(fft_length)\n sum_of_significant_spectra = 0\n # For each bin calculate the corresponding frequency.\n for k in range(len(FFT)):\n binFreq = k * binResolution\n\n # Truncating the FFT so we consider only hearable frequencies.\n if binFreq > self.maxFreqConsidered:\n FFT = FFT[:k]\n break\n elif binFreq > self.minFreqConsidered:\n # Consider only the frequencies\n # with magnitudes higher than the threshold.\n power_spectrum = power_spectra[k]\n if power_spectrum > threshold:\n magnitudes.append(power_spectrum)\n binFrequencies.append(binFreq)\n\n # Sum all significant power spectra\n # except the max power spectrum.\n if power_spectrum != max_power_spectrum:\n sum_of_significant_spectra += power_spectrum\n\n significant_freq = 0.0\n\n if max_power_spectrum > sum_of_significant_spectra:\n significant_freq = frequency_bin_with_max_spectrum * binResolution\n\n # Max. 
frequency considered after truncating.\n # maxFreq = rate without truncating.\n maxFreq = len(FFT) / duration\n\n return (FFT, binFrequencies, maxFreq, magnitudes, significant_freq)", "title": "" }, { "docid": "a42e2c89f21ad7238d48b1d5b48d0039", "score": "0.42515397", "text": "def ffqualitymask(a, freq=None):\r\n if freq==None: freq=32000\r\n fft=sc.fftpack.fft(a)\r\n corr=sc.fftpack.ifft(fft*fft.conjugate())\r\n corr=corr[:(len(corr)/4)]\r\n dfff=np.diff(corr)\r\n dat=np.diff(np.where(dfff>0,1,0))\r\n if -1 not in list(dat): out=freq\r\n else:\r\n first=(((list(dat)).index(-1)))\r\n slope=(dfff[(first+1)]-dfff[first])\r\n out=(slope*first-dfff[first])/slope\r\n out=freq/out\r\n return out", "title": "" }, { "docid": "ebce0f10571dccda7bb5739c3b7a69a2", "score": "0.42501482", "text": "def getComaAmp(\twaves,\t\t\t\t# List of waves\n\t\tup,\t\t\t\t# Upper mass limits for waves\n\t\tlow, \t\t\t\t# Lower mass limits for waves\n\t\tdirect,\t\t\t\t# Directory\n\t\tCONJUGATE=True\t\t):\t# Conjugate Amplitudes (Required for some fits, issue in PWA)\n\tcount_calls('getComaAmp')\n\tCOMPARE=False\n\tSHOW = False # Shows jacobian and its numerical computed counterpart\n\traw_data = getRelevantData(waves,direct)\n\tif CONJUGATE:\n\t\tfor p in range(len(raw_data)):\n\t\t\tfor i in range(len(raw_data[p][2])):\n\t\t\t\tif i%2==1:\n\t\t\t\t\traw_data[p][2][i]*=-1\n\t\t\t\tfor j in range(len(raw_data[p][2])):\n\t\t\t\t\tif (i+j)%2==1:\n\t\t\t\t\t\traw_data[p][3][i][j]*=-1\n\tnWaves = len(waves)\n\tnBins = len(raw_data)\n\tphases = []\n\tdcdR =[] # dc / dR\n\tdcdI =[] # dc / dI\n\tdsdR =[] # ds / dR\n\tdsdI =[] # ds / dI\n\tcomps=[]\n\tbinning=[raw_data[0][0]]\n\tfor point in raw_data:\n\t\tcomp=[]\n\t\tbinning.append(point[1])\n\t\tfor i in range(len(point[2])/2):\n\t\t\tcomp.append(point[2][2*i]+1.j*point[2][2*i+1])\n\t\tcomps.append(comp)\t\n\tfor comp in comps:\n\t\tif not abs(point[2][0]==0.):\n\t\t\tphases.append(comp[0]/abs(comp[0])) # Get the phases from the first wave = anchor wave\n\t\t\tre = comp[0].real\n\t\t\tim = comp[0].imag\n\t\t\tab = (re**2+im**2)**.5\n\t\t\t#\t\t\t# cos = re/sqrt(re^2 + im^2)\n\t\t\t#\t\t\t# sin = im/sqrt(re^2 + im^2)\n\t\t\tdcdR.append(1/ab - re**2/ab**3)\n\t\t\tdcdI.append(-re*im/ab**3)\n\t\t\tdsdR.append(-re*im/ab**3)\n\t\t\tdsdI.append(1/ab - im**2/ab**3)\n\t\telse:\n\t\t\tphases.append(1.+0.j)\n\t\t\tdcdR.append(0.)\n\t\t\tdcdI.append(0.)\n\t\t\tdsdR.append(0.)\n\t\t\tdsdI.append(0.)\n#\t\tprint \"::::\"+str(phases)+\"::::\"\n\tfor i in range(len(comps)):\n\t\tfor j in range(len(comps[i])):\n\t\t\tcomps[i][j]/=phases[i]\n#\tfor comp in comps:\n#\t\tprint comp\n\tcomas=[]\n\tfor i in range(len(raw_data)):\n\t\tjac = []\n\t\tc = phases[i].real\n\t\ts = phases[i].imag\n\t\tfor j in range(int(len(raw_data[i][2])/2)):\n\t\t\tjac_line1 = []\t\t\n\t\t\tjac_line2 = []\n\t\t\tfor k in range(int(len(raw_data[i][2])/2)):\n\t\t\t\tif j == k:\n\t\t\t\t\t# c/exp(i phi) = c/(cos(phi) + i sin(phi)) = c*(cos(phi)-i sin(phi))\n\t\t\t\t\tjac_line1.append( c) # dR'/dR\n\t\t\t\t\tjac_line1.append( s) # dR'/dI\n\t\t\t\t\tjac_line2.append(-s) # dI'/dR\n\t\t\t\t\tjac_line2.append( c) # dI'/dI\n\t\t\t\telse:\n\t\t\t\t\tjac_line1.append(0.)\n\t\t\t\t\tjac_line1.append(0.)\n\t\t\t\t\tjac_line2.append(0.)\n\t\t\t\t\tjac_line2.append(0.)\n\t\t\tjac.append(jac_line1)\n\t\t\tjac.append(jac_line2)\n\t\tfor j in range(int(len(raw_data[i][2])/2)):\n\t\t\tre = raw_data[i][2][2*j ]\n\t\t\tim = raw_data[i][2][2*j+1]\n\t\t\tjac[2*j ][0]+= (re*dcdR[i] + im*dsdR[i]) # dR'/dRa\n\t\t\tjac[2*j ][1]+= 
(re*dcdI[i] + im*dsdI[i]) # dR'/dIa\n\t\t\tjac[2*j+1][0]+= (im*dcdR[i] - re*dsdR[i]) # dI'/dRa\n\t\t\tjac[2*j+1][1]+= (im*dcdI[i] - re*dsdI[i]) # dI'/dIa\n\t\tif SHOW:\n\t\t\tnumericalJac(raw_data[i][2])\n\t\t\tprint\n\t\t\tprettyPrint(jac)\n\t\t\traw_input()\n\t\tcoma_rot = (numpy.matrix(jac)*numpy.matrix(raw_data[i][3])*numpy.transpose(numpy.matrix(jac))).tolist()\t\n#\t\tcoma_rot = (numpy.transpose(numpy.matrix(jac))*numpy.matrix(raw_data[i][3])*numpy.matrix(jac)).tolist()\t\n#\t\traw_data[i][3]= coma_rot\n\t\tcomas.append(coma_rot)\n\tpoints=[]\n\tfor comp in comps:\n\t\tpoint=[]\n\t\tfor dat in comp:\n\t\t\tpoint.append(dat.real)\n\t\t\tpoint.append(dat.imag)\n\t\tpoints.append(point)\n\tif COMPARE:\n\t\tfor i in range(len(raw_data)):\n#\t\t\traw_T = raw_data[i][2]\n#\t\t\trot_T = points[i]\n#\t\t\tcoma_raw = raw_data[i][3]\n#\t\t\tcoma_rot = comas[i]\n#\t\t\tprint\n#\t\t\tprint rot_T\n#\t\t\tprint\n#\t\t\tprint coma_rot\n\t\t\tprint \"--------------\"\n\t\t\tprint \"rotated: \"+str(numpy.matrix(rot_T)*numpy.matrix(coma_rot)*numpy.transpose(numpy.matrix(rot_T)))\n\t\t\tprint \"Raw : \"+str(numpy.matrix(raw_T)*numpy.matrix(coma_raw)*numpy.transpose(numpy.matrix(raw_T)))\n\t\t\tprint \"T_rot: \"+str(rot_T)\n\t\t\tprint \"T_raw: \"+str(raw_T)\n\tcomas_inv=[]\n\tboi = -1 #BinOfInterest\n\tfor bin in range(len(comas)):\n\t\tcoma = comas[bin]\n\t\tbc = binning[bin]+binning[bin+1]\n\t\tbc/=2.\n\t\tfor i in range(len(coma)):\n\t\t\tupi = up[i/2]\n\t\t\tlowi = low[i/2]\n\t\t\tfor j in range(len(coma)):\n\t\t\t\tupj=up[j/2]\n\t\t\t\tlowj=low[j/2]\n\t\t\t\tif bin == boi and i==7 and j==7:\n\t\t\t\t\tprint '-------'\n\t\t\t\t\tprint upi\n\t\t\t\t\tprint lowi\n\t\t\t\t\tprint upj\n\t\t\t\t\tprint lowj\n\t\t\t\t\tprint bc\n\t\t\t\tif upi < bc or lowi > bc or upj < bc or lowj > bc:\n\t\t\t\t\tcoma[i][j]=0.\n\t\tif bin == boi:\n\t\t\tprint (binning[boi]+binning[boi+1])/2.\n\t\t\tprint binning\n\t\t\tprettyPrint(coma)\n\t\tcoma_inv= la.pinv(numpy.matrix(coma)).tolist() # Since Im_anc == 0, the coma is singular. Use pinv instead of inv. Should give the same\n\t\tif bin == boi:\n\t\t\tprettyPrint(coma_inv)\n\t\t\traw_input('<enter>')\n\t\tcomas_inv.append(coma_inv)\n\treturn [points,comas_inv]", "title": "" }, { "docid": "b08f3d88300a9bdbd5227a5e35b553f8", "score": "0.42462313", "text": "def contact_FFT(s, nominal_stress, E, nu, it_max=1000, err_lim=1.0E-10, initial_penetration_fraction=0.1, verbose=0):\n # getting discretization props\n N, dxy = s.shape[0], s.dxy\n # retrieving kernel (influence coefficients)\n A = _stiffness_FFT(N, dxy, E, nu)\n # and transformint from spatial to frequency domain\n fA = dft.fft2(A)\n \n # discretizing P and assigning initial guess as uniformly\n # distributed from target load (charge..[N])\n charge = nominal_stress*s.nominal_area() \n # imporved initial guess,\n h_max = np.max(s.h)\n P = np.zeros((N,N))\n ic = np.where(s.h > h_max-h_max*initial_penetration_fraction)\n contact_area_initial_guess = ic[0].size*dxy**2\n P[ic] = charge/contact_area_initial_guess \n \n # convergence auxilliaries\n errlim = err_lim # rel limit set first to initial absolute error\n pk = np.zeros((N,N))\n err, errs = 1., []\n it, gold = 0, 1.\n s.h *= -1.\n if verbose:\n print('cg/fft elastic contact algorithm on %d X %d grid, sigma_0 = %8.2e, E = %8.2e, nu = %5.2f'%(N, N, nominal_stress, E, nu)) \n \n # CG loop\n while err > errlim:\n if it > it_max:\n if verbose:\n print('stopped due to maximum iteration constraint of %d'%(it_max))\n break\n it = it+1\n sy = np.where(P > 0.) 
# contact area\n sn = np.where(P <= 0.) # free area\n # compute displacement in frequency domain and transform back to spatial domain\n dd = dft.ifft2(fA*dft.fft2(P,s=(2*N,2*N)))\n dd = dd.real\n u = dd[0:N,0:N] \n rk = u+s.h\n do = np.mean(rk[sy])\n rk = rk-do \n # norm\n G = np.sum(rk[sy]*rk[sy])\n # slopes\n pk[sy] = rk[sy] + G/gold*pk[sy]\n pk[sn] = 0.\n gold = G\n # qk\n dd = dft.ifft2(fA*dft.fft2(pk,s=(2*N,2*N)))\n dd = dd.real\n qk = dd[0:N,0:N]\n # rb is the adjustment in approach\n rb = np.mean(qk[sy]);\n qk = qk-rb\n # coeffs\n dp = np.sum(rk[sy]*pk[sy])/np.sum(qk[sy]*pk[sy])\n P[sy] = P[sy] - dp*pk[sy]\n P[P < 0.] = 0. \n sol = np.where((P == 0.) & (rk < 0.))\n P[sol] = P[sol] - dp*rk[sol]\n W = np.sum(P) * dxy**2\n P = charge/W*P\n err = np.sqrt(gold*dxy**2)\n errs.append(err)\n if it == 1:\n errlim = err*errlim\n if verbose:\n print('iteration %4d yields residual of %8.2e versus limit of %8.2e'%(it, err, errlim))\n \n # return\n s.h *= -1.\n contact = Results()\n contact.p, contact.u = P, u\n return contact", "title": "" }, { "docid": "f15e12984e2b04936a3ca3da7fa1a4dd", "score": "0.4231634", "text": "def retrieveCalibrationData(self):\n\n\t\tcalib_time = self.interval_start\n\n\t\t#get data from db\n\t\tcalibration_data = {}\n\t\tfor channel in ['BBHG_incand','BBLG_incand']:\n\t\t\tself.db_cur.execute('''\n\t\t\tSELECT \n\t\t\t\t0_term,\n\t\t\t\t1_term,\n\t\t\t\t2_term,\n\t\t\t\t0_term_err,\n\t\t\t\t1_term_err,\n\t\t\t\t2_term_err,\n\t\t\t\tcalibration_material,\n\t\t\t\tid\t\n\t\t\tFROM\n\t\t\t\tsp2_calibrations\n\t\t\tWHERE\n\t\t\t\tinstr_ID = %s\n\t\t\t\tAND calibrated_channel = %s\n\t\t\t\tAND calibration_date <= %s\n\t\t\t\tORDER BY calibration_date DESC LIMIT 1\n\t\t\t\t\n\t\t\t''',\n\t\t\t(self.instr_ID,channel,calib_time))\n\n\t\t\tcalib_coeffs = self.db_cur.fetchall()\n\t\t\tif calib_coeffs == []:\n\t\t\t\tcalib_coeffs_np = [[np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,'nan',np.nan]]\n\t\t\telse:\n\t\t\t\tcalib_coeffs_np = np.array(calib_coeffs, dtype=[('term0', 'f4'),('term1', 'f4'),('term2', 'f4'),('term0err', 'f4'),('term1err', 'f4'),('term2err', 'f4'),('mat', 'S7'),('ID', 'f4'),]) #converts Nones to nans for calculations\n\n\t\t\t#Aqudag correction\n\t\t\tfor row in calib_coeffs_np:\n\t\t\t\tcalib_material \t= row[6]\n\t\t\t\tcalib_ID \t\t= row[7]\n\t\t\t\tcalib_0 \t\t= row[0]\n\t\t\t\tcalib_0_err\t\t= row[3]\n\t\t\t\tcalib_1 \t\t= row[1]\n\t\t\t\tcalib_1_err \t= row[4]\n\t\t\t\tcalib_2 \t\t= row[2]\n\t\t\t\tcalib_2_err \t= row[5]\n\t\t\t\tif calib_material == 'Aquadag':\n\t\t\t\t\tcalib_1 \t= row[1]/0.7\n\t\t\t\t\tcalib_1_err = row[4]/0.7\n\t\t\t\t\tcalib_2 \t= row[2]/0.7\n\t\t\t\t\tcalib_2_err = row[5]/0.7\n\t\t\t\n\t\t\t#set calibration ids\t\t\n\t\t\tif channel == 'BBHG_incand':\n\t\t\t\tself.HG_calibration_ID = float(calib_ID)\n\t\t\tif channel == 'BBLG_incand':\n\t\t\t\tself.LG_calibration_ID = float(calib_ID)\n\n\t\t\t#get the signal limits for calculating mass\n\t\t\tif self.extrapolate_calibration == False:\n\t\t\t\tpkht_ll, pkht_ul = self._retrieveCalibrationLimits(calib_ID)\n\t\t\telse:\n\t\t\t\tpkht_ll = self.min_detectable_signal\n\t\t\t\tpkht_ul = self.saturation_limit\n\t\t\t\t#an electrical issue on SP2 #17 prior to 2012 gave anomalous signals at masses above ~240nm, this only applies to calibration #1, so we limit the mass range in this case\n\t\t\t\tif calib_ID == 1:\n\t\t\t\t\tpkht_ul = 1410\n\n\n\t\t\tcalibration_data[channel] = [pkht_ll, pkht_ul, calib_0, calib_1, calib_2, calib_0_err, calib_1_err, calib_2_err]\n\t\t\n\t\tself.calibration_info = 
calibration_data", "title": "" }, { "docid": "bd52d733e274ff0b53b32a9581b38e2c", "score": "0.4230407", "text": "def apply_cca(X, W, fs):\n\n start = default_timer()\n print('apply_cca - calculations started')\n\n N, D, T = X.shape\n # gamma = 0.1\n window_sec = 5\n X = X.reshape(D * N, T)\n\n # Rij\n Rij = np.swapaxes(np.reshape(np.cov(X), (N, D, N, D)), 1, 2)\n\n # Rw\n Rw = np.mean([Rij[i, i, :, :]\n for i in range(0, N)], axis=0)\n # Rw_reg = (1 - gamma) * Rw + gamma * np.mean(eigh(Rw)[0]) * np.identity(Rw.shape[0])\n\n # Rb\n Rb = np.mean([Rij[i, j, :, :]\n for i in range(0, N)\n for j in range(0, N) if i != j], axis=0)\n\n # ISCs\n ISC = np.sort(np.diag(np.transpose(W) @ Rb @ W) / np.diag(np.transpose(W) @ Rw @ W))[::-1]\n\n # Scalp projections\n A = np.linalg.solve(Rw @ W, np.transpose(W) @ Rw @ W)\n\n # ISC by subject\n print('by subject is calculating')\n ISC_bysubject = np.empty((D, N))\n\n for subj_k in range(0, N):\n Rw, Rb = 0, 0\n Rw = np.mean([Rw + 1 / (N - 1) * (Rij[subj_k, subj_k, :, :] + Rij[subj_l, subj_l, :, :])\n for subj_l in range(0, N) if subj_k != subj_l], axis=0)\n Rb = np.mean([Rb + 1 / (N - 1) * (Rij[subj_k, subj_l, :, :] + Rij[subj_l, subj_k, :, :])\n for subj_l in range(0, N) if subj_k != subj_l], axis=0)\n\n ISC_bysubject[:, subj_k] = np.diag(np.transpose(W) @ Rb @ W) / np.diag(np.transpose(W) @ Rw @ W)\n\n # ISC per second\n print('by persecond is calculating')\n ISC_persecond = np.empty((D, int(T / fs) + 1))\n window_i = 0\n\n for t in range(0, T, fs):\n\n Xt = X[:, t:t+window_sec*fs]\n Rij = np.cov(Xt)\n Rw = np.mean([Rij[i:i + D, i:i + D]\n for i in range(0, D * N, D)], axis=0)\n Rb = np.mean([Rij[i:i + D, j:j + D]\n for i in range(0, D * N, D)\n for j in range(0, D * N, D) if i != j], axis=0)\n\n ISC_persecond[:, window_i] = np.diag(np.transpose(W) @ Rb @ W) / np.diag(np.transpose(W) @ Rw @ W)\n window_i += 1\n\n stop = default_timer()\n print(f'Elapsed time: {round(stop - start)} seconds.')\n\n return ISC, ISC_persecond, ISC_bysubject, A", "title": "" }, { "docid": "b2f10726467c9561d6b3c3e08cdee5ad", "score": "0.42273065", "text": "def setBasicSignal(self, channel=None, waveform=None, freq=None,\n period=None, amp=None, offset=None, highLev=None,\n lowLev=None, phase=None, width=None, duty=None,\n risetime=None, falltime=None, delay=None, symmetry=None,\n stDev=None, mean=None): \n \n if channel in self._channelOpts:\n str1 = '' \n \n wfOpts = ['SINE', 'SQUARE', 'RAMP', 'PULSE', 'NOISE', 'DC']\n if waveform in wfOpts:\n str1 = str1 + ', WVTP, ' + waveform\n elif waveform != None:\n str1 = str1 + ', WVTP, SINE'\n print('Warning: Unknown Waveform type! 
Assuming SINE as default.')\n \n if waveform != 'DC' or waveform != 'NOISE':\n # general options for most waveforms\n if freq != None:\n str1 = str1 + ', FRQ, ' + str(freq)\n elif period != None:\n str1 = str1 + ', PERI, ' + str(period)\n \n if amp != None or offset != None:\n if amp != None:\n str1 = str1 + ', AMP, ' + str(amp)\n \n if offset != None:\n str1 = str1 + ', OFST, ' + str(offset)\n elif highLev != None or lowLev != None:\n if highLev != None:\n str1 = str1 + ', HLEV, ' + str(highLev)\n \n if lowLev != None:\n str1 = str1 + ', LLEV, ' + str(lowLev)\n \n # phase option not defined for PULSE, DC and NOISE waveform\n if phase != None and (waveform != 'PULSE' and waveform != 'DC' and\n waveform != 'NOISE'):\n str1 = str1 + ', PHSE, ' + str(phase) \n \n if waveform == 'DC':\n # DC option completely defined by offset\n if offset != None:\n str1 = str1 + ', OFST, ' + str(offset)\n \n # special options for PULSE waveform\n if waveform == 'PULSE':\n if width != None:\n str1 = str1 + ', WIDTH, ' + str(width)\n elif duty != None:\n str1 = str1 + ', DUTY, ' + str(duty)\n \n if risetime != None:\n str1 = str1 + ', RISE, ' + str(risetime)\n if falltime != None:\n str1 = str1 + ', FALL, ' + str(falltime)\n \n if delay != None:\n str1 = str1 + ', DLY, ' + str(delay)\n \n # special option for RAMP waveform\n if symmetry != None and waveform == 'RAMP':\n str1 = str1 + ', SYM, ' + str(symmetry)\n \n # special option for SQUARE waveform\n if duty != None and waveform == 'SQUARE':\n str1 = str1 + ', DUTY, ' + str(duty)\n \n # special options for NOISE waveform\n if waveform == 'NOISE':\n if stDev != None:\n str1 = str1 + ', STDEV, ' + str(stDev)\n if mean != None:\n str1 = str1 + ', MEAN, ' + str(mean)\n \n if len(str1) > 0: \n self.cmd(channel + ':BSWV' + str1[1:])\n else:\n print('Warning: Unknown Channel! 
Options are C1 for Channel 1 and C2 for Channel 2.')", "title": "" }, { "docid": "3ac6c105849f3fc8de8efb7a7b7dfbd2", "score": "0.42262277", "text": "def get_fft_values(ts):\n # Return zeros if there are no frequencies in-band\n if self._n_freqs==0:\n return np.zeros(len(ts))\n\n # Get the complete times array\n length = ((self._fft_end-self._fft_start+self._dt) * self._unique\n - self._dt)\n fft_times = np.linspace(self._fft_start, self._fft_start+length,\n self._n_all_freqs)\n\n # Get the complete values array\n all_freqs = scipy.fft.rfftfreq(self._n_all_freqs, self._dt)\n band = (all_freqs>=self.f_min) & (all_freqs<=self.f_max)\n amps = np.zeros(len(all_freqs))\n amps[band] = self.amps\n phases = np.zeros(len(all_freqs))\n phases[band] = self.phases\n fft_values = scipy.fft.irfft(amps * np.exp(-1j*phases),\n n=self._n_all_freqs)\n\n # Normalization calculated by guess-and-check; seems to work fine\n # normalization = len(all_freqs) * np.sqrt(1/(2*len(band_freqs)))\n fft_values *= self._n_all_freqs * np.sqrt(1/(2*self._n_freqs))\n\n # Interpolate values at the given ts based on the full arrays\n values = np.interp(ts, fft_times, fft_values, period=length)\n\n # So far, the units of the values are V/V_rms, so multiply by the\n # rms voltage:\n values *= self.rms\n\n return values", "title": "" }, { "docid": "d92c3ba1bff2b156d8248727297fd40d", "score": "0.4226063", "text": "def _calculate(self, detectors, calibration_params, distance):\n # Create a workspace of the three peaks against 1/v1^2\n wks = WorkspaceFactory.Instance().create('Workspace2D', len(detectors), 3, 3)\n\n for detector in detectors:\n det_index = detector - detectors[0]\n\n x_data = []\n for peak in range(3):\n peak_position = calibration_params.getItem(peak).column('f1.PeakCentre')[det_index]\n x_data.append(1.0/(distance[det_index]/peak_position)**2)\n\n params = calibration_params.getItem(peak)\n sigma = params.column('f1.Sigma')[det_index]\n sigma_err = params.column('f1.Sigma_Err')[det_index]\n\n u_peak = U_PEAKS[peak]\n wks.dataY(det_index)[peak] = (sigma ** 2) - (u_peak[4]**2)\n wks.dataE(det_index)[peak] = 2*sigma*sigma_err\n\n wks.setX(det_index, np.array(x_data))\n\n AnalysisDataService.Instance().addOrReplace('__bank_data', wks)\n\n # Perform a linear fit of each spectra\n fit = AlgorithmManager.Instance().create('PlotPeakByLogValue')\n fit.initialize()\n fit.setChild(True)\n fit.setProperty('Function', 'name=LinearBackground')\n fit.setProperty('Input', '__bank_data,v0:132')\n fit.setProperty('OutputWorkspace', 'backscattering_params')\n fit.execute()\n\n DeleteWorkspace('__bank_data')\n params = fit.getProperty('OutputWorkspace').value\n\n # Process fit parameters\n for index, detector in enumerate(detectors):\n params.setCell(index, 0, detector)\n\n t0_val = params.cell(index, 1)\n l_dist = params.cell(index, 3)\n\n # Set negative values to zero, otherwise take square root\n if t0_val > 0:\n t0_val = np.sqrt(t0_val)\n else:\n t0_val = 0\n\n if l_dist > 0:\n l_dist = np.sqrt(l_dist)\n else:\n l_dist = 0\n\n params.setCell(index, 1, t0_val)\n params.setCell(index, 3, l_dist)\n\n return params", "title": "" }, { "docid": "e5b598608de5b8c41c3f22727ab9c219", "score": "0.42239383", "text": "def apply(self, accel, time, time_units='us', time_conv_kw=None):\n # convert timestamps and window if chosen\n if not self._window:\n timestamps, dt = process_timestamps(time, accel, time_units=time_units, conv_kw=time_conv_kw,\n window=self._window, hours=self._hours)\n else:\n timestamps, dt, acc_win = 
process_timestamps(time, accel, time_units=time_units, conv_kw=time_conv_kw,\n window=self._window, hours=self._hours)\n\n # acceleration filter object\n acc_filt = AccelerationFilter(continuous_wavelet=self._cwave, power_band=self._pwr_band,\n power_peak_kw=self._pwr_pk_kw, power_std_height=self._pwr_std_h,\n reconstruction_method=self._recon_method, lowpass_order=self._lp_order,\n lowpass_cutoff=self._lp_cut, window=self._filt_window,\n discrete_wavelet=self._dwave, extension_mode=self._ext_mode,\n reconstruction_level=self._recon_level)\n\n if not self._window:\n filt_accel, rec_accel, power, power_peaks = acc_filt.apply(accel, 1 / dt) # run the filtering\n else:\n filt_accel, rec_accel, power, power_peaks = {}, {}, {}, {}\n for day in acc_win.keys():\n filt_accel[day], rec_accel[day], power[day], power_peaks[day] = acc_filt.apply(acc_win[day], 1 / dt)\n\n # setup the STS detection\n if self._method == 'stillness':\n detect = detectors.Stillness(gravity=self._grav, thresholds=self._ths, gravity_pass_ord=self._grav_ord,\n gravity_pass_cut=self._grav_cut, long_still=self._long_still,\n moving_window=self._still_window, duration_factor=self._duration_factor,\n displacement_factor=self._disp_factor, lmax_kwargs=self._lmax_kw,\n lmin_kwargs=self._lmin_kw, trans_quant=self._tq)\n elif self._method == 'displacement':\n detect = detectors.Displacement(gravity=self._grav, thresholds=self._ths, gravity_pass_ord=self._grav_ord,\n gravity_pass_cut=self._grav_cut, long_still=self._long_still,\n moving_window=self._still_window, duration_factor=self._duration_factor,\n displacement_factor=self._disp_factor, lmax_kwargs=self._lmax_kw,\n lmin_kwargs=self._lmin_kw, trans_quant=self._tq)\n else:\n raise ValueError('Method must be set as `stillness` or `displacement`.')\n\n if not self._window:\n sist = detect.apply(accel, filt_accel, rec_accel, timestamps, dt, power_peaks)\n else:\n sist = {}\n for day in filt_accel.keys():\n day_sist = detect.apply(acc_win[day], filt_accel[day], rec_accel[day], timestamps[day], dt,\n power_peaks[day])\n sist.update(day_sist)\n\n return sist", "title": "" }, { "docid": "266f794e5e1a45290b6bdb21b95f9919", "score": "0.42173627", "text": "def reconstruction(self, lowfreq_filtered, vdirectional_filtered, hdirectional_filtered):\n \n ndims_image = len(lowfreq_filtered.shape) - 2\n axis_filter = ndims_image - 2\n axis_real_imag = axis_filter + 1\n \n expanded_filters = self.filters\n for _ in range(axis_filter):\n expanded_filters = cp.expand_dims(expanded_filters, axis = 0)\n \n get_real_part = lambda arr: cp.take(arr, 0, axis = axis_real_imag)\n get_imag_part = lambda arr: cp.take(arr, 1, axis = axis_real_imag)\n to_complex = lambda arr: get_real_part(arr) + 1j * get_imag_part(arr)\n \n \n lowfreq_filtered = cp.fft.fft2(lowfreq_filtered, norm = \"ortho\")\n lowfreq_filtered = cp.squeeze(lowfreq_filtered, axis = axis_real_imag)\n \n \n hdirectional_filtered = cp.fft.fft2( to_complex(hdirectional_filtered), norm = \"ortho\" ) /math.sqrt(2)\n \n vdirectional_filtered = cp.fft.fft2( to_complex(vdirectional_filtered), norm = \"ortho\") /math.sqrt(2)\n \n lowfreq_filtered = cp.tile(lowfreq_filtered, [1] * (ndims_image - 1) + [2,2]) \n hdirectional_filtered = cp.tile( hdirectional_filtered, [1] * (ndims_image - 1) + [self.decimation_factor,2] )\n vdirectional_filtered = cp.tile( vdirectional_filtered, [1] * (ndims_image - 1) + [2,self.decimation_factor] )\n \n filtered_fft = cp.concatenate((vdirectional_filtered, hdirectional_filtered, lowfreq_filtered), axis = axis_filter)\n 
filtered_fft = filtered_fft * expanded_filters\n \n hf_filtered, lowfreq_filtered = cp.split(filtered_fft, [2*self.n_angles], axis = axis_filter)\n lowfreq_filtered = cp.squeeze(lowfreq_filtered, axis = axis_filter)\n hf_filtered = cp.sum( hf_filtered, axis = axis_filter)\n \n \n hf_filtered_flipped = cp.flip(hf_filtered, axis =(-1))\n hf_filtered_flipped = cp.roll(hf_filtered_flipped, 1, axis =(-1))\n hf_filtered_flipped = cp.flip(hf_filtered_flipped, axis =(-2))\n hf_filtered_flipped = cp.roll(hf_filtered_flipped, 1, axis =(-2))\n \n\n hf_filtered = hf_filtered + cp.conj(hf_filtered_flipped)\n return cp.fft.ifft2(hf_filtered + lowfreq_filtered, norm = \"ortho\").real", "title": "" }, { "docid": "aa74b5c2e4bff20045ead7db890e3b0d", "score": "0.4212841", "text": "def signal_processing(signal:np.ndarray, time:np.ndarray,\n sampling_f:int = 48_000, max_frequency:int = 10_240,\n tones = None):\n try:#trim the silent portion of the signal, if there is any\n tm0 = np.argwhere(abs(signal)>1E-3)[0][0] + int(sampling_f//160)\n except:#pylint: disable=W0702\n tm0 = 0\n min_frequency=20\n freqs, times, spec_2d = nkt_algorithm(signal[tm0:], sampling_f,\n f_range=[max_frequency, min_frequency],\n tones = tones)\n time_list = times + time[tm0]\n if isinstance(tones, np.ndarray):\n p_time = np.array([])\n p_intensity = np.array([])\n for tone in tones[:-1]:#merge \n start_index = np.argwhere(freqs>=tone)[0]\n min_index = int(max(start_index-1, 0))\n max_index = int(min(start_index+2, len(freqs)))\n indeces = range(min_index, max_index)\n intensity_merged = np.array([spec_2d[ind] \n for ind in indeces]).sum(axis=0)\n peak_pos = [np.argmax(intensity_merged)]\n p_time = np.append(p_time, time_list[peak_pos])\n p_intensity = np.append(p_intensity, intensity_merged[peak_pos])\n spectrum = (p_time, tones[:-1])\n return spectrum, tones[:-1]\n else:\n main_component = np.array([freqs[np.argmax(spec_2d[:,i])]\n for i in range(len(times))])\n spectrum = (time_list, main_component)\n return spectrum, freqs", "title": "" }, { "docid": "90e54321c0642849101e791b70a24786", "score": "0.42113185", "text": "def ffcalcfftqual2(a, freq=None):\r\n if freq==None: freq=32000\r\n fft=sc.fftpack.fft(a)\r\n corr=sc.fftpack.ifft(fft*fft.conjugate())\r\n corr=corr[:(len(corr)/4)]\r\n lags = np.divide(freq, np.arange(0,len(corr), dtype = float))\r\n plt.plot(lags,corr); plt.show()\r\n import ipdb; ipdb.set_trace(); \r\n dfff=np.diff(corr)\r\n dat=np.diff(np.where(dfff>0,1,0))\r\n if -1 not in list(dat): out=(float(freq),float(0))\r\n else:\r\n dat=np.where(dat<0,1,0)\r\n dat=list(dat) \r\n (lab, inc)=sc.ndimage.label(dat)\r\n \r\n if inc >4: inc=4\r\n arr=[]\r\n while inc!=0:\r\n first=[]\r\n pos=((list(lab)).index(inc))-1\r\n first.append(pos)\r\n first.append(corr[pos])\r\n arr.append(first)\r\n inc=inc-1\r\n arr.sort(key=lambda x:x[1])\r\n first=arr[len(arr)-1][0]\r\n slope=(dfff[(first+1)]-dfff[first])\r\n out=(slope*first-dfff[first])/slope\r\n out=freq/out\r\n out=(out,(arr[len(arr)-1][1]))\r\n return out", "title": "" } ]
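Several of the negative passages in the row above (ffqualitymask and ffcalcfftqual2 in particular) estimate a fundamental frequency the same way: take the FFT of the signal, multiply by its conjugate to get the power spectrum, inverse-transform to obtain the autocorrelation (Wiener-Khinchin), and locate the first rising-to-falling turn in the lag domain. A minimal self-contained sketch of that technique, written for illustration only — the function name and defaults below are assumptions, not code from any passage:

import numpy as np
from numpy.fft import fft, ifft

def f0_autocorr(signal, fs=32000):
    # Power spectrum -> inverse FFT gives the (circular) autocorrelation.
    spectrum = fft(signal)
    corr = ifft(spectrum * spectrum.conjugate()).real
    corr = corr[: len(corr) // 4]        # keep short positive lags, as the passages do
    slope = np.diff(corr)
    # A -1 transition in the sign of the slope marks a local maximum.
    turning = np.diff(np.where(slope > 0, 1, 0))
    peaks = np.flatnonzero(turning == -1)
    if peaks.size == 0:
        return float(fs)                 # no periodicity found: fall back like the passages
    lag = peaks[0] + 1                   # +1 compensates for the two diffs
    return fs / lag

For a pure 440 Hz sine sampled at 32 kHz this returns roughly 438 Hz (the nearest integer lag); the passages sharpen that estimate by linearly interpolating the slope around the turning point, which this sketch deliberately omits.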
cd411db67420250d030333d8bd595b03
Verify that the contents of the PrivateKey object are valid.
[ { "docid": "b83dd70f94096e9a51e50f0d45b3d976", "score": "0.58293355", "text": "def validate(self):\n if not isinstance(self.value, bytes):\n raise TypeError(\"key value must be bytes\")\n elif not isinstance(self.cryptographic_algorithm,\n enums.CryptographicAlgorithm):\n raise TypeError(\"key algorithm must be a CryptographicAlgorithm \"\n \"enumeration\")\n elif not isinstance(self.cryptographic_length, six.integer_types):\n raise TypeError(\"key length must be an integer\")\n elif not isinstance(self.key_format_type, enums.KeyFormatType):\n raise TypeError(\"key format type must be a KeyFormatType \"\n \"enumeration\")\n elif self.key_format_type not in self._valid_formats:\n raise ValueError(\"key format type must be one of {0}\".format(\n self._valid_formats))\n\n # TODO (peter-hamilton) Verify that the key bytes match the key format\n\n mask_count = len(self.cryptographic_usage_masks)\n for i in range(mask_count):\n mask = self.cryptographic_usage_masks[i]\n if not isinstance(mask, enums.CryptographicUsageMask):\n position = \"({0} in list)\".format(i)\n raise TypeError(\n \"key mask {0} must be a CryptographicUsageMask \"\n \"enumeration\".format(position))\n\n name_count = len(self.names)\n for i in range(name_count):\n name = self.names[i]\n if not isinstance(name, six.string_types):\n position = \"({0} in list)\".format(i)\n raise TypeError(\"key name {0} must be a string\".format(\n position))", "title": "" } ]
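The positive passage above validates a key object field by field: type checks on the raw value and length, enum membership for the format type, then element-wise checks over the usage-mask and name lists. A stripped-down, framework-free rendition of the same pattern — the enum values and function name here are hypothetical stand-ins, not the API of the library the passage comes from:

import enum

class KeyFormatType(enum.Enum):
    RAW = 1
    PKCS_1 = 2
    PKCS_8 = 3

_VALID_FORMATS = {KeyFormatType.RAW, KeyFormatType.PKCS_1, KeyFormatType.PKCS_8}

def validate_private_key(value, length, key_format, names):
    # Fail fast with a precise message for each malformed field.
    if not isinstance(value, bytes):
        raise TypeError("key value must be bytes")
    if not isinstance(length, int):
        raise TypeError("key length must be an integer")
    if key_format not in _VALID_FORMATS:
        raise ValueError("key format type must be one of {0}".format(_VALID_FORMATS))
    for i, name in enumerate(names):
        if not isinstance(name, str):
            raise TypeError("key name ({0} in list) must be a string".format(i))

Indexing the error message by list position, as the passage also does, makes a failure on the n-th name directly actionable.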
[ { "docid": "1b2e2eea39bb3d328413ef430619f571", "score": "0.6874005", "text": "def test_verifyValidPrivateKey(self):\n blob = keys.Key.fromString(keydata.publicRSA_openssh).blob()\n packet = (NS('foo') + NS('none') + NS('publickey') + '\\x00'\n + NS('ssh-rsa') + NS(blob))\n d = self.authServer.ssh_USERAUTH_REQUEST(packet)\n def check(ignored):\n self.assertEquals(self.authServer.transport.packets,\n [(userauth.MSG_USERAUTH_PK_OK, NS('ssh-rsa') + NS(blob))])\n return d.addCallback(check)", "title": "" }, { "docid": "6dad406291ecf5308e79f32664e016b7", "score": "0.66077715", "text": "def test_key(self):\n rv = pem.parse(KEY_PEM)\n key, = rv\n assert isinstance(key, pem.RSAPrivateKey)\n assert KEY_PEM == str(key)", "title": "" }, { "docid": "3ffb098200650ceffbed2430ae7fad62", "score": "0.65610844", "text": "def _check_validity(self) -> None:\n if self.message != self._get_message(self.representative_public_key):\n raise ValueError(\"Invalid message.\") # pragma: no cover\n ledger_api = make_ledger_api(self.ledger_id)\n public_keys = ledger_api.recover_public_keys_from_message(\n self.message, self.signature\n )\n if len(public_keys) == 0:\n raise ValueError(\"Malformed signature!\") # pragma: no cover\n public_key: Optional[str] = None\n for public_key_ in public_keys:\n address = ledger_api.get_address_from_public_key(public_key_)\n if address == self.address:\n public_key = public_key_\n break\n if public_key is None:\n raise ValueError(\n \"Invalid signature for provided representative_public_key and agent address!\"\n )\n self._public_key = public_key", "title": "" }, { "docid": "0232ed438343bc5425a262d0b5b971d3", "score": "0.6554654", "text": "def is_valid(self):\n try:\n self.verify()\n except BadSignature:\n return False\n except errors.SecretKeyFileNotFound:\n return False\n return True", "title": "" }, { "docid": "41b2ae9a8130e8f2bfa0632ca5da96d5", "score": "0.65478885", "text": "def hasPrivateKey(self):\n return self.d != 0", "title": "" }, { "docid": "371f195b6b2e6fbc6f4fc66a19762ad5", "score": "0.65281105", "text": "def key_is_private(key):\n if PRIVATE_RE.search(key):\n return True\n else:\n return False", "title": "" }, { "docid": "466400adc5855a3349a0b772400ce8ec", "score": "0.6522692", "text": "def is_private(self):\n return isinstance(self.key, bytes)", "title": "" }, { "docid": "a9edd80bc2af2a8e237bdb7466dbba50", "score": "0.6490936", "text": "def test_private_identity() -> None:\n identity = PrivateIdentity(\n CryptoProtocol.SHA256WithRSA.generate_private_key(), \"1234567\"\n )\n assert identity.identity_id == \"1234567\"\n assert isinstance(identity.private_key, PrivateKey)", "title": "" }, { "docid": "c0af4524dcfbb32870ffdbac303c2b90", "score": "0.6393453", "text": "def verify_private_key_exist(self):\n return os.path.isfile(self.configuration[\"CA_default\"][\"private_key\"])", "title": "" }, { "docid": "0f8f9b1dae2c32de762714e902b9a142", "score": "0.6271209", "text": "def verify(public_key,data,signature):\n deserialized_public_key = serialization.load_pem_public_key(public_key.encode('utf-8'),default_backend())\n (r,s) = signature\n try:\n deserialized_public_key.verify(encode_dss_signature(r,s),json.dumps(data).encode('utf-8'),ec.ECDSA(hashes.SHA256()))\n return True\n except InvalidSignature:\n return False", "title": "" }, { "docid": "f2ddbe3654c2a6f85bbfde6035e7a194", "score": "0.6234958", "text": "def test_check_invalid_key(self):\n self.assertFalse(\n CTCSerializer.check_structure(self.invalid_structure_4))", "title": "" }, { "docid": 
"d459b7b93e04fc8d130a7273791d7ca8", "score": "0.6199667", "text": "def test_create_invalid_key(self):\n response = self.client.post(\n self.url,\n {\n 'name': 'random-test-cert',\n 'certificate': self.cert,\n 'key': 'I am Groot.'\n }\n )\n self.assertEqual(response.status_code, 400, response.data)\n # match partial since right now the rest is pyopenssl errors\n self.assertIn('Could not load private key', response.data['key'][0])", "title": "" }, { "docid": "7dc75185999f51726ba9232516760baa", "score": "0.61488324", "text": "def is_verified(self):\n return not bool(self.private_key_hex)", "title": "" }, { "docid": "2e0b1f3b89706890d148dffba131b4bd", "score": "0.6123482", "text": "def verify_signature(self, public_key, signature):\r\n r, s, z = signature\r\n w = self.__inverse_mod(s, self.__curve.n)\r\n u1 = (z * w) % self.__curve.n\r\n u2 = (r * w) % self.__curve.n\r\n x, _ = self.__point_add( # _:y\r\n self.__scalar_multiply(u1, self.__curve.g),\r\n self.__scalar_multiply(u2, public_key)\r\n )\r\n if (r % self.__curve.n) == (x % self.__curve.n):\r\n return True # Signature matches!\r\n else:\r\n return False # Invalid signature!\r", "title": "" }, { "docid": "55b8c562cdf4bdcd30e31b2b9dfa25c3", "score": "0.6117594", "text": "def verify(self, msg, key, sig):\n try:\n msg = bytes(msg, 'utf-8') if type(msg) is not bytes else msg\n sig = bytes(sig, 'utf-8') if type(sig) is not bytes else sig\n\n if isinstance(key, Ed25519PrivateKey):\n key = key.public_key()\n key.verify(sig, msg)\n return True # If no exception was raised, the signature is valid.\n except cryptography.exceptions.InvalidSignature:\n return False", "title": "" }, { "docid": "69c0a008359fc718c7abd33569b8c9c3", "score": "0.60939306", "text": "def _validate_keys(self):\n if type(self.keys) != dict:\n raise securesystemslib.exceptions.FormatError(\n \"keys dictionary is malformed!\")\n\n securesystemslib.formats.KEYDICT_SCHEMA.check_match(self.keys)\n\n for keyid, key in six.iteritems(self.keys):\n securesystemslib.formats.PUBLIC_KEY_SCHEMA.check_match(key)", "title": "" }, { "docid": "310d22d4fa5bf79ee418d98bd5abddf9", "score": "0.6091117", "text": "def test_non_privkey(self):\n\n cmd = [\n \"python\",\n SIIPSTITCH,\n os.path.join(IMAGES_PATH, \"BIOS_old.bin\"),\n os.path.join(IMAGES_PATH, \"Vbt.bin\"),\n \"-ip\",\n \"vbt\",\n \"-k\",\n os.path.join(IMAGES_PATH, \"nonprivkey.pem\"),\n ]\n\n results = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n assert b\"is not an RSA private key\" in results.stderr", "title": "" }, { "docid": "2070826879edd5e7307894a2ba92b40c", "score": "0.6061757", "text": "def populate_private_key(self):\n info = Database.get_user_account(self.username)\n if \"private_key\" not in info:\n self.private_key = b\"\"\n return False\n\n private_key_enc = base64.b64decode(info[\"private_key\"].encode()[2:-1])\n iv = base64.b64decode(info[\"aes_iv\"].encode()[2:-1])\n tag = base64.b64decode(info[\"tag\"].encode()[2:-1])\n tag_contents = info[\"private_key\"] + info[\"aes_iv\"]\n if not Crypto_Functions.check_hmac_b64(tag_contents.encode(), tag, self.password_hmac):\n self.private_key = b\"\"\n return False\n private_key_b64 = Crypto_Functions.aes_decrypt(private_key_enc, iv, self.password_aes)\n private_key = base64.b64decode(private_key_b64.encode()[2:-1])\n\n self.private_key = private_key\n return True", "title": "" }, { "docid": "d9917412222aeb9b994450d2e943a0e1", "score": "0.60575694", "text": "def check_key(self, passphrase=None):\n signed_data = self.gpg.sign('test message to check 
passphrase',\n keyid=self.keyid, passphrase=passphrase)\n if signed_data.data and self.gpg.verify(signed_data.data).valid:\n return True\n print('%sWrong passphrase!%s' % (Fore.RED, Style.RESET_ALL))\n return False", "title": "" }, { "docid": "8443b615043e5b38f8d8200ef98707e5", "score": "0.5993006", "text": "def can_sign(self):\n return self._key.has_private()", "title": "" }, { "docid": "8693e5ff4c094d53221e267ac61c5edb", "score": "0.5971891", "text": "def test_failedPrivateKeyAuthenticationWithSignature(self):\n blob = keys.Key.fromString(keydata.publicRSA_openssh).blob()\n obj = keys.Key.fromString(keydata.privateRSA_openssh)\n packet = (NS('foo') + NS('none') + NS('publickey') + '\\xff'\n + NS('ssh-rsa') + NS(blob) + NS(obj.sign(blob)))\n self.authServer.transport.sessionID = 'test'\n d = self.authServer.ssh_USERAUTH_REQUEST(packet)\n return d.addCallback(self._checkFailed)", "title": "" }, { "docid": "c6f2292f32546b003f78a22d8a380229", "score": "0.5968217", "text": "def _validate_pubkeys(self):\n if type(self.pubkeys) != list:\n raise securesystemslib.exceptions.FormatError(\n \"The pubkeys field should be a list!\")\n\n for keyid in self.pubkeys:\n securesystemslib.formats.KEYID_SCHEMA.check_match(keyid)", "title": "" }, { "docid": "2f41c1aa3de17e14a1862b31916b95f6", "score": "0.5940851", "text": "def test_execute_with_rsa_private_key(self):\n self._run_token_test('----BEGIN RSA PRIVATE KEY----')", "title": "" }, { "docid": "6da38ab65e6b37be50395893d930bd5c", "score": "0.59104985", "text": "def verify(public_key: Ed25519PublicKey, signature:bytes, data: bytes) -> bool:\n try:\n public_key.verify(signature, data)\n except:\n return False\n return True", "title": "" }, { "docid": "0626abc45f5a6d7fda61e4092d1c1824", "score": "0.58758503", "text": "def check_passphrase(pem: str, passphrase: Optional[str] = None) -> bool:\n try:\n serialization.load_pem_private_key(\n pem.encode(\"utf-8\"), passphrase.encode(\"utf-8\") if passphrase else None, backend=default_backend()\n )\n return True\n except ValueError as e:\n if str(e) == \"Bad decrypt. Incorrect password?\":\n return False\n raise e\n except TypeError as e:\n if str(e) == \"Password was not given but private key is encrypted\":\n return False\n raise e", "title": "" }, { "docid": "9c68cd5ebdebe6116be1be35c8a4dd04", "score": "0.584873", "text": "def validate_signature(public_key, signature, message):\n public_key = (base64.b64decode(public_key)).hex()\n signature = base64.b64decode(signature)\n vk = ecdsa.VerifyingKey.from_string(bytes.fromhex(public_key), curve=ecdsa.SECP256k1)\n # Try changing into an if/else statement as except is too broad.\n try:\n return vk.verify(signature, message.encode())\n except ecdsa.BadSignatureError:\n print(f\"Signature {signature} is not valid!\")\n return False", "title": "" }, { "docid": "0b02a7116fd0091030394e249133e599", "score": "0.5848695", "text": "def validate_provision_key():\n\n try:\n EC2_CLIENT.describe_key_pairs(KeyNames=[PROV_KEY])\n except EC2_CLIENT.exceptions.ClientError:\n sys.exit(\n 'ERROR: requested provisioning-key [' + PROV_KEY + \\\n '] not found. 
Aborting...'\n )", "title": "" }, { "docid": "106352449f02f4fb93413e9941962212", "score": "0.5848276", "text": "def verify(self, data, signature, offset=0, length=0):\n assert isinstance(data, str), type(data)\n assert isinstance(signature, str), type(signature)\n assert isinstance(offset, (int, long)), type(offset)\n assert isinstance(length, (int, long)), type(length)\n\n if length == 0:\n # default LENGTH is len(DATA[OFFSET:])\n length = len(data) - offset\n\n elif len(data) < offset + length:\n # DATA is to small, we expect len(DATA[OFFSET:OFFSET+LENGTH]) to be LENGTH\n return False\n\n return self._public_key and \\\n self._signature_length == len(signature) \\\n and ec_verify(self._ec, sha1(data[offset:offset + length]).digest(), signature)", "title": "" }, { "docid": "20a226440f1e1830a4772346e71ed67b", "score": "0.5841948", "text": "def test_is_multisig_contract_invalid_public_key(self):\n script = bytearray([int(vm.OpCode.PUSHINT8)])\n # signature count\n script += b'\\x02'\n # public key 1\n script += bytearray([int(vm.OpCode.PUSHDATA1)])\n script += bytearray([33])\n script += b'\\x00' * 33\n # public key 2, but the key data is too short\n script += bytearray([int(vm.OpCode.PUSHDATA1)])\n script += b'\\xFF' * 10\n self.assertFalse(contracts.Contract.is_multisig_contract(script))", "title": "" }, { "docid": "2de66c8351f84d1806614577886c7832", "score": "0.5841092", "text": "def validate(self):\n if not isinstance(self.value, bytes):\n raise TypeError(\"key value must be bytes\")\n elif not isinstance(self.cryptographic_algorithm,\n enums.CryptographicAlgorithm):\n raise TypeError(\"key algorithm must be a CryptographicAlgorithm \"\n \"enumeration\")\n elif not isinstance(self.cryptographic_length, six.integer_types):\n raise TypeError(\"key length must be an integer\")\n\n mask_count = len(self.cryptographic_usage_masks)\n for i in range(mask_count):\n mask = self.cryptographic_usage_masks[i]\n if not isinstance(mask, enums.CryptographicUsageMask):\n position = \"({0} in list)\".format(i)\n raise TypeError(\n \"key mask {0} must be a CryptographicUsageMask \"\n \"enumeration\".format(position))\n\n name_count = len(self.names)\n for i in range(name_count):\n name = self.names[i]\n if not isinstance(name, six.string_types):\n position = \"({0} in list)\".format(i)\n raise TypeError(\"key name {0} must be a string\".format(\n position))\n\n if not self.key_wrapping_data:\n if (len(self.value) * 8) != self.cryptographic_length:\n msg = \"key length ({0}) not equal to key value length ({1})\"\n msg = msg.format(\n self.cryptographic_length,\n len(self.value) * 8\n )\n raise ValueError(msg)", "title": "" }, { "docid": "e4964f96ba0a3e2b9b2d3568f02ff699", "score": "0.5834701", "text": "def validate_public_key(self):\n if self.key:\n # Log validation start.\n start_log = logs.SSH_KEY_VALIDATION_START.format(ip=self.ip)\n self._logger.debug(start_log)\n # Read existing key information.\n expected_name = self.key.get_name()\n expected_value = self.key.asbytes()\n # Query key information from the host.\n name, value = self.query_public_key()\n # Check key validity.\n valid_key = name == expected_name and value == expected_value\n if not valid_key:\n # Log and raise exception for an invalid key.\n failure_log = logs.SSH_KEY_VALIDATION_FAILURE.format(\n ip=self.ip, expected_name=expected_name, name=name\n )\n self._logger.warn(failure_log)\n raise SSHException(failure_log)\n # Log public key validation success.\n sucess_log = logs.SSH_KEY_VALIDATION_SUCCESS.format(ip=self.ip)\n 
self._logger.info(sucess_log)\n else:\n skip_log = logs.SSH_KEY_VALIDATION_SKIP.format(ip=self.ip)\n self._logger.info(skip_log)", "title": "" }, { "docid": "fa08b5546073306d780672e79b5fb2ec", "score": "0.58239204", "text": "def is_private_bip32_valid(hwif, allowable_netcodes=None):\n def info_filter_f(k):\n return k.get(\"key_type\") == 'bip32' and k.get(\"is_private\") is True\n\n return _is_key_valid(hwif, allowable_netcodes, info_filter_f, types=[\"bip32\"])", "title": "" }, { "docid": "6691261f056e1933ad1517f89478f210", "score": "0.5807409", "text": "def test_generate(self):\n crypto = brkt_cli.crypto.new()\n self.assertTrue(\n isinstance(crypto.private_key, ec._EllipticCurvePrivateKey))\n self.assertTrue(\n isinstance(crypto.public_key, ec._EllipticCurvePublicKey))\n self.assertTrue('BEGIN PUBLIC KEY' in crypto.public_key_pem)\n\n pem = crypto.get_private_key_pem()\n self.assertTrue('BEGIN EC PRIVATE KEY' in pem)\n pem = crypto.get_private_key_pem('test123')\n self.assertTrue('Proc-Type: 4,ENCRYPTED' in pem)", "title": "" }, { "docid": "e7edc784f02d2f1a24d068dc898579fc", "score": "0.5804984", "text": "def check(self):\n for key, value in self.__dict__.items():\n if key not in self._keys:\n raise InvalidCredential(\"attribute not expected: %s\" % key)\n for key, value in self._keys.items():\n optional = value.get(\"optional\", False)\n if (not optional) and key not in self.__dict__:\n raise InvalidCredential(\"attribute missing: %s\" % key)\n match = value.get('match', None)\n if match is not None and self.__dict__[key] != match:\n raise InvalidCredential(\"invalid value for: %s\" % key)\n # so far so good\n return True", "title": "" }, { "docid": "5572bd8fd7a2ed28ece2262b682c023f", "score": "0.5773111", "text": "def validate_gpg_key(gpgkeyid):\n\n conf = get_config()\n _gpghome = conf[\"HOME\"] + \"/.gnupg\"\n gpg = gnupg.GPG(gnupghome=_gpghome)\n private_keys = gpg.list_keys(True, keys=gpgkeyid)\n\n return private_keys", "title": "" }, { "docid": "ff8e17b23c26294de701019ad29bcd86", "score": "0.5740869", "text": "def check_valid(self):\n if self.signature != self.C_SIGNATURE:\n raise ValueError('Invalid signature ( except: \"%s\", actual: \"%s\" )'\n %(self.C_SIGNATURE, self.signature))\n if self.header_size != self.C_STRUCTURE_SIZE:\n raise ValueError('Invalid header size ( except: %x, actual: %x )'\n %(self.C_STRUCTURE_SIZE, self.header_size))", "title": "" }, { "docid": "59bb4e6b349c4befb87d3d3224a8eb80", "score": "0.5725678", "text": "def break_key(pub):\r\n key_broken = False\r\n message = 'test'\r\n # while not key_broken:\r\n (p, q) = factor_number(pub[1])\r\n totient = get_totient(p, q)\r\n private_exponent = get_private_exponent(pub[0], totient)\r\n private_key = private_exponent, pub[1]\r\n\r\n return private_exponent, pub[1]", "title": "" }, { "docid": "7d2886ece52148dbb268fb6f42445fb2", "score": "0.57127756", "text": "def verify(self, signature, data):\n if len(signature) == 40:\n # DSA key with no padding\n signatureType, signature = b'ssh-dss', common.NS(signature)\n else:\n signatureType, signature = common.getNS(signature)\n\n if signatureType != self.sshType():\n return False\n\n keyType = self.type()\n if keyType == 'RSA':\n k = self._keyObject\n if not self.isPublic():\n k = k.public_key()\n args = (\n common.getNS(signature)[0],\n data,\n padding.PKCS1v15(),\n hashes.SHA1(),\n )\n elif keyType == 'DSA':\n concatenatedSignature = common.getNS(signature)[0]\n r = int_from_bytes(concatenatedSignature[:20], 'big')\n s = int_from_bytes(concatenatedSignature[20:], 
'big')\n signature = encode_dss_signature(r, s)\n k = self._keyObject\n if not self.isPublic():\n k = k.public_key()\n args = (signature, data, hashes.SHA1())\n\n elif keyType == 'EC': # Pragma: no branch\n concatenatedSignature = common.getNS(signature)[0]\n rstr, sstr, rest = common.getNS(concatenatedSignature, 2)\n r = int_from_bytes(rstr, 'big')\n s = int_from_bytes(sstr, 'big')\n signature = encode_dss_signature(r, s)\n\n k = self._keyObject\n if not self.isPublic():\n k = k.public_key()\n\n keySize = self.size()\n if keySize <= 256: # Hash size depends on key size\n hashSize = hashes.SHA256()\n elif keySize <= 384:\n hashSize = hashes.SHA384()\n else:\n hashSize = hashes.SHA512()\n args = (signature, data, ec.ECDSA(hashSize))\n\n try:\n k.verify(*args)\n except InvalidSignature:\n return False\n else:\n return True", "title": "" }, { "docid": "1fb2319bf6a0dbf92ff10cb443165a30", "score": "0.57016116", "text": "def test_rsa_key_has_correct_repr(self):\n key = pem.RSAPrivateKey('test')\n assert \"<RSAPrivateKey(pem_str='test')>\" == repr(key)", "title": "" }, { "docid": "7834ffd0831bb81237878b064b53d231", "score": "0.5683259", "text": "def test_private_identity_invalid_identity_id(invalid_id: str) -> None:\n with pytest.raises(ValueError) as excinfo:\n PrivateIdentity(\n CryptoProtocol.SHA256WithECDSA.generate_private_key(), invalid_id\n )\n assert (\n str(excinfo.value)\n == f\"invalid identifier '{invalid_id}' - valid characters are [a-zA-Z0-9._\\\\-+]\"\n )", "title": "" }, { "docid": "c6b9f0b98cb3384f27cc8da7d5e61b7c", "score": "0.566719", "text": "def test_execute_with_pgp_private_key(self):\n self._run_token_test('----BEGIN PGP PRIVATE KEY BLOCK----')", "title": "" }, { "docid": "70fbd2b3a309e8a86e79101f067cc9fc", "score": "0.5665761", "text": "def test__private_identity_raises_typerror() -> None:\n with pytest.raises(TypeError) as excinfo:\n PrivateIdentity(\"123456\") # type: ignore[arg-type]\n assert str(excinfo.value) == \"must be PrivateKey, not str\"", "title": "" }, { "docid": "e46772b2f2c4cf45067a7fc30220adf1", "score": "0.5663114", "text": "def test_default_private_key(app, private_key):\n assert keys.default_private_key() == private_key", "title": "" }, { "docid": "88c4f165ff28d014f07f15c4c1ac762b", "score": "0.5659473", "text": "def test_verify_using_expired_keyid(self):\n\n # Create a signature.\n signer = GPGSigner(self.signing_subkey_keyid, self.gnupg_home)\n signature = signer.sign(self.test_data)\n\n # Verify signature using expired key.\n key = GPGKey.from_keyring(self.expired_keyid, self.gnupg_home)\n with self.assertRaises(VerificationError):\n key.verify_signature(signature, self.test_data)", "title": "" }, { "docid": "f5f4cc150b4f80e3582082ae967bb5f8", "score": "0.56398064", "text": "def load_pem_private_key(self, data, password):", "title": "" }, { "docid": "35d5cf63f627f00e901e1b997a5b91d7", "score": "0.56291133", "text": "def test_gpg_sign_and_verify_object(self):\n\n # Create a signature.\n signer = GPGSigner(self.signing_subkey_keyid, self.gnupg_home)\n signature = signer.sign(self.test_data)\n\n # Generate Key from gnupg keyring.\n key = GPGKey.from_keyring(self.signing_subkey_keyid, self.gnupg_home)\n\n key.verify_signature(signature, self.test_data)\n with self.assertRaises(UnverifiedSignatureError):\n key.verify_signature(signature, self.wrong_data)\n\n # Generate Key from dict.\n key_dict = export_pubkey(self.signing_subkey_keyid, self.gnupg_home)\n key = GPGKey.from_dict(key_dict[\"keyid\"], key_dict)\n\n key.verify_signature(signature, 
self.test_data)\n with self.assertRaises(UnverifiedSignatureError):\n key.verify_signature(signature, self.wrong_data)", "title": "" }, { "docid": "a2713f8498cbcbbe94322de327ed86f8", "score": "0.56253755", "text": "def test_signature_verify_invalid_signing_key(self):\n e = engine.KmipEngine()\n e._data_store = self.engine\n e._data_store_session_factory = self.session_factory\n e._data_session = e._data_store_session_factory()\n e._is_allowed_by_operation_policy = mock.Mock(return_value=True)\n e._logger = mock.MagicMock()\n e._cryptography_engine.logger = mock.MagicMock()\n\n signing_key = pie_objects.OpaqueObject(\n b'\\x01\\x02\\x03\\x04',\n enums.OpaqueDataType.NONE\n )\n\n e._data_session.add(signing_key)\n e._data_session.commit()\n e._data_session = e._data_store_session_factory()\n\n unique_identifier = str(signing_key.unique_identifier)\n payload = payloads.SignatureVerifyRequestPayload(\n unique_identifier=unique_identifier,\n cryptographic_parameters=attributes.CryptographicParameters(\n padding_method=enums.PaddingMethod.PSS,\n digital_signature_algorithm=enums.DigitalSignatureAlgorithm.\n SHA1_WITH_RSA_ENCRYPTION\n ),\n data=b'',\n signature_data=b''\n )\n\n args = (payload, )\n self.assertRaisesRegex(\n exceptions.PermissionDenied,\n \"The requested signing key is not a public key. A public key must \"\n \"be specified.\",\n e._process_signature_verify,\n *args\n )", "title": "" }, { "docid": "c06ac259ca2ba389fa56bae3295d753b", "score": "0.5624351", "text": "def test_execute_with_ssh_ec_private_key(self):\n self._run_token_test('----BEGIN EC PRIVATE KEY----')", "title": "" }, { "docid": "09e787f7b4700967b986f4fdff293608", "score": "0.5618602", "text": "def test_malformed_field(self):\n create_data = {'KEY_MEMBER':True, 'KEY_TYPE':'rsa-ssh', 'KEY_DESCRIPTION':'SSH key for user Arlene Brown.', 'KEY_PUBLIC':'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDhEds1KZkBCX9e91wN4ADs1+dGEm1wUYIe2WfDW3MwLkxwsiFvHAeD7uKUOOGXAZLevTaXWRuinkFaEu9hXfmnG46R2yyxgtq3zNQP+a7mPCbYV8x9LLQtGHXD9A19300WdsSmBlFvM6cTVWXeSnRSQq1LL2vbp0GlJk/UvqOoAEOEBMeQgQL4h1Bd4tMb8b2+FceFa43vDkHVy9QaVWjIVeCMqmYoR0A8MRI2Xm52KJ+XbyamtGWwyx817BSUurrVFc2levWHnz69GK9QuZWNL9LihkkMQoWRrKfr4lf5rbXCyRoUjZ+hTxxL0oEfjfXiaeinmJEMN5gudQ8oi6Y5'}\n self._test_create(create_data, 'KEY', 'KEY_ID', 3)", "title": "" }, { "docid": "f78e6cbb81fc241b6de40e5d70efc37f", "score": "0.56097734", "text": "def test_verify_signature_raises_error_no_public_key() -> None:\n public_identity = PublicIdentity(\"no.public.key\")\n\n with pytest.raises(RuntimeError) as excinf:\n public_identity.verify_signature(\"siganture\", b\"some data\")\n assert str(excinf.value) == \"identity 'no.public.key' has no public key\"", "title": "" }, { "docid": "f51e2fc32962b1a26865a302acca27f5", "score": "0.5596566", "text": "def verify(message, signature, pk):\n\n if not isinstance(pk, ec.EllipticCurvePublicKey):\n logger.error(\"The value passed as pk is not a public key (EllipticCurvePublicKey)\")\n return False\n\n if isinstance(signature, str):\n signature = unhexlify(signature)\n\n try:\n pk.verify(signature, message, ec.ECDSA(hashes.SHA256()))\n\n return True\n\n except InvalidSignature:\n return False", "title": "" }, { "docid": "9228aa108ced87fa6fde2608440b5561", "score": "0.55942196", "text": "def verify_signature(public_key, signature, data):\n\t\tsignature = tuple(map(lambda sig: int(sig, 0), signature.split(',')))\n\t\treturn ecdsa.verify(signature, data, public_key, curve=secp256k1)", "title": "" }, { "docid": "d6cc4d2cb58f37bf942a40d3ba1f37b4", "score": 
"0.55846715", "text": "def check_key(self, key):\n if len(key) != 8:\n raise ValueError('Auth key is not 8 bytes long.')\n return bytes(key) in self.auth_keys", "title": "" }, { "docid": "e63526df36d6d025105339e51b8350e2", "score": "0.55715114", "text": "def test_verify_jwt_fails_with_bad_pvt_key(self):\n\n # Generate a new private key\n private_key = rsa.generate_private_key(public_exponent=65537,\n key_size=2048,\n backend=default_backend())\n\n # Don't set the private key\n api_settings.JWT_PRIVATE_KEY = private_key\n\n client = APIClient(enforce_csrf_checks=True)\n orig_token = self.get_token()\n\n # Now try to get a refreshed token\n response = client.post('/auth-token-verify/', {'token': orig_token},\n format='json')\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertRegexpMatches(response.data['non_field_errors'][0],\n 'Error decoding signature')", "title": "" }, { "docid": "aa8f2375ca291254771fa751d12f68b3", "score": "0.5569112", "text": "def is_valid(self):\n # use bytes_to_bit_field on self.flags to get the flag_bits\n flag_bits = bytes_to_bit_field(self.flags)\n # set hashes to be the reversed hashes of everything in self.hashes\n hashes = [h[::-1] for h in self.hashes]\n # initialize the merkle tree with self.total\n self.merkle_tree = MerkleTree(self.total)\n # populate_tree with flag_bits and hashes\n self.merkle_tree.populate_tree(flag_bits, hashes)\n # check if the computed root [::-1] is the same as the merkle root\n return self.merkle_tree.root()[::-1] == self.header.merkle_root", "title": "" }, { "docid": "20ef53b5e0f9aed6c69e8cd510ab06e4", "score": "0.5565368", "text": "def validate(self):\n return self._hash_object.hexdigest() == self.expected_hash", "title": "" }, { "docid": "50c71f88c8a18ceca69d9b484dfe7d5f", "score": "0.5559948", "text": "def check_key(self):\n try:\n key = self.get_key()\n key.close()\n return True\n except Exception as e:\n return False", "title": "" }, { "docid": "e42c39732a02527820f4024f7d4cb0fc", "score": "0.5553956", "text": "def test_private_identity_generated_id() -> None:\n identity = PrivateIdentity(CryptoProtocol.SHA256WithECDSA.generate_private_key())\n assert uuid.UUID(identity.identity_id)", "title": "" }, { "docid": "7208ed8f4883824b20c934b2cdc10909", "score": "0.5553509", "text": "def testSignatureVerification(self):\n gnupg = self._get_gnupg()\n gnupg.import_key(test_gpg_key)\n self._assertGoodSignature(gnupg)", "title": "" }, { "docid": "95537ca41ac4ad7be033cf7be4ec1ee7", "score": "0.5551535", "text": "def is_valid(kt):\n try:\n int(kt)\n except ValueError as e:\n return False\n\n if len(kt) != 10:\n return False\n\n valid_centuries = _get_valid_centuries()\n if int(kt[-1]) not in valid_centuries:\n return False\n\n try:\n return calculate_checksum(kt) == int(kt[8])\n except InvalidKtFormat as e:\n return False", "title": "" }, { "docid": "a5a187f9c70d1744be8875ee10495152", "score": "0.55382967", "text": "def test_create_and_import_encrypted_rsa(self):\n name = \"key2\"\n password = \"123456\"\n bits = 3072\n generate_and_write_rsa_keypair(name, bits, password)\n private_key = import_rsa_key_from_file(name, password)\n public_key = import_rsa_key_from_file(name + \".pub\")\n\n securesystemslib.formats.KEY_SCHEMA.check_match(private_key)\n self.assertTrue(private_key[\"keyval\"].get(\"private\"))\n self.assertTrue(\n securesystemslib.formats.PUBLIC_KEY_SCHEMA.matches(public_key))", "title": "" }, { "docid": "ccb09c1799533dd289b66bf15073f177", "score": "0.55267334", "text": "def 
validate_cert(certificate, private_key=None,\n private_key_passphrase=None, intermediates=None):\n x509 = _get_x509_from_pem_bytes(certificate)\n if intermediates:\n for x509Pem in _split_x509s(intermediates):\n _get_x509_from_pem_bytes(x509Pem)\n if private_key:\n pkey = _read_privatekey(private_key, passphrase=private_key_passphrase)\n ctx = SSL.Context(SSL.TLSv1_METHOD)\n ctx.use_certificate(x509)\n try:\n ctx.use_privatekey(pkey)\n ctx.check_privatekey()\n except Exception:\n raise exceptions.MisMatchedKey\n return True", "title": "" }, { "docid": "3c288e9d9cbf732199dca20332d411b9", "score": "0.55163497", "text": "def _load_key(private_object):\n\n if libcrypto_version_info < (1,) and private_object.algorithm == 'dsa' and private_object.hash_algo == 'sha2':\n raise AsymmetricKeyError(pretty_message(\n '''\n OpenSSL 0.9.8 only supports DSA keys based on SHA1 (2048 bits or\n less) - this key is based on SHA2 and is %s bits\n ''',\n private_object.bit_size\n ))\n\n source = _unwrap_private_key_info(private_object).dump()\n\n buffer = buffer_from_bytes(source)\n evp_pkey = libcrypto.d2i_AutoPrivateKey(null(), buffer_pointer(buffer), len(source))\n if is_null(evp_pkey):\n handle_openssl_error(0)\n return PrivateKey(evp_pkey, private_object)", "title": "" }, { "docid": "428e7a81a422c37b9edfcf1eaf28c336", "score": "0.5514135", "text": "def test_create_and_import_rsa(self):\n name = \"key1\"\n generate_and_write_rsa_keypair(name)\n private_key = import_rsa_key_from_file(name)\n public_key = import_rsa_key_from_file(name + \".pub\")\n\n securesystemslib.formats.KEY_SCHEMA.check_match(private_key)\n self.assertTrue(private_key[\"keyval\"].get(\"private\"))\n self.assertTrue(\n securesystemslib.formats.PUBLIC_KEY_SCHEMA.matches(public_key))", "title": "" }, { "docid": "f134e6047e75fe15b47a1b84db340ccf", "score": "0.5513824", "text": "def test_failedPrivateKeyAuthenticationWithoutSignature(self):\n blob = keys.Key.fromString(keydata.publicDSA_openssh).blob()\n packet = (NS('foo') + NS('none') + NS('publickey') + '\\x00'\n + NS('ssh-dsa') + NS(blob))\n d = self.authServer.ssh_USERAUTH_REQUEST(packet)\n return d.addCallback(self._checkFailed)", "title": "" }, { "docid": "12e9e831607f33992e6323024c22e35c", "score": "0.5499049", "text": "def test_encrypted_key(self):\n options = self.parseArguments([\n self.command_name,\n u'--common-name=domain.com',\n u'--key-size=512',\n u'--key-password=\\u20acuro',\n ])\n\n result = generate_csr(options)\n\n # We decrypt the key and compare the unencrypted serialization.\n key = crypto.load_privatekey(\n crypto.FILETYPE_PEM,\n result['key_pem'],\n u'\\u20acuro'.encode('utf-8'))\n self.assertEqual(\n crypto.dump_privatekey(crypto.FILETYPE_PEM, key),\n crypto.dump_privatekey(crypto.FILETYPE_PEM, result['key']),\n )", "title": "" }, { "docid": "b765e58d027f3524e7d8b40e16a03d67", "score": "0.54961425", "text": "def verifyKey(params, pub, proof):\n (G, g, hs, o) = params\n c, r = proof\n gw_prime = c * pub + r * g\n return to_challenge([g, gw_prime]) == c", "title": "" }, { "docid": "cffacaf00c4f2685e0f740b517143118", "score": "0.54842347", "text": "def test_derive_key_invalid_derivation_type(self):\n e = engine.KmipEngine()\n e._data_store = self.engine\n e._data_store_session_factory = self.session_factory\n e._data_session = e._data_store_session_factory()\n e._logger = mock.MagicMock()\n e._cryptography_engine.logger = mock.MagicMock()\n\n payload = payloads.DeriveKeyRequestPayload(\n object_type=enums.ObjectType.CERTIFICATE\n )\n\n args = (payload, )\n 
self.assertRaisesRegex(\n exceptions.InvalidField,\n \"Key derivation can only generate a SymmetricKey or SecretData \"\n \"object.\",\n e._process_derive_key,\n *args\n )", "title": "" }, { "docid": "d9423f85d6a84aa691daa4f3ef5c329f", "score": "0.5476177", "text": "def test_invalid(self):\n invalid = [\n # Checksum error\n \"m1gZHpD1AzEixLgcnncod5df6CntYK4Jpi\",\n \"121tAaz5x1HUXrCNLbtMDqcw6o5GNn4xqX\",\n \"n3SjFgAhHAv8PcTuq5x2e9sugcXDpMTzX7\",\n \"m4dT2rG2nms1yEWP75C1oc65our7jRp4bj\",\n \"m5ifG3WbdPsVWHWP7JEQzkz2GNhRzhDKjP\",\n\n # Length error (but valid Base58Check)\n \"2REFC168wX4h1\",\n \"AH5FpvXbhaAYyM194A\",\n \"C6Vix8wPoc7cemv9Nk16Y2J4PiKQXjFHWyESmaVcoBzEAN4B\",\n\n # Version byte Error (valid length and Base58Check)\n \"uPRYFUWTGbeTdeJ1DYBECQGKq2dhgqv5MF\", # 1\n \"UpjEU4HgKsS8jJq4RD7eE9qdxQ1y7Vg1m\", # 2\n \"2HVwCges6goBmbb17FskcUhQtxuuTJsLCW\", # 3\n \"2gqYBnx9osG4b2j68gD56byCXUAr6moG79\", # 4\n \"mLfUZJYh4zNwGyWGj9WrVtpbEwUqUrZKeB\", #110\n \"n9LgXX9GVMJguqnSmzBVU9NAVwzirtA1fv\", #112\n \"2MYPkFXtBRKmQnpMdoopwiYxTjpP1rdNmEg\", #195\n \"2NM4xDkUkqghARgdoreVagoW2zptuN3foh1\", #197\n ]\n\n for address in invalid:\n with self.assertRaises(ValueError):\n BitcoinAddress(address)", "title": "" }, { "docid": "1c53d4ef7b420c4345e9213ec0251bc9", "score": "0.5468982", "text": "def valid_key_exists(nova_client, keypair_name):\n pub_key = get_public_key(nova_client, keypair_name)\n priv_key = get_private_key(keypair_name)\n if not all([pub_key, priv_key]):\n return False\n return cert.is_keys_valid(pub_key, priv_key)", "title": "" }, { "docid": "cac3c1050ecbb64f7d253ff92da94d5f", "score": "0.546239", "text": "def test_valid(self):\n valid = [\n \"mjgZHpD1AzEixLgcnncod5df6CntYK4Jpi\",\n \"1F1tAaz5x1HUXrCNLbtMDqcw6o5GNn4xqX\",\n \"n2SjFgAhHAv8PcTuq5x2e9sugcXDpMTzX7\",\n \"mgdT2rG2nms1yEWP75C1oc65our7jRp4bj\",\n \"1NqCDioR4G6TeoxcVxx5wpghQ7UmESD4uz\",\n \"2N66DDrmjDCMM3yMSYtAQyAqRtasSkFhbmX\",\n \"mogLTTLNLBVvYw3WxbKGnvMJqeGvqPDpbp\",\n \"16WKTYdxxd2jp9CLFzQu5HJQpDE485rRUc\",\n \"mpifG3WbdPsVWHWP7JEQzkz2GNhRzhDKjP\"]\n\n for address in valid:\n BitcoinAddress(address)", "title": "" }, { "docid": "7cb6396bd9795569c154af445631d20b", "score": "0.5451556", "text": "def assert_keys_valid(self, dictionary):\n for key in dictionary.keys():\n self.assertIsInstance(key, str)\n self.assertNotIn('.', key)\n self.assertNotIn('$', key)\n if isinstance(dictionary[key], dict):\n self.assert_keys_valid(dictionary[key])", "title": "" }, { "docid": "4b04f05e3728b310d39482fe8e5dc60f", "score": "0.54432833", "text": "def test_empty_privkey(self):\n\n with open(\"empty_key.pem\", \"wb\") as f:\n pass\n\n cmd = [\n \"python\",\n SIIPSTITCH,\n os.path.join(IMAGES_PATH, \"BIOS_old.bin\"),\n os.path.join(IMAGES_PATH, \"Vbt.bin\"),\n \"-ip\",\n \"vbt\",\n \"-k\",\n \"empty_key.pem\",\n ]\n\n results = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n assert (\n b\"the key file size must be greater than 0 and less than 2k!\"\n in results.stderr\n )\n os.remove(\"empty_key.pem\")", "title": "" }, { "docid": "4ba215d0c287f88d6bc2b29b4ce549f8", "score": "0.54298186", "text": "def test_check_no_key(self):\n self.assertFalse(\n CTCSerializer.check_structure(self.invalid_structure_6))", "title": "" }, { "docid": "8406920ef6079298bec83e0345763941", "score": "0.5414546", "text": "def validate(self):\n if not isinstance(self.value, bytes):\n raise TypeError(\"secret value must be bytes\")\n elif not isinstance(self.data_type, enums.SecretDataType):\n raise TypeError(\"secret data type must be a 
SecretDataType \"\n \"enumeration\")\n\n mask_count = len(self.cryptographic_usage_masks)\n for i in range(mask_count):\n mask = self.cryptographic_usage_masks[i]\n if not isinstance(mask, enums.CryptographicUsageMask):\n position = \"({0} in list)\".format(i)\n raise TypeError(\n \"secret data mask {0} must be a CryptographicUsageMask \"\n \"enumeration\".format(position))\n\n name_count = len(self.names)\n for i in range(name_count):\n name = self.names[i]\n if not isinstance(name, six.string_types):\n position = \"({0} in list)\".format(i)\n raise TypeError(\"secret data name {0} must be a string\".format(\n position))", "title": "" }, { "docid": "e8fe8de8969e64bfa4c715b681901e0e", "score": "0.5401104", "text": "def _key_validation(self, key):\n if not isinstance(key, str):\n raise BoxKeyError(\"keys must be strings\")\n if key[0].isdigit():\n return False\n elif key[0].isupper():\n return False\n elif len(key.split(\".\")) < 2:\n return False\n return True", "title": "" }, { "docid": "bb13e524cb5fa265a6bcaa50bf2c2ed8", "score": "0.5397689", "text": "def _assert_integrity_signature(self, signature):\n self.assertEqual(signature.user, self.user)\n self.assertEqual(signature.course_key, self.course.id)", "title": "" }, { "docid": "a5339aa86498aa0b4cc239d5ef620b64", "score": "0.53905624", "text": "def assert_valid(self):\n if \"_is_valid\" in self.__dict__:\n assert self.__dict__[\"_is_valid\"](), \"Variable has already been deleted\"", "title": "" }, { "docid": "a5339aa86498aa0b4cc239d5ef620b64", "score": "0.53905624", "text": "def assert_valid(self):\n if \"_is_valid\" in self.__dict__:\n assert self.__dict__[\"_is_valid\"](), \"Variable has already been deleted\"", "title": "" }, { "docid": "a5339aa86498aa0b4cc239d5ef620b64", "score": "0.53905624", "text": "def assert_valid(self):\n if \"_is_valid\" in self.__dict__:\n assert self.__dict__[\"_is_valid\"](), \"Variable has already been deleted\"", "title": "" }, { "docid": "a5339aa86498aa0b4cc239d5ef620b64", "score": "0.53905624", "text": "def assert_valid(self):\n if \"_is_valid\" in self.__dict__:\n assert self.__dict__[\"_is_valid\"](), \"Variable has already been deleted\"", "title": "" }, { "docid": "a5339aa86498aa0b4cc239d5ef620b64", "score": "0.53905624", "text": "def assert_valid(self):\n if \"_is_valid\" in self.__dict__:\n assert self.__dict__[\"_is_valid\"](), \"Variable has already been deleted\"", "title": "" }, { "docid": "a5339aa86498aa0b4cc239d5ef620b64", "score": "0.53905624", "text": "def assert_valid(self):\n if \"_is_valid\" in self.__dict__:\n assert self.__dict__[\"_is_valid\"](), \"Variable has already been deleted\"", "title": "" }, { "docid": "a5339aa86498aa0b4cc239d5ef620b64", "score": "0.53905624", "text": "def assert_valid(self):\n if \"_is_valid\" in self.__dict__:\n assert self.__dict__[\"_is_valid\"](), \"Variable has already been deleted\"", "title": "" }, { "docid": "a5339aa86498aa0b4cc239d5ef620b64", "score": "0.53905624", "text": "def assert_valid(self):\n if \"_is_valid\" in self.__dict__:\n assert self.__dict__[\"_is_valid\"](), \"Variable has already been deleted\"", "title": "" }, { "docid": "a5339aa86498aa0b4cc239d5ef620b64", "score": "0.53905624", "text": "def assert_valid(self):\n if \"_is_valid\" in self.__dict__:\n assert self.__dict__[\"_is_valid\"](), \"Variable has already been deleted\"", "title": "" }, { "docid": "a5339aa86498aa0b4cc239d5ef620b64", "score": "0.53905624", "text": "def assert_valid(self):\n if \"_is_valid\" in self.__dict__:\n assert self.__dict__[\"_is_valid\"](), 
\"Variable has already been deleted\"", "title": "" }, { "docid": "a5339aa86498aa0b4cc239d5ef620b64", "score": "0.53905624", "text": "def assert_valid(self):\n if \"_is_valid\" in self.__dict__:\n assert self.__dict__[\"_is_valid\"](), \"Variable has already been deleted\"", "title": "" }, { "docid": "a5339aa86498aa0b4cc239d5ef620b64", "score": "0.53905624", "text": "def assert_valid(self):\n if \"_is_valid\" in self.__dict__:\n assert self.__dict__[\"_is_valid\"](), \"Variable has already been deleted\"", "title": "" }, { "docid": "a5339aa86498aa0b4cc239d5ef620b64", "score": "0.53905624", "text": "def assert_valid(self):\n if \"_is_valid\" in self.__dict__:\n assert self.__dict__[\"_is_valid\"](), \"Variable has already been deleted\"", "title": "" }, { "docid": "a5339aa86498aa0b4cc239d5ef620b64", "score": "0.53905624", "text": "def assert_valid(self):\n if \"_is_valid\" in self.__dict__:\n assert self.__dict__[\"_is_valid\"](), \"Variable has already been deleted\"", "title": "" }, { "docid": "a5339aa86498aa0b4cc239d5ef620b64", "score": "0.53905624", "text": "def assert_valid(self):\n if \"_is_valid\" in self.__dict__:\n assert self.__dict__[\"_is_valid\"](), \"Variable has already been deleted\"", "title": "" }, { "docid": "a5339aa86498aa0b4cc239d5ef620b64", "score": "0.53905624", "text": "def assert_valid(self):\n if \"_is_valid\" in self.__dict__:\n assert self.__dict__[\"_is_valid\"](), \"Variable has already been deleted\"", "title": "" }, { "docid": "a5339aa86498aa0b4cc239d5ef620b64", "score": "0.53905624", "text": "def assert_valid(self):\n if \"_is_valid\" in self.__dict__:\n assert self.__dict__[\"_is_valid\"](), \"Variable has already been deleted\"", "title": "" }, { "docid": "a5339aa86498aa0b4cc239d5ef620b64", "score": "0.53905624", "text": "def assert_valid(self):\n if \"_is_valid\" in self.__dict__:\n assert self.__dict__[\"_is_valid\"](), \"Variable has already been deleted\"", "title": "" }, { "docid": "a5339aa86498aa0b4cc239d5ef620b64", "score": "0.53905624", "text": "def assert_valid(self):\n if \"_is_valid\" in self.__dict__:\n assert self.__dict__[\"_is_valid\"](), \"Variable has already been deleted\"", "title": "" }, { "docid": "a5339aa86498aa0b4cc239d5ef620b64", "score": "0.53905624", "text": "def assert_valid(self):\n if \"_is_valid\" in self.__dict__:\n assert self.__dict__[\"_is_valid\"](), \"Variable has already been deleted\"", "title": "" }, { "docid": "a5339aa86498aa0b4cc239d5ef620b64", "score": "0.53905624", "text": "def assert_valid(self):\n if \"_is_valid\" in self.__dict__:\n assert self.__dict__[\"_is_valid\"](), \"Variable has already been deleted\"", "title": "" }, { "docid": "a5339aa86498aa0b4cc239d5ef620b64", "score": "0.53905624", "text": "def assert_valid(self):\n if \"_is_valid\" in self.__dict__:\n assert self.__dict__[\"_is_valid\"](), \"Variable has already been deleted\"", "title": "" } ]
7ba3ba8012712185005d25e1ff0f1266
Method to update a review object
[ { "docid": "fa9ba80f1b94b488f94d4ff7576153ec", "score": "0.677282", "text": "def put_review(review_id):\n payload = request.get_json(silent=True)\n reviews = storage.all(Review)\n\n if payload is None:\n abort(400, 'Not a JSON')\n\n for review in reviews.values():\n if review.id == review_id:\n for k, v in payload.items():\n if k != 'created_at' and k != 'updated_at' and k != 'id':\n setattr(review, k, v)\n review.save()\n return(jsonify(review.to_dict()), 200)\n abort(404)", "title": "" } ]
[ { "docid": "6beb46614b3a073877df4d460fc26c60", "score": "0.79591966", "text": "def update(self, request, *args, **kwargs):\n try:\n resources.review_request.get_object(request, *args, **kwargs)\n review = resources.review.get_object(request, *args, **kwargs)\n except ObjectDoesNotExist:\n return DOES_NOT_EXIST\n\n return self.update_review(request, review, *args, **kwargs)", "title": "" }, { "docid": "2c3a425a0e0615d34d110aff607b1453", "score": "0.7330764", "text": "def update_review(review_id):\n review = storage.get(\"Review\", review_id)\n if review is None:\n abort(404)\n review_json = request.get_json()\n if review_json is None:\n abort(400, {\"Not a JSON\"})\n ignore_keys = [\"id\", \"user_id\", \"place_id\", \"created_at\", \"updated_at\"]\n for key, value in review_json.items():\n if key not in ignore_keys:\n setattr(review, key, value)\n storage.save()\n return jsonify(review.to_dict()), 200", "title": "" }, { "docid": "c21625778485cdccf93c77b43523a6cf", "score": "0.72612685", "text": "def update_review(review_id):\n if not request.json:\n return make_response(jsonify({\"error\": \"Not a JSON\"}), 400)\n try:\n review_obj = storage.get(Review, review_id)\n data = request.get_json()\n\n for key, value in data.items():\n if key != 'updated_at' or key != 'created_at':\n if key != 'id' or key != 'place_id' or key != 'user_id':\n setattr(review_obj, key, value)\n\n storage.save()\n return jsonify(review_obj.to_dict()), 200\n except:\n abort(404)", "title": "" }, { "docid": "321782bfca368df05fb405e594fef2fe", "score": "0.72301614", "text": "def update_review(review_id):\n review_obj = get_object(Review, review_id)\n data = request.get_json()\n if type(data) is not dict:\n return make_response(jsonify({'error': 'Not a JSON'}), 400)\n for key, value in data.items():\n if (key != 'id' and key != 'created_at' and key != 'updated_at' and\n key != 'user_id'):\n setattr(review_obj, key, value)\n review_obj.save()\n return jsonify(review_obj.to_dict())", "title": "" }, { "docid": "cba4ecd59dd69130a2196b9226087881", "score": "0.713787", "text": "def put(self, id):\n parser = reqparse.RequestParser()\n parser.add_argument('content', location='json', required=False)\n parser.add_argument('rating', location='json', required=False)\n parser.add_argument('photo', location='json', required=False)\n data = parser.parse_args()\n\n review = Reviews.query.get(id)\n\n # check review is valid\n if review is None:\n return {'code': 404, 'message': 'Review Not Found'}, 404\n\n # update review content\n if data['content'] is not None:\n review.content = data['content']\n\n # update review rating\n if data['rating'] is not None:\n review.content = data['rating']\n\n # update review photo\n if data['photo'] is not None:\n review.content = data['photo']\n\n db.session.commit()\n\n return {'code': 200, 'message': 'Review updated'}, 200", "title": "" }, { "docid": "8ccd05fb68d2598158845853d5fff03e", "score": "0.7054351", "text": "def update(self, review_id, name, genre, image_url, author, review_title, review_message, overall_score):\n params = {'name': name, 'genre': genre, 'image_url': image_url, 'author': author, 'review_date': date.today(), \n 'review_title': review_title, 'review_message': review_message, 'review_id': review_id, 'overall_score': overall_score}\n connection = sqlite3.connect(DB_FILE)\n cursor = connection.cursor()\n cursor.execute(\"UPDATE game_reviews SET name = :name, genre = :genre, image_url = :image_url, author = :author\" + \n \", review_date = :review_date, review_title = 
:review_title, review_message = :review_message, overall_score = :overall_score WHERE id = :review_id\", params)\n\n connection.commit()\n cursor.close()\n return True", "title": "" }, { "docid": "e92ef7a1bfeda5d0b3134d37c7433df6", "score": "0.69231933", "text": "def edit_review(review_id):\n review_obj = storage.get(\"Review\", review_id)\n if review_obj is None:\n abort(404)\n if request.json is None:\n return \"Not a JSON\", 400\n fields = request.get_json()\n for key in fields:\n if key in ['id', 'user_id', 'place_id', 'created_at', 'update_at']:\n continue\n if hasattr(review_obj, key):\n setattr(review_obj, key, fields[key])\n review_obj.save()\n return jsonify(review_obj.to_dict()), 200", "title": "" }, { "docid": "f54f1dc591ff678017fc66331bc1c305", "score": "0.6843834", "text": "def put_review(review_id):\n review = storage.get(Review, review_id)\n if not review:\n abort(404)\n\n review_body = request.get_json()\n if not review_body:\n return make_response(jsonify({\"error\": \"Not a JSON\"}), 400)\n\n for key, value in review_body.items():\n if key not in [\"id\", \"user_id\", \"place_id\",\n \"created_at\", \"updated_at\"]:\n setattr(review, key, value)\n review.save()\n\n return make_response(jsonify(review.to_dict()), 200)", "title": "" }, { "docid": "377881b05f1e0d73e34b7d21c6a86ce8", "score": "0.66797656", "text": "def edit_review(request, pk):\n review = get_object_or_404(Review, id=pk)\n\n if request.method == 'POST':\n edit_form = reviewCreationForm(request.POST, instance=review)\n\n if edit_form.is_valid():\n edit_form.save()\n messages.success(\n request, \"Review has successfully been updated!\")\n return redirect(reverse('profile'))\n else:\n messages.error(\n request, \"Unable to update. Please rectify the problems below\")\n else:\n edit_form = reviewCreationForm(instance=review)\n\n args = {\n 'edit_form': edit_form,\n \"review\": review\n }\n return render(request, 'edit_review.html', args)", "title": "" }, { "docid": "84a9f4853fdd3369649faa69cd65a1e9", "score": "0.6679078", "text": "def edit_review(review_id):\n # cheks if user is logged in\n if \"user\" in session:\n user = mongo.db.users.find_one({\"username\": session[\"user\"]})\n review = mongo.db.reviews.find_one({\"_id\": ObjectId(review_id)})\n # checks if review exists in database\n if review:\n # checks if review belongs to user\n if review[\"reviewed_by\"] == str(user[\"_id\"]):\n book = {}\n # finds book associated with review\n books = mongo.db.books.find()\n for doc in books:\n if str(doc[\"_id\"]) == review[\"book_id\"]:\n book = doc\n\n if request.method == \"POST\":\n # updates \"review\" and \"rating\" fields\n mongo.db.reviews.update(\n {\"_id\": ObjectId(review_id)},\n {\"$set\": {\"review\": request.form.get(\"review\"),\n \"rating\": request.form.get(\"rating\")}})\n\n flash(\"Review Successfully Edited\")\n # updates book rating\n update_book_rating(book)\n\n return redirect(url_for('my_reviews'))\n\n return render_template(\n \"edit_review.html\", review=review, book=book)\n flash(\"You can only edit your own reviews\")\n return render_template(\"401.html\")\n flash(\"Sorry, this review no longer exists\")\n render_template(\"404.html\")\n flash(\"You need to be logged in to do that\")\n return render_template(\"log_in.html\")", "title": "" }, { "docid": "bf0e498f68e18149038a63090aa77bba", "score": "0.66340065", "text": "def _update_rating(self):\n reviews = LessonReview.objects.filter(lesson=self)\n if reviews:\n total_rating = 0\n no_of_reviews = 0\n for review in reviews:\n total_rating 
+= int(review.rating)\n no_of_reviews += 1\n new_rating = total_rating / no_of_reviews\n self.rating = new_rating\n else:\n self.rating = None\n self.save()\n # Send a list of all lessons by this instructor to\n # its profile for rating update\n lessons_by_this_instructor = Lesson.objects.filter(\n instructor_profile=self.instructor_profile)\n\n self.instructor_profile._update_rating(lessons_by_this_instructor)", "title": "" }, { "docid": "c2d31ec9889cac7346dcdac560aa9343", "score": "0.64578253", "text": "def submit_review(request):\n if request.user.is_stylist == 'YES':\n if request.method == 'POST':\n review = Review.objects.get(pk=request.POST.get('review_pk'))\n review.customer_rating = int(request.POST.get('rating'))\n review.save()\n UserLogic.update_average(review)\n return redirect('stylist:dashboard')\n else:\n return redirect('core:logout')", "title": "" }, { "docid": "9333f9ebe27305bcd4bcf926bed4b18c", "score": "0.64515203", "text": "def update(self, request, *args, **kwargs):\n response = {'message': 'You can\\'t update a rating like that'}\n return Response(response, status=status.HTTP_400_BAD_REQUEST)", "title": "" }, { "docid": "3bbe847ccbd0946b95e8501e909068aa", "score": "0.6435886", "text": "def edit_review(review_id):\n\n if not g.user:\n flash(\"Access unauthorized.\", \"danger\")\n return redirect(\"/\")\n\n review = Review.query.get_or_404(review_id)\n form = EditReviewForm(obj=review)\n if g.user.id != review.user.id:\n flash(\"Access unauthorized.\", \"danger\")\n return redirect(\"/\")\n\n if form.validate_on_submit():\n if User.authenticate(g.user.username, form.password.data) != g.user:\n flash(\"Incorrect password\", \"danger\")\n return redirect(\"/\")\n review.title = form.name.data\n review.text = form.text.data\n review.rating = form.rating.data\n\n db.session.add(review)\n db.session.commit()\n\n return redirect(f\"/reviews/{review_id}\")\n return render_template(\"reviews/edit_review.html\", form=form, review=review)", "title": "" }, { "docid": "7c6c932413d52714aef7eea675215ea4", "score": "0.6396508", "text": "def update(self, interest, doc):\n self.interest = interest\n self.doc = doc\n self.save()\n\n repo = self.repository\n ratings = self.__class__.objects.filter(repository=repo)\n l = float(len(ratings))\n i = d = 0\n for r in ratings:\n i += r.interest\n d += r.doc\n\n num_scores = 2.0\n repo.rating_avg = float(i + d) / (num_scores * l)\n repo.rating_avg_interest = float(i) / l\n repo.rating_avg_doc = float(d) / l\n repo.rating_votes = l\n repo.save()", "title": "" }, { "docid": "e7ace1cc402bf375b0f6e738c3991c56", "score": "0.6250957", "text": "def update_reviews_cmd(reviews_id, **reviews_properties):\n return UpdatereviewsCommand(reviews_id, **reviews_properties)", "title": "" }, { "docid": "1b3ef2d533e2c96f2b0444d89c4c276c", "score": "0.6213342", "text": "def update(self,**kwargs):\n pass", "title": "" }, { "docid": "1b3ef2d533e2c96f2b0444d89c4c276c", "score": "0.6213342", "text": "def update(self,**kwargs):\n pass", "title": "" }, { "docid": "e5327af1fc8500249ca1d96c75469169", "score": "0.6158669", "text": "def update_review_request(reqId):\n req = ReviewRequestModel().get_request_review(reqId)\n form = ReviewRequest(request.form)\n if req and req[\"username\"] == session.get('username'):\n if form.validate():\n ReviewRequestModel().insert_(form.data)\n # TODO not a string but response\n return \"ok\"\n #return redirect(url_for('respond_for_review',reqId=reqId))\n #logger.debug(form.errors)\n # TODO should return 404 and not form invalid\n return 
\"form invalid %s \" % form.errors", "title": "" }, { "docid": "4b98fbc744dfd92e6d4e3a5a33bdba93", "score": "0.6156441", "text": "def update_review(self,\n request,\n review,\n public=None,\n publish_to_owner_only=False,\n publish_and_archive=False,\n extra_fields={},\n ship_it=None,\n *args,\n **kwargs):\n if not self.has_modify_permissions(request, review):\n # Can't modify published reviews or those not belonging\n # to the user.\n return self.get_no_access_error(request)\n\n if ship_it is not None:\n review.ship_it = ship_it\n\n self.set_text_fields(review, 'body_top', **kwargs)\n self.set_text_fields(review, 'body_bottom', **kwargs)\n\n try:\n self.import_extra_data(review, review.extra_data, extra_fields)\n except ImportExtraDataError as e:\n return e.error_payload\n\n review.save()\n\n if public:\n try:\n review.publish(user=request.user,\n to_owner_only=publish_to_owner_only,\n request=request)\n except PublishError as e:\n return PUBLISH_ERROR.with_message(str(e))\n\n if publish_and_archive:\n ReviewRequestVisit.objects.update_visibility(\n review.review_request, request.user,\n ReviewRequestVisit.ARCHIVED)\n\n return 200, {\n self.item_result_key: review,\n }", "title": "" }, { "docid": "16b4544b9fb1e9086c64d51bb8f33339", "score": "0.6156426", "text": "def put_reviews(review_id):\n\n data = storage.get(Review, review_id)\n\n if not data:\n abort(404)\n if not request.is_json:\n abort(400, description=\"Not a JSON\")\n\n my_req = request.get_json()\n\n for k, v in my_req.items():\n if k != \"id\" and k != \"created_at\" and k != \"updated_at\" and\\\n k != \"user_id\" and k != \"place_id\":\n setattr(data, k, v)\n\n storage.save()\n return data.to_dict(), 200", "title": "" }, { "docid": "44d8bb88fc73339d32bb54ecbd5032d7", "score": "0.6060753", "text": "def update(self, request, pk=None):\n user = OpenUser.objects.get(user=request.auth.user)\n post = Post.objects.get(pk=pk)\n post.title = request.data['title']\n post.publication_date = request.data['publication_date']\n post.image_url = request.data['image_url']\n post.content = request.data['content']\n post.approved = request.data['approved']\n post.user = user.user\n\n if post.visual_schedule is None:\n try: post.visual_schedule = VisualSchedule.objects.get(pk=request.data[\"visual_schedule\"])\n except: post.visual_schedule = None\n elif request.data['visual_schedule'] is not None:\n post.visual_schedule = VisualSchedule.objects.get(pk=request.data[\"visual_schedule\"])\n if post.social_story is None:\n try: post.social_story = SocialStory.objects.get(pk=request.data[\"social_story\"])\n except: post.social_story = None\n elif request.data['social_story'] is not None:\n post.social_story = SocialStory.objects.get(pk=request.data[\"social_story\"])\n \n\n category = Category.objects.get(pk=request.data[\"category_id\"])\n post.category = category\n\n # Try/Except try to save new post instance and use serializer to convert to json\n try:\n post.save()\n return Response({}, status=status.HTTP_204_NO_CONTENT)\n except Exception as ex:\n return HttpResponseServerError(ex)", "title": "" }, { "docid": "e7b52ae8dfeffebdbe0182c592b8fef4", "score": "0.60364115", "text": "def update_book_rating(book):\n reviews = mongo.db.reviews.find()\n list_of_ratings = []\n for review in reviews:\n # checks reviews against book\n if review[\"book_id\"] == str(book[\"_id\"]):\n # adds rating from reviews to list_of_ratings variable\n list_of_ratings.append(int(review[\"rating\"]))\n # checks at least one review exists\n if len(list_of_ratings) > 0:\n # 
calculates avarage rating\n new_rating = sum(list_of_ratings) / len(list_of_ratings)\n print(round(new_rating))\n # updates books rating\n mongo.db.books.update(\n book, {\n \"$set\": {\"rating\": round(new_rating)}})", "title": "" }, { "docid": "dac5c04b0375e8ba60c75e2d628bb008", "score": "0.60289735", "text": "def do_Review(self, arg):\n self.class_action(\"Review\", arg)", "title": "" }, { "docid": "67ec5b535002edea67d45abf158bb5b3", "score": "0.6014147", "text": "def save(self, *args, **kwargs):\n super().save(*args, **kwargs)\n self.lesson._update_rating()", "title": "" }, { "docid": "56db6e4befa41cfee2b770533aeb3e99", "score": "0.5998544", "text": "def mark_as_reviewed(instance):\n response = self.api.post(\n all_models.Review,\n {\n \"review\": {\n \"reviewable\": {\n \"type\": instance.type,\n \"id\": instance.id,\n },\n \"context\": None,\n \"notification_type\": \"email\",\n \"status\": all_models.Review.STATES.REVIEWED,\n \"access_control_list\": review.build_reviewer_acl(),\n },\n },\n )\n self.assertStatus(response, 201)", "title": "" }, { "docid": "ac612faeb3bf9dabbacd5ea87275791a", "score": "0.59880316", "text": "def update(self, instance, validated_data):\n instance.brand = validated_data.get('brand', instance.brand)\n instance.interviewee = validated_data.get('interviewee', instance.interviewee)\n instance.favorite = validated_data.get('favorite', instance.favorite)\n instance.disliked = validated_data.get('disliked', instance.disliked)\n instance.reason = validated_data.reason('reason', instance.reason)\n instance.longitude = validated_data.longitude('longitude', instance.longitude)\n instance.latitude = validated_data.get('latitude', instance.latitude)\n instance.save()\n return instance", "title": "" }, { "docid": "c9b057771de5a361818f771cd4f972eb", "score": "0.59587944", "text": "def test_update_Review(self):\n i = \"Review\"\n attr = [\"place_id\", \"user_id\", \"text\", \"name\", \"code\"]\n value = [\"985\", \"7621\", \"Random Text\", \"Holberton\", \"123\"]\n typeval = [str, str]\n with patch('sys.stdout', new=io.StringIO()) as f:\n HBNBCommand().onecmd(\"create \" + i)\n id_st = f.getvalue()\n alldic = storage.all()\n self.assertTrue((i + '.' + id_st[:-1]) in alldic.keys())\n for j, k in zip(attr, value):\n with patch('sys.stdout', new=io.StringIO()) as f:\n HBNBCommand().onecmd(\"update \" + i + \" \" + id_st +\n \" \" + j + \" \" + k)\n alldic = storage.all()\n ins = alldic[i + '.' 
+ id_st[:-1]]\n for j, k, m in zip(attr, value, typeval):\n gattr = getattr(ins, j, False)\n self.assertEqual(gattr, k)\n self.assertEqual(m, type(gattr))", "title": "" }, { "docid": "6432cab8c1ca6ca49a88d74fe3044a33", "score": "0.59119594", "text": "def put(self, paper_id):\n data = request.json\n update_annotation(id, data)\n return None, 204", "title": "" }, { "docid": "b199c4268bad8378ae5af290a02e0acc", "score": "0.5898937", "text": "def update(self, instance, validated_data):\n instance.date = validated_data.get('date', instance.date)\n instance.costs = validated_data.get('costs', instance.costs)\n instance.created_date = validated_data.get('created_date', instance.created_date)\n instance.published_date = validated_data.get('published_date', instance.published_date)\n instance.keyword = validated_data.get('keyword', instance.keyword)\n instance.save()\n return instance", "title": "" }, { "docid": "f9ae4434a07fc72ee099dc9b3ad9db22", "score": "0.5891465", "text": "def mark_as_reviewed(self):\n if not self.is_reviewed:\n self.is_reviewed = True\n self.save()\n return self", "title": "" }, { "docid": "87ec35bafe461fb52eafaaa5e14ce388", "score": "0.584937", "text": "def Save_review(request, Re_id):\n restaurant = Restaurant.objects.get(id=Re_id)\n review_by_user = RestaurantReview.objects.filter(restaurant=restaurant, user=request.user)\n if review_by_user.exists():\n return JsonResponse({'error': \"You can't submit a review again\"})\n\n form = Review_Form(data=request.POST)\n if form.is_valid():\n review = form.save(commit=False)\n review.user = request.user\n review.restaurant = restaurant # Restaurant.objects.get(id=id)\n review.save()\n # The success message is sent via ShowMessageAjax.\n data = {\n 'success': 'Your review is saved',\n 'review': review.description\n }\n return JsonResponse(data)\n else:\n # else => form is not valid\n return JsonResponse({'error': 'Your form is not valid'})", "title": "" }, { "docid": "fed2a5c9e21642afbd9d85628104f614", "score": "0.5823385", "text": "def update(self,request,pk=None):\n return Response({'http_method':'PUT'})", "title": "" }, { "docid": "c8c4634b4c7a9fe9826ba6fa632e4e17", "score": "0.58226764", "text": "def update(self, *args, **kwargs):\n return None", "title": "" }, { "docid": "9ced06a2ce4ec7280ef58bd6f3a8f520", "score": "0.5819736", "text": "def rating(self, value):\n self._edit_review({'review[rating]': value})", "title": "" }, { "docid": "45ed35433f0127564f390c9005542fa0", "score": "0.5810005", "text": "def test_update_learner(self):\n learnerObj = Learner()\n response = self.client.open(\n '/learner-inferences/learners/{learnerId}'.format(learnerId='learnerId_example'),\n method='PATCH',\n data=json.dumps(learnerObj),\n content_type='application/ld+json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "title": "" }, { "docid": "fb7149fb6d85d046a680323bab1e3d2a", "score": "0.5784173", "text": "def update_ftext_review(review_id, user_id, abstract):\n conn = dblib.create_con(VERBOSE=True)\n cur = conn.cursor()\n cur.execute(\"\"\"\n UPDATE freetext_reviews\n SET abstract = %s, date_updated = NOW()\n WHERE review_id = %s\n AND user_id = %s\n RETURNING review_id;\n \"\"\", (abstract, review_id, user_id))\n conn.commit()\n idx = cur.fetchone()\n idx = idx[0] if idx else None\n conn.close()\n return idx", "title": "" }, { "docid": "66403dc28195279d2d3088308df283e7", "score": "0.57826436", "text": "def update(self, object_):\n raise NotImplementedError()", "title": "" }, { "docid": 
"b78ff5d9c6987e79d48de4ab78df0c4a", "score": "0.5776106", "text": "def approve_review(request, review_id):\n if request.user.is_staff:\n try:\n review = get_object_or_404(ProductReview, pk=review_id)\n product = get_object_or_404(Product, pk=review.product_id)\n rating = product.reviews.aggregate(Avg('stars'))['stars__avg']\n review.authorised = True\n review.save(update_fields=['authorised'])\n product.rating = round(rating, 1)\n product.save(update_fields=['rating'])\n messages.success(request, f'{review.product} review by\\\n {review.user} approved')\n except Exception as e:\n messages.error(request, f'Error deleting review: {e}')\n else:\n messages.error(request, 'You need to be assigned as administrator \\\n to approve reviews')\n return redirect(reverse('products'))\n return redirect(reverse('site_management'))", "title": "" }, { "docid": "a2c4a928f3c89efda310fd582c666df2", "score": "0.5770544", "text": "def update(self, instance, validated_data):\n \n genre = validated_data.pop('genre')\n movie_obj = Movies.objects.filter(id=instance.id)\n ins = movie_obj.first()\n ins.genre = str(genre)\n ins.save()\n movie_obj.update(**validated_data)\n return movie_obj", "title": "" }, { "docid": "f68b5f79c566d7a12ac820c89b847327", "score": "0.5767811", "text": "def _update(self):\r\n self.book.rate = (self.book.rate*self.book.rate_num+self.rate) / \\\r\n (self.book.rate_num+1)\r\n self.book.rate_num = self.book.rate_num+1\r\n self.book.save()", "title": "" }, { "docid": "dd421721083652de1b7247cd7d951169", "score": "0.57633203", "text": "def update(self, instance, validated_data):\n instance.question_text = validated_data.get('question_text', instance.question_text)\n instance.pub_date = validated_data.get('pub_date', instance.pub_date)\n instance.save()\n return instance", "title": "" }, { "docid": "63458821f42294a691085b76c863a1ae", "score": "0.57449394", "text": "def update(self, *args: Any, **kwargs: Any) -> None:\n pass", "title": "" }, { "docid": "5485c96e78d5e2d71fe4983c11ab8f09", "score": "0.5743105", "text": "def post(self):\n try:\n new_user_review = UserReview(**api.payload)\n db.session.add(new_user_review)\n db.session.commit()\n return new_user_review\n except IntegrityError:\n return flask_abort(409, \"Review has already been created\")\n except ValueError:\n return flask_abort(400, \"Score must be between 1 and 4\")", "title": "" }, { "docid": "21041f9dd81e0ed3b892b74079950684", "score": "0.5723016", "text": "def review_version(self, review_version):\n\n self._review_version = review_version", "title": "" }, { "docid": "07fa14ca2030b48d24299f1dd3cbd0b3", "score": "0.5715088", "text": "def update(self, instance, validated_data):\n instance.title = validated_data.get('title', instance.title)\n instance.code = validated_data.get('code', instance.code)\n instance.linenos = validated_data.get('linenos', instance.linenos)\n instance.language = validated_data.get('language', instance.language)\n instance.style = validated_data.get('style', instance.style)\n instance.save()\n return instance", "title": "" }, { "docid": "4e0edebd791df6e0a101ea23fdde5bf1", "score": "0.5714136", "text": "def update(self,request, pk = None):\n return Response({'http_method':'PUT'})", "title": "" }, { "docid": "852be5e7f07ef0949abb50143dbcc607", "score": "0.57129854", "text": "def add_to_review(self, patch, review):\n review_id = review['permaId']['id']\n data = self.add_patch_data(patch)\n\n if 'anchor' in data and not self.configuration.new_patch_source:\n matching_patch_groups = 
self.get_iterable_patchgroups(review_id, data['anchor']['anchorRepository'])\n self.configuration.choose_source(matching_patch_groups)\n if self.configuration.patch_source:\n data['source'] = self.configuration.patch_source\n\n logging.debug('Adding patch to review %s' % review_id)\n\n patch_response = self._request('/rest-service/reviews-v1/%s/patch' % review_id, data=data, http_handlers={\n 409: self.handle_anchor_error\n })\n self.add_reviewers(review_id, self.configuration.reviewers)\n\n Console.success('Updated review %(id)s (state: %(state)s) - %(url)s/cru/%(id)s'\n % ({'id': review_id, 'state': patch_response['state'],'url':self.configuration.url}))", "title": "" }, { "docid": "586cb8187be86de5d01c2b2d1c8c034d", "score": "0.5712918", "text": "def update(self, instance, validated_data):\n raise NotImplementedError(\"This is not meant to be used\")", "title": "" }, { "docid": "20d30386881a9055000f0e7fb1fe2c0e", "score": "0.5698027", "text": "def update(self, request, pk=None):\n return Response({'method':'PUT'})", "title": "" }, { "docid": "bf3e42363b795d8a358a79ef4ccd8c74", "score": "0.56974965", "text": "def update(self, request, pk=None):\n return Response({\"method\":\"UPDATE\"})", "title": "" }, { "docid": "a14df45a7e4377cbfbcbb358513ebdeb", "score": "0.56953365", "text": "def save_review(self, record, worker_id, rating=0.0):\n assert 'reviews' in record\n assert 'reviewed_by' in record\n record['used'] = len(record['reviewed_by']) + 1\n record['reviewed_by'].append({worker_id: rating})\n self.mturk.update_one({'_id': record['_id']}, {\"$set\": record}, upsert=False)", "title": "" }, { "docid": "2aa4790ee76fd5adda5e398090d7c52a", "score": "0.567472", "text": "def update(self, **kwargs):\n raise exceptions.MethodNotImplemented(method=self.update,\n details='The method update is not available for this resource')", "title": "" }, { "docid": "44ff9889db3df374624b85c3000fa726", "score": "0.567162", "text": "def update(self, instance, validated_data):\n try:\n catalog_id=validated_data.pop('catalog_id').id\n except Catalog.DoesNotExist:\n raise\n instance.name = validated_data.get('name', instance.name)\n instance.price = validated_data.get('price', instance.price)\n instance.image = validated_data.get('image', instance.image)\n instance.slug = validated_data.get('slug', instance.slug)\n instance.color = validated_data.get('color', instance.color)\n instance.size = validated_data.get('size', instance.size)\n instance.available = validated_data.get('available', instance.available)\n instance.is_new = validated_data.get('is_new', instance.is_new)\n instance.quantity = validated_data.get('quantity', instance.quantity)\n instance.catalog_id = validated_data.get('catalog_id', instance.catalog_id)\n\n if str(catalog_id):\n pass\n # catalog=Catalog.objects.get(pk=catalog_id)\n # print(\"catalog for updating:\",catalog)\n # catalog.url=catalog.get('url',catalog)\n # catalog.save()\n instance.save()\n return instance", "title": "" }, { "docid": "eca11eb8e35d6837c9ed2d1b35a956eb", "score": "0.5664494", "text": "def put(self, request, *args, **kwargs):\n\t\treturn self.update(request, *args, **kwargs)", "title": "" }, { "docid": "7000412774f50d95bd384606d29a0d78", "score": "0.56555504", "text": "def update(self, request, pk=None):\n return Response({'http_method':'PUT'})", "title": "" }, { "docid": "f7e782e6f8e04edeb89380e25be79b77", "score": "0.5653213", "text": "def update(self, request, pk=None):\n\n return Response({'method': 'PUT'})", "title": "" }, { "docid": 
"ef08da12dcf06a6737f9a6d9c2dfa3a8", "score": "0.5651035", "text": "def put(self, request, *args, **kwargs):\n return self.update(request, *args, **kwargs)", "title": "" }, { "docid": "17cdee7241a64644d90b31f802646a84", "score": "0.56460345", "text": "def _review(self, app, data):\n for rev in data:\n s = ApplicationReview(\n user_pic = rev['image'],\n user = rev['author'],\n user_rating = rev['rating'],\n text = rev['review-text'],\n title = rev['title'],\n app = app\n )\n s.save()", "title": "" }, { "docid": "3758c35f3bb05b040027bfefe4e19694", "score": "0.5636048", "text": "def update(self, instance: Film, validated_data):\n instance.film_name = validated_data.get('film_name', instance.film_name)\n instance.film_length = validated_data.get('film_length', instance.film_length)\n instance.price = validated_data.get('price', instance.price)\n instance.id = validated_data.get('id', instance.id)\n # instance.style = validated_data.get('style', instance.style)\n instance.save()\n return instance", "title": "" }, { "docid": "bb33229f651241610ef1956c233132f9", "score": "0.56306857", "text": "def get_review(self):", "title": "" }, { "docid": "a3e66e62559f94c4df8cda6e6831717c", "score": "0.5630225", "text": "def update(self, instance, validated_data):\n instance.user = validated_data.get('user', instance.user)\n if 'date' in validated_data:\n instance.date = validated_data.get('date', instance.date.strftime('YYYY-MM-DDThh:mm'))\n else:\n instance.date = datetime.strptime(str(datetime.now())[:19], \"%Y-%m-%d %H:%M:%S\").strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n instance.text = validated_data.get('text', instance.text)\n instance.is_seen = validated_data.get('is_seen', instance.is_seen)\n instance.is_read = validated_data.get('is_read', instance.is_read)\n\n instance.save()\n return instance", "title": "" }, { "docid": "388b866d3f5d66089a53b5434c221029", "score": "0.5615542", "text": "def update(self, request, pk=None):\n grant_update = Grant.objects.get(pk=pk)\n grant_update.wisher_id = request.auth.user.wisher.id\n grant_update.wish_id = request.data['wish']\n grant_update.memo = request.data['memo']\n grant_update.status = request.data['status']\n\n grant_update.save()\n return Response({}, status=status.HTTP_204_NO_CONTENT)", "title": "" }, { "docid": "a51d736e02057bc7dee7712c69fbf65f", "score": "0.56135726", "text": "def update (self, request, pk=None):\n return Response({'HTTP_method':'PUT'})", "title": "" }, { "docid": "5b7c4e3a2d03c872743edfa6223dec93", "score": "0.5612995", "text": "def update(self, instance, validated_data):\n\t\tinstance.id = validated_data.get('id', instance.id)\n\t\tinstance.name = validated_data.get('name', instance.name)\n\t\tinstance.description = validated_data.get('description', instance.description)\n\t\tinstance.price = validated_data.get('language', instance.price)\n\t\tinstance.save()\n\t\treturn instance", "title": "" }, { "docid": "28e997c1c7324e29a47808d714add6fc", "score": "0.56006765", "text": "def update(self, request, pk = None):\n return Response({\"method\": 'PUT'})", "title": "" }, { "docid": "774ab5fe492d0df5a40727e37ab0a26f", "score": "0.55969477", "text": "def update(self, instance, validated_data):\n raise NotImplementedError(\"Cannot update a tag from the REST API\")", "title": "" }, { "docid": "7a73ce00829c70863b00759d004fa840", "score": "0.55947524", "text": "def update(self, request, pk=None):\n return Response({'http_method': 'PUT'})", "title": "" }, { "docid": "7a73ce00829c70863b00759d004fa840", "score": "0.55947524", "text": "def update(self, 
{ "docid": "04294928c43f0e82279e0195e1504101", "score": "0.5589572", "text": "def review_book(request, pk):\n book = get_object_or_404(Book, pk=pk)\n if request.method == 'POST':\n form = ReviewForm(request.POST)\n if form.is_valid():\n book.is_favorite = form.cleaned_data['is_favorite']\n book.review = form.cleaned_data['review']\n book.save()\n return redirect('review-books')\n else:\n form = ReviewForm \n\n context = {\n\t'book': book,\n 'form': form,\n }\n\t\n return render(request, \"review-book.html\", context)", "title": "" }, { "docid": "180856cffc936d7987fa513fe9e2e98e", "score": "0.55874246", "text": "def update(self, request, pk=None):\n return Response({\"http_method\": \"PUT\"})", "title": "" }, { "docid": "d381457de143ec78b0600a6e2e44ab29", "score": "0.55828243", "text": "def update(self, request, pk=None):\r\n return Response({'http_method': 'PUT'})", "title": "" }, { "docid": "48468d639f45d4e355d7639bd864c185", "score": "0.5577764", "text": "def update(self, instance, validated_data, **kwargs):\n instance.body = validated_data.get('body', instance.body)\n instance.save()\n return instance", "title": "" }, { "docid": "48967caeb396ee1fcd2bbafc83c9f500", "score": "0.5574865", "text": "def update(self, book_id: ObjectId, book: dict):\n update = self.model.update(book_id, book)\n if not update:\n return update", "title": "" }, { "docid": "2e1222b82642776f1537bb4194e958e0", "score": "0.55692244", "text": "def review(book_id):\n\t#ensure rating user submitted is an integer\n\ttry:\n\t\trating = int(request.form.get(\"num_review\"))\n\texcept ValueError:\n\t\treturn render_template(\"error.html\", message=\"Submit a number rating.\")\n\n\treview = request.form.get(\"text_review\")\n\n\tif rating < 1 or rating > 5:\n\t\treturn render_template(\"error.html\",message=\"Your rating must be between 1 and 5\")\n\tuser_id = session.get(\"user_id\")\n\t#select all reviews from user for current book\n\tquery = db.execute(\"SELECT * FROM reviews \\\n\t\t\t\tWHERE reviews.book_id = :id AND reviews.user_id = :uid\",\n\t\t\t\t{\"id\": book_id, \"uid\": user_id})\n\n\t#check if user already submitted a review for this book\n\tif query.rowcount != 0:\n\t\treturn render_template(\"error.html\", message=\"You can only submit one review per book.\")\n\n\tdb.execute(\"INSERT INTO reviews (review, rating, book_id, user_id)\\\n\t\t\t\tVALUES (:review, :rating, :book_id, :user_id)\",\n\t\t\t\t{\"review\":review, \"rating\":rating, \"book_id\":book_id, \"user_id\":user_id})\n\tdb.commit()\n\treturn render_template(\"review.html\")", "title": "" }, { "docid": "58bea3c1657cf2d5f392a2b7b6265451", "score": "0.55679345", "text": "def test_update(self):\n risk = factories.RiskFactory()\n url = \"/api/risks/%s\" % risk.id\n\n risk_body = self.generate_risk_body()\n risk_body[\"id\"] = risk.id\n response = self.api.put(url, {\"risk\": risk_body})\n self.assert200(response)\n\n risk = all_models.Risk.query.get(risk.id)\n 
self.assert_instance(risk_body, risk)", "title": "" }, { "docid": "2e25eb1f9470ec79809ef537f0b42f6f", "score": "0.55671203", "text": "def create(self, request, *args, **kwargs):\n try:\n review_request = \\\n resources.review_request.get_object(request, *args, **kwargs)\n except ObjectDoesNotExist:\n return DOES_NOT_EXIST\n\n review, is_new = Review.objects.get_or_create(\n review_request=review_request,\n user=request.user,\n public=False,\n **self.get_base_reply_to_field(*args, **kwargs))\n\n if is_new:\n status_code = 201 # Created\n else:\n # This already exists. Go ahead and update, but we're going to\n # redirect the user to the right place.\n status_code = 303 # See Other\n\n result = self.update_review(request, review, *args, **kwargs)\n\n if not isinstance(result, tuple) or result[0] != 200:\n return result\n else:\n return status_code, result[1], {\n 'Location': self.get_href(review, request, *args, **kwargs),\n }", "title": "" }, { "docid": "332faf1cd8fd24b72bfa99ab44f6d4fe", "score": "0.55630046", "text": "def _updateObject(self, xref, text, page=None):\n if self.isClosed or self.isEncrypted:\n raise ValueError(\"document closed or encrypted\")\n\n return _fitz.Document__updateObject(self, xref, text, page)", "title": "" }, { "docid": "d04649f6629b4b20f856bc7f5b5c982b", "score": "0.55629885", "text": "def review():\n\n userReview = request.args.get(\"userReview\")\n isbn = request.args.get(\"isbn\")\n\n # Check if user and book row exist in reviews table.\n if not db.execute(\"SELECT * FROM reviews WHERE id = :id AND isbn = :isbn\", {\"id\": session[\"user_id\"], \"isbn\": isbn}).fetchone():\n # User with this book does not exist in reviews table yet\n db.execute(\"INSERT INTO reviews (id, isbn, review) VALUES (:id, :isbn, :review)\",\n {\"id\": session[\"user_id\"], \"isbn\": isbn, \"review\": userReview})\n db.commit()\n else:\n # User with this book already exists in reviews table, update review only.\n db.execute(\"UPDATE reviews SET review = :review WHERE id = :id AND isbn = :isbn\",\n {\"review\": userReview, \"id\": session[\"user_id\"], \"isbn\": isbn})\n db.commit()\n\n return (\"OK\")", "title": "" }, { "docid": "edac0d0a48e3c9eb04bd9cc2ad1aa562", "score": "0.55629057", "text": "def update(self, *args, **kwargs):\n raise NotImplementedError", "title": "" }, { "docid": "8dad3f3c26aa9621e2de094f0c7b5fbd", "score": "0.5559356", "text": "def _updateObject(self, xref, text, page=None):\n if self.isClosed or self.isEncrypted:\n raise ValueError(\"operation illegal for closed / encrypted doc\")\n\n return _fitz.Document__updateObject(self, xref, text, page)", "title": "" }, { "docid": "b3463e1913dec24a5bc65a741d6911f5", "score": "0.5556174", "text": "def update_rating(self, user_id: int, book_id: int, rating: int) -> None:\n self.delete_rating(user_id, book_id)\n df = pd.DataFrame({\"user_id\": user_id, \"book_id\": book_id, \"rating\": rating}, index=[0])\n self.ratings = self.ratings.append(df, ignore_index=True)\n self.predictions = self.renew_predictions()", "title": "" }, { "docid": "4edbd3ac137fe98e051c69623ccd4244", "score": "0.5545514", "text": "def patch(self, request, *args, **kwargs):\r\n instance = request.user\r\n instance.first_name = request.data['first_name']\r\n instance.last_name = request.data['last_name']\r\n instance.email = request.data['email']\r\n instance.photo_link = request.data['photo_link']\r\n instance.biography = request.data['biography']\r\n instance.save()\r\n\r\n serializer = self.edit_serializer(instance, data=request.data)\r\n 
serializer.is_valid(raise_exception=True)\r\n\r\n self.perform_update(serializer)\r\n return Response(serializer.data)", "title": "" }, { "docid": "1ac1af7a8909127d6c2a1d53fc96e184", "score": "0.554121", "text": "def update(self, instance, validated_data):\n instance.description = validated_data.get('description', instance.description)\n instance.type = validated_data.get('type', instance.type)\n # for opt in validated_data.get('options', instance.options):\n # print(validated_data)\n instance.save()\n return instance", "title": "" }, { "docid": "2f5620320ca3f4a8c9e89325a5e2b6f1", "score": "0.5540962", "text": "def review(book_id):\n # Get book information\n the_book = db.execute(\"SELECT * FROM book WHERE id = :id\", {\"id\" : book_id}).fetchall()\n \n # Get book reviews information\n review_list = db.execute(\"SELECT rating, book_review, book_id, username, customer_id, customer.id FROM review JOIN customer ON review.customer_id = customer.id WHERE book_id = :id\", {\"id\" : book_id}).fetchall()\n \n # Get username for the customer db\n customer_name = db.execute(\"SELECT username FROM customer WHERE id = :id\", {\"id\" : session[\"user_id\"]}).fetchone()\n \n # Getting Goodreads API information\n rating_results = goodreads_review(the_book[0][\"isbn\"])\n if rating_results == None:\n total_ratings = 0\n average_ratings = 0.0\n else:\n total_ratings = commaSeparator(rating_results[\"rate_count\"])\n average_ratings = rating_results[\"rate_average\"]\n \n # Get xml data from goodreads api\n description, cover_img = goodreads_descriptions(the_book[0][\"isbn\"])\n \n if request.method == \"POST\":\n # Readers review\n book_rating = request.form.get(\"book_rating\")\n book_review = request.form.get(\"reader_review\")\n \n if not book_rating:\n book_rating = 0\n \n if not book_review:\n book_review = \"\"\n \n # Make sure a posted review by the user doesn't already exist for the book requested before inserting it into db.\n if db.execute(\"SELECT * FROM review WHERE book_id = :book_id AND customer_id = :customer_id\", {\"book_id\" : book_id, \"customer_id\" : session[\"user_id\"]}).rowcount != 1:\n db.execute(\"INSERT INTO review (rating, book_review, book_id, customer_id) VALUES (:rating, :book_review, :book_id, :customer_id)\", {\"rating\" : book_rating, \"book_review\" : book_review, \"book_id\" : book_id, \"customer_id\" : session[\"user_id\"]})\n total_rate_number = the_book[0][\"rate_count\"]\n db.execute(\"UPDATE book SET rate_count = :rate_count WHERE id = :id\", {\"rate_count\" : total_rate_number + 1, \"id\" : the_book[0][\"id\"]})\n else:\n error = 'I see that you have already submitted a review.'\n return render_template(\"book.html\", error=error, customer_name=customer_name[\"username\"], review_list=review_list, book_id=the_book[0][\"id\"], the_title=the_book[0][\"title\"], the_author=the_book[0][\"author\"], the_year=the_book[0][\"year\"], the_isbn=the_book[0][\"isbn\"], total_ratings=total_ratings, average_ratings=average_ratings, cover_img=cover_img, description=description)\n \n # Calculate the average of the user's rating's and then insert into review.\n rating_avg = db.execute(\"SELECT COALESCE(AVG(rating),0) AS rating_avg FROM review WHERE book_id = :book_id;\", {\"book_id\" : the_book[0][\"id\"]}).fetchone()\n db.execute(\"UPDATE book SET rate_average = :rate_average WHERE id = :id\", {\"rate_average\" : rating_avg[0], \"id\" : the_book[0][\"id\"]})\n \n db.commit()\n \n return render_template(\"book.html\", customer_name=customer_name[\"username\"], 
review_list=review_list, book_id=the_book[0][\"id\"], the_title=the_book[0][\"title\"], the_author=the_book[0][\"author\"], the_year=the_book[0][\"year\"], the_isbn=the_book[0][\"isbn\"], total_ratings=total_ratings, average_ratings=average_ratings, cover_img=cover_img, description=description)\n \n else:\n return redirect(\"/search\")", "title": "" }, { "docid": "e8182f6f3175685615de4a784d7b09c2", "score": "0.55349565", "text": "def updateObject(self, xref, text, page=None):\n return self._updateObject(xref, text, page=page)", "title": "" }, { "docid": "d4532b8edba185e933ede662b614a460", "score": "0.5526847", "text": "def put(self, id):\n product = [product for product in products if product['id'] == id]\n if len(product) == 0:\n abort(404)\n product = product[0]\n args = self.reqparse.parse_args()\n for k, v in args.items():\n\t\tif k == 'ratings':\n\t\t\tproduct['ratings'][v] += 1\n\t\telif k== 'comments':\n\t\t\tif v is not None:\n \t\t product[k].append(v)\n\t\t\telse:\n\t\t\t product[k].append('')\n return {'product': marshal(product, product_fields)}", "title": "" }, { "docid": "03b52adc7bf2a0a0c98fd1de1ef64845", "score": "0.552646", "text": "def update(self, instance, validated_data):\n instance.like = validated_data.get('like', instance.like)\n instance.save()\n return instance", "title": "" }, { "docid": "630a27d853c57fce281c365d51ccde98", "score": "0.55152166", "text": "def post(self):\n parser = reqparse.RequestParser()\n parser.add_argument('recipeID', location='json', required=True)\n parser.add_argument('historyID', location='json', required=True)\n parser.add_argument('content', location='json', required=False)\n parser.add_argument('rating', location='json', required=True)\n parser.add_argument('photo', location='json', required=False)\n\n data = parser.parse_args()\n\n # get claims\n claims = get_jwt_claims()\n\n # add dataReview to reviews model\n review = Reviews(claims['id'], data['recipeID'], data['historyID'],\n data['content'], data['rating'], data['photo'])\n db.session.add(review)\n db.session.commit()\n\n app.logger.debug('DEBUG : %s', review)\n\n # add reviewCount\n recipe = Recipes.query.get(data['recipeID'])\n recipeReviewCount = int(\n marshal(recipe, Recipes.responseFields)['reviewCount'])\n recipeRating = int(marshal(recipe, Recipes.responseFields)['rating'])\n\n recipe.reviewCount = recipeReviewCount + 1\n recipe.rating = ((recipeRating * recipeReviewCount) +\n int(data['rating'])) / (recipeReviewCount + 1)\n db.session.commit()\n\n return {\n 'code': 201,\n 'message': 'oke',\n 'data': marshal(review, Reviews.responseFields)\n }, 201", "title": "" }, { "docid": "b1fb96c35a7dd96e5b3ab710b6103472", "score": "0.5512518", "text": "def update(self, *args, **kwargs):\n self.params.update(*args, **kwargs)", "title": "" }, { "docid": "1a3f7a76b6a8ba2ab4e3955a685baaca", "score": "0.550813", "text": "def update_subreddit_obj(submission_obj) -> Subreddit:\n subreddit = submission_obj.subreddit\n\n subreddit.average_submission_polarity = update_average(\n subreddit.average_submission_polarity,\n submission_obj.polarity,\n subreddit.tracked_submissions)\n subreddit.average_submission_subjectivity = update_average(\n subreddit.average_submission_subjectivity,\n submission_obj.subjectivity,\n subreddit.tracked_submissions)\n subreddit.average_upvote_ratio = update_average(\n subreddit.average_upvote_ratio,\n submission_obj.upvote_ratio,\n subreddit.tracked_submissions)\n subreddit.average_gilded_silver = update_average(\n subreddit.average_gilded_silver,\n 
submission_obj.comments_gilded_silver,\n subreddit.tracked_submissions)\n subreddit.average_gilded_gold = update_average(\n subreddit.average_gilded_gold,\n submission_obj.comments_gilded_gold,\n subreddit.tracked_submissions)\n subreddit.average_gilded_platinum = update_average(\n subreddit.average_gilded_platinum,\n submission_obj.comments_gilded_platinum,\n subreddit.tracked_submissions)\n subreddit.average_is_op = update_average(\n subreddit.average_is_op,\n submission_obj.comments_op,\n subreddit.tracked_submissions)\n subreddit.average_is_mod = update_average(\n subreddit.average_is_mod,\n submission_obj.comments_mod,\n subreddit.tracked_submissions)\n subreddit.average_is_admin = update_average(\n subreddit.average_is_admin,\n submission_obj.comments_admin,\n subreddit.tracked_submissions)\n subreddit.average_is_special = update_average(\n subreddit.average_is_special,\n submission_obj.comments_special,\n subreddit.tracked_submissions)\n\n subreddit.score = subreddit.score + submission_obj.score\n subreddit.num_comments = subreddit.num_comments + submission_obj.num_comments\n subreddit.tracked_submissions = subreddit.tracked_submissions + 1\n subreddit.save()\n\n return subreddit", "title": "" }, { "docid": "f6463833d9554f5951b1832ed8bf05e5", "score": "0.5496046", "text": "def update(self, instance, validated_data):\n instance.title = validated_data.get(\"title\", instance.title)\n instance.body = validated_data.get(\"body\", instance.body)\n instance.created_at = validated_data.get(\"created_at\", instance.created_at)\n instance.slug = validated_data.get(\"slug\", instance.slug)\n instance.is_static_url = validated_data.get(\n \"is_static_url\", instance.is_static_url\n )\n instance.save()\n return instance", "title": "" }, { "docid": "2093263851a7f555f70d1abb1ac5c107", "score": "0.5492757", "text": "def update(self, instance, validated_data):\n instance.url = validated_data.get('url', instance.url)\n instance.user_date = validated_data.get(\n 'user_date', instance.user_date)\n instance.comments = validated_data.get('comments', instance.comments)\n if 'person_id' in validated_data:\n try:\n authUser = User.objects.get(pk=validated_data['person_id'])\n instance.person = authUser\n except User.DoesNotExist:\n logger.error('Person was not in validated data')\n try:\n instance.save()\n except APIException:\n logger.error('Could not save url model')\n update_center_of_mass(instance)\n instance.url = None\n return instance", "title": "" }, { "docid": "61ce8e2fe38146d5df7ff1441ede544d", "score": "0.5485235", "text": "def update_assignment(self, name, submissionQuestion, submissionStartDate = 1472352458, submissionStopDate = 2472352458, reviewStartDate = 1472352458, reviewStopDate = 2472352458, markPostDate = 2472352458, appealStopDate = 2472352458, courseID = None, day_offset = 0, maxSubmissionScore = 10, maxReviewScore = 5, defaultNumberOfReviews = 3, submissionType = 'essay'):\n if courseID == None:\n courseID = self.courseID\n assignment_params = locals()\n del assignment_params['self']\n defaults = {'password' : 'null', 'passwordMessage' : 'null', 'visibleToStudents' : 1, 'assignmentType' : 'peerreview', 'dateFormat' : 'MMMM Do YYYY, HH:mm', 'calibrationPoolAssignmentIds' : [], 'extraCalibrations' : 0, 'calibrationStartDate' : 0, 'calibrationStopDate' : 0, 'showMarksForReviewsReceived' : 1, 'showOtherReviewsByStudents' : 0, 'showOtherReviewsByInstructors' : 0, 'showMarksForOtherReviews' : 0, 'showMarksForReviewedSubmissions' : 0, 'showPoolStatus' : 0, 'calibrationMinCount' : 
0,'calibrationMaxScore' : 0,'calibrationThresholdMSE' : 0,'calibrationThresholdScore' : 0, 'allowRequestOfReviews' : 0, 'submissionSettings' : {'topics' : [], 'autoAssignEssayTopic' : 1, 'essayWordLimit' : 10000}}\n\n defaults.update(assignment_params)\n self.convert_assignment_datetimes_to_unix_time(defaults)\n if day_offset:\n self.add_day_offset(day_offset, defaults)\n return requests.post(self.server_url + 'assignment/update', data = json.dumps(assignment_params))", "title": "" }, { "docid": "9cbf57673cb493bb523fb01b76e8b6a0", "score": "0.5471549", "text": "def update_discount(self, **kwargs):\n return self.client.execute(\"discount/update\", \"POST\", kwargs)", "title": "" }, { "docid": "959cc59a4596f9dfa97aff7ea3289c84", "score": "0.5468233", "text": "def update(self, instance, validated_data):\n instance.name = validated_data.get('name', instance.name)\n instance.price = validated_data.get('price', instance.price)\n instance.stock = validated_data.get('stock', instance.stock)\n instance.description = validated_data.get('description', instance.description)\n instance.category = validated_data.get('category', instance.category)\n instance.save()\n return instance", "title": "" }, { "docid": "2874e4e3cc2a75cad3d9c6d2936c3b3a", "score": "0.5466198", "text": "def update_model(self, **kwargs):\n\n pass", "title": "" } ]
e3b1a8a1106dc43c9f0ef69adf7d44d8
3x3 convolution with padding
[ { "docid": "52cc726eb46371a1f313f7a15369e092", "score": "0.71639067", "text": "def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=dilation, groups=groups, bias=False, dilation=dilation)", "title": "" } ]
[ { "docid": "704b1ec366cc2821e73ccd551ecf5e75", "score": "0.7521464", "text": "def conv3x3(in_planes, out_planes, stride=1, dilation=1, padding=1):\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, dilation=dilation,\n padding=padding, bias=False)", "title": "" }, { "docid": "9d3c607767283ab7b84200354a64eb4c", "score": "0.74464816", "text": "def conv3d(x, W):\n return tf.nn.conv3d(x, W, strides=[1, 1, 1, 1, 1], padding='SAME')", "title": "" }, { "docid": "aa7740b5f7c49a7e512410a1792fba3b", "score": "0.74359393", "text": "def conv3x3(in_planes, out_planes, stride=1,dilation=1):\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=dilation,bias=False, dilation=dilation)", "title": "" }, { "docid": "f10db6a41649035dbfec82dfadaaffe0", "score": "0.7397175", "text": "def dwconv3x3_block(\n in_channels: int,\n out_channels: int,\n stride: int = 1,\n padding: int = 1,\n dilation: int = 1,\n bias: bool = False,\n bn_eps: float = 1e-5,\n activation=(lambda: nn.ReLU(inplace=True))\n):\n return dwconv_block(\n in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=3,\n stride=stride,\n padding=padding,\n dilation=dilation,\n bias=bias,\n bn_eps=bn_eps,\n activation=activation\n )", "title": "" }, { "docid": "5ee4e6b019ed1a666a458023986ed170", "score": "0.7342658", "text": "def conv3x3(in_planes, out_planes, stride=1, dilation=1):\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=dilation, bias=False, dilation=dilation)", "title": "" }, { "docid": "f02792e5fceb0f6b4aae5e1a5fd4a183", "score": "0.7288809", "text": "def conv3x3(in_planes, out_planes, stride=1, dilation=1):\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, dilation=dilation, bias=False)", "title": "" }, { "docid": "528e8b5c565248056cbc7a89055d4af6", "score": "0.72754425", "text": "def conv3x3(in_planes, out_planes, stride=1):\n pad=nn.ReplicationPad2d(1)\n padding=0\n conv_mod = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=padding, bias=False)\n return nn.Sequential(pad,conv_mod)", "title": "" }, { "docid": "18c4dc49187a5387da74952de1bfa086", "score": "0.72747284", "text": "def conv3x3(in_planes, out_planes, stride=1, dilation=1):\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, dilation=dilation,\n padding=1, bias=False)", "title": "" }, { "docid": "236971586912b7288d213dcdbc21b7ce", "score": "0.7256192", "text": "def conv3x3(in_planes, out_planes, stride=1):\n return nn.Conv2d(int(in_planes), int(out_planes), kernel_size=3, stride=stride,padding=1, bias=False)", "title": "" }, { "docid": "4812c528d5290fa7ca2238e4e86f19dd", "score": "0.7238403", "text": "def calc_conv(filter_dim, stride_dim, input_dim, padding):\n out_w = (input_dim[0] - filter_dim[0] + 2 * padding) / stride_dim[0] + 1\n out_h = (input_dim[1] - filter_dim[1] + 2 * padding) / stride_dim[1] + 1\n return out_w, out_h", "title": "" }, { "docid": "39df7339ca6f33aba5f747a7b79959e7", "score": "0.7228797", "text": "def __conv3d(self, x, W):\r\n return tf.nn.conv3d(x, W, strides=[1, 1, 1, 1, 1], padding='SAME')", "title": "" }, { "docid": "155cc9e1cd6c881902be2a5f383be394", "score": "0.7214909", "text": "def conv3x3(in_planes, out_planes, stride=1):\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,padding=1, bias=False)", "title": "" }, { "docid": "24a3749d9972c269cbe05fc21f30d1a6", "score": "0.7184758", "text": "def conv3x3(in_planes: int, out_planes: int, stride: int = 1) -> 
Callable:\n return nn.Conv2D(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias_attr=False)", "title": "" }, { "docid": "4ccf19a2c6749331c0cfd00288900f75", "score": "0.71667117", "text": "def convolve_grayscale_padding(images, kernel, padding):\n ph, pw = padding\n m, h, w = images.shape[-3:]\n kh, kw = kernel.shape[-2:]\n out = np.zeros((m, h + 2*ph - kh + 1, w + 2*pw - kw + 1))\n\n images = np.pad(\n images,\n (\n (0, 0), # no padding img axis\n (ph, ph),\n (pw, pw),\n ),\n mode='constant'\n )\n \n for row in range(out.shape[-2] - kh + 1):\n for col in range(out.shape[-1] - kw + 1):\n img = images[:, row:row+kh, col:col+kw]\n conv = (img * kernel).sum(axis=(-1, -2))\n out[:, row, col] = conv\n return out", "title": "" }, { "docid": "1f79b2009498d4d534624e1ce2018a27", "score": "0.7155487", "text": "def conv3x3(in_planes, out_planes, stride=1):\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)", "title": "" }, { "docid": "0b27c4389402b326c005fdb1131bf4b7", "score": "0.7140969", "text": "def __init__(self, c1, c2, D_in, D_out):\n super(Inception3D, self).__init__()\n D_out_2 = int(D_out/2)\n # compute padding o = [i + 2*p - k - (k-1)*(d-1)]/s + 1\n # from https://discuss.pytorch.org/t/how-to-keep-the-shape-of-input-and-output-same-when-dilation-conv/14338 for input 8x8x8 and c1=3, c2=5\n\n self.conv1 = torch.nn.Conv3d(in_channels=D_in, out_channels=D_out, kernel_size=1, stride=1)\n self.bn1 = torch.nn.BatchNorm3d(D_out)\n self.conv2 = torch.nn.Conv3d(in_channels=D_in, out_channels=D_out_2, kernel_size=c1, stride=1, padding=1)\n self.bn2 = torch.nn.BatchNorm3d(D_out_2)\n 
self.conv3 = torch.nn.Conv3d(in_channels=D_in, out_channels=D_out_2, kernel_size=c2, stride=1, padding=2)\n self.bn3 = torch.nn.BatchNorm3d(D_out_2)\n self.avgpool = torch.nn.AvgPool3d(kernel_size=c1, stride=1, padding=1)\n self.conv4 = torch.nn.Conv3d(in_channels=D_in, out_channels=D_out, kernel_size=1, stride=1)\n self.bn4 = torch.nn.BatchNorm3d(D_out)", "title": "" }, { "docid": "2758d00c16d671b86721e7a78f24edf8", "score": "0.71325696", "text": "def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):\n return nn.Conv2d(\n in_planes,\n out_planes,\n kernel_size=3,\n stride=stride,\n padding=dilation,\n groups=groups,\n bias=False,\n dilation=dilation)", "title": "" }, { "docid": "a31b8c54bf363d9e543c361130cd7436", "score": "0.7122867", "text": "def conv3D_func(A, W, right, down, back):\n\n # if len(A.shape) != 3 or len(W.shape) != 3:\n # print(\"Invalid Parameters for a 3D convolution.\")\n # exit(1)\n #\n # for i in range(0, 3):\n # if W.shape[i] > A.shape[i]:\n # print(\"conv2D_error: Mask parameter has a dimension which's bigger than A's same dimension.\")\n # exit(1)\n\n # else:\n\n m, n, l = A.shape\n mask_height, mask_width, mask_depth = W.shape\n\n # check if any padding is needed, and add it.\n\n # after padding:\n\n conv_height = int((m-mask_height) / down + 1)\n conv_width = int((n-mask_width) / right + 1)\n conv_depth = int((l-mask_depth) / back + 1)\n\n B = np.zeros((conv_height, conv_width, conv_depth))\n\n for i in prange(conv_height):\n for j in prange(conv_width):\n for k in prange(conv_depth):\n\n for a in prange(0, mask_height):\n for b in prange(0, mask_width):\n for c in prange(0, mask_depth):\n B[i, j, k] += A[i*down+a, j*right+b, k*back+c] * W[a, b, c]\n\n return B", "title": "" }, { "docid": "f6144c8d5815b83d3b9a9628528abe92", "score": "0.7121584", "text": "def conv3x3x3(in_planes, out_planes, stride=1, groups=1, padding=None, dilation=1, kernel_size=3):\n if padding is None:\n padding = kernel_size // 2\n return nn.Conv3d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, groups=groups, bias=False, dilation=dilation)", "title": "" }, { "docid": "f74930736dc001bc0c7f7e05623db72a", "score": "0.71184987", "text": "def Conv3x3(in_planes, out_planes, stride=1):\n return nn.Conv2d(\n in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False\n )", "title": "" }, { "docid": "50da677597bccbdef05db769ba2bf253", "score": "0.71158177", "text": "def conv3x3(in_planes, out_planes, stride=1):\r\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)", "title": "" }, { "docid": "84118c7a70619a34e43e12fdbb6a9998", "score": "0.71107614", "text": "def conv3x3(in_planes, out_planes, stride=1):\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=True)", "title": "" }, { "docid": "a672a60c483854020b35e12f3aa374e3", "score": "0.7105505", "text": "def conv3x3(in_planes, out_planes, stride=1):\r\n\treturn nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\r\n\t\t\t\t\t padding=1, bias=False)", "title": "" }, { "docid": "3331496072e91e7697d9a5257a91fe13", "score": "0.70922214", "text": "def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=dilation, groups=groups, bias=False, dilation=dilation)", "title": "" }, { "docid": "3331496072e91e7697d9a5257a91fe13", "score": "0.70922214", "text": "def conv3x3(in_planes, out_planes, stride=1, groups=1, 
dilation=1):\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=dilation, groups=groups, bias=False, dilation=dilation)", "title": "" }, { "docid": "dd98f4d07c7cfc313ecd5cbd0a0fc569", "score": "0.70899266", "text": "def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, groups=groups,\n padding=dilation, dilation=dilation, bias=False)", "title": "" }, { "docid": "2251afa8a1498e90376c48e92536dfdf", "score": "0.70833814", "text": "def conv3x3(in_planes, out_planes, stride=1):\n return nn.Conv2D(out_planes, in_channels=in_planes, kernel_size=3, strides=stride,\n padding=1, use_bias=False)", "title": "" }, { "docid": "e5dff71bda84e06059119f81bd1d2915", "score": "0.70808023", "text": "def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):\r\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\r\n padding=dilation, groups=groups, bias=False, dilation=dilation)", "title": "" }, { "docid": "f07f48d08d8158803b10862acf481393", "score": "0.70801866", "text": "def conv3x3(in_planes, out_planes, stride=1):\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)", "title": "" }, { "docid": "a62c97661e6508ca2d36689fbbdb333d", "score": "0.70761406", "text": "def conv3x3(in_planes, out_planes, stride=1):\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)", "title": "" }, { "docid": "72d2165dd103a880f7e771a426756f93", "score": "0.70580596", "text": "def conv3x3(in_planes, out_planes, stride=1):\r\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\r\n padding=1, bias=False)", "title": "" }, { "docid": "c9df776103f013adc5c949e096e381e0", "score": "0.70485824", "text": "def convolve(images, kernels, padding='same', stride=(1, 1)):\n\n c, w, = images.shape[3], images.shape[2]\n h, m = images.shape[1], images.shape[0]\n nc, kw, kh = kernels.shape[3], kernels.shape[1], kernels.shape[0]\n sw, sh = stride[1], stride[0]\n\n pw, ph = 0, 0\n\n if padding == 'same':\n ph = int(((h - 1) * sh + 
kh - h) / 2) + 1\n pw = int(((w - 1) * sw + kw - w) / 2) + 1\n\n if isinstance(padding, tuple):\n ph = padding[0]\n pw = padding[1]\n\n images = np.pad(images,\n pad_width=((0, 0),\n (ph, ph),\n (pw, pw),\n (0, 0)),\n mode='constant', constant_values=0)\n\n new_h = int(((h + 2 * ph - kh) / sh) + 1)\n new_w = int(((w + 2 * pw - kw) / sw) + 1)\n\n output = np.zeros((m, new_h, new_w, nc))\n\n for y in range(new_h):\n for x in range(new_w):\n for v in range(nc):\n output[:, y, x, v] = \\\n (kernels[:, :, :, v] *\n images[:,\n y * sh: y * sh + kh,\n x * sw: x * sw + kw,\n :]).sum(axis=(1, 2, 3))\n\n return output", "title": "" }, { "docid": "25a493a0ed2b5ac7949146932a131698", "score": "0.7042446", "text": "def conv3x3(in_planes, out_planes, stride=1):\n return nn.Conv2d(\n in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False\n )", "title": "" }, { "docid": "6ab8b99a3e7b7a39a4a4c27eb12a9915", "score": "0.70181686", "text": "def conv3x3(in_planes, out_planes, stride=1, bias=False, group=1):\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,padding=1, groups=group, bias=bias)", "title": "" }, { "docid": "765b01d62a79e834f4a2531316c77294", "score": "0.7016908", "text": "def conv3x3(in_planes: int, out_planes: int, stride: int = 1):\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)", "title": "" }, { "docid": "ac921e53fec364e6a97d895a573a5b4f", "score": "0.6949784", "text": "def conv3x3(in_planes, out_planes, stride=1, groups=1):\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, groups=groups, bias=False)", "title": "" }, { "docid": "30235595480c29d042c027bb01a7ae29", "score": "0.69467884", "text": "def conv3x3(in_planes, out_planes, stride=1):\n ret = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=True)\n ret.apply(KaimingNormalWeightsZeroBias())\n return ret", "title": "" }, { "docid": "2cc764612f81a68dce709b010cf13ef2", "score": "0.6939263", "text": "def conv3x3(in_planes, out_planes, stride=1):\n return torch.nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)", "title": "" }, { "docid": "46ae5ba6ba29d4d0335615552fd52d4e", "score": "0.6907197", "text": "def conv3x3(layer_types, in_planes, out_planes, stride=1):\n return layer_types.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)", "title": "" }, { "docid": "b82eefe2340f82e5fcf38b1af7084f32", "score": "0.6902309", "text": "def conv3d(x, name, num_filters, filter_size, stride=(1, 1, 1), pad=\"SAME\", dilations=(1, 1, 1),\n data_format=\"NCDHW\", wt_device=DEFAULT_DEVICE, op_device=DEFAULT_DEVICE):\n _conv_warning(\"NCDHW\", \"NDHWC\", data_format, op_device)\n assert len(filter_size) == 3\n assert len(stride) == 3\n assert len(dilations) == 3\n\n stride = [1, *stride, 1] if data_format == \"NDHWC\" else [1, 1, *stride]\n dilations = [1, *dilations, 1] if data_format == \"NDHWC\" else [1, 1, *dilations]\n\n channel_idx = 1 if data_format == \"NCDHW\" else 4\n b_shape = [1, 1, 1, 1, 1]\n b_shape[channel_idx] = num_filters\n w_shape = [*filter_size, int(x.get_shape()[channel_idx]), num_filters]\n\n with tf.variable_scope(name, caching_device=op_device, reuse=tf.AUTO_REUSE):\n with tf.device(wt_device):\n w = tf.get_variable(\"W\", w_shape, tf.float32, tf.glorot_normal_initializer())\n b = tf.get_variable(\"b\", b_shape, tf.float32, tf.zeros_initializer())\n\n with tf.device(op_device):\n return tf.nn.conv3d(x, w, stride, 
pad, data_format, dilations) + b", "title": "" }, { "docid": "d3297a3f34f50c6b197762d879f9fc3d", "score": "0.68946636", "text": "def conv3(self, out_channels, kernel_size, stride=1, padding=0):\r\n if isinstance(kernel_size, int):\r\n kernel_size = (kernel_size, kernel_size, kernel_size)\r\n if isinstance(stride, int):\r\n stride = (stride, stride, stride)\r\n if isinstance(padding, int):\r\n padding = (padding, padding, padding)\r\n\r\n if len(self.dims) != 4:\r\n raise ValueError(f'cannot 3d convolve {self.dims} (should be Cin, D, W, H)')\r\n\r\n kdep, khei, kwid = kernel_size\r\n sdep, shei, swid = stride\r\n pdep, phei, pwid = padding\r\n\r\n _, depth, width, height = self.dims\r\n\r\n if kdep > depth:\r\n raise ValueError(f'kernel size cannot exceed depth of {depth} (got {kdep})')\r\n if khei > height:\r\n raise ValueError(f'kernel size cannot exceed height of {height} (got {khei})')\r\n if kwid > width:\r\n raise ValueError(f'kernel size cannot exceed width of {width} (got {kwid})')\r\n\r\n out_depth = int((depth - kdep + 2 * pdep)/sdep) + 1\r\n out_height = int((height - khei + 2 * phei)/shei) + 1\r\n out_width = int((width - kwid + 2 * pwid)/swid) + 1\r\n\r\n return FluentShape((out_channels, out_depth, out_height, out_width), self.verbose)", "title": "" }, { "docid": "384b6ed1eee60a0ff1b1248e0c3bdcd7", "score": "0.68865937", "text": "def trans_conv_3d_pad(in_channels, out_channels,\n kernel_size=3, stride=1, padding=1, bias=False):\n if stride == 1:\n return nn.ConvTranspose3d(\n in_channels, out_channels, kernel_size,\n stride=1, padding=padding, bias=bias\n )\n elif stride == 2:\n return nn.ConvTranspose3d(\n in_channels, out_channels, kernel_size,\n stride=2, padding=padding, output_padding=1, bias=bias\n )", "title": "" } ]
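The convolution passages above all rest on the same output-size arithmetic, out = floor((in + 2*padding - kernel) / stride) + 1, which the `convolve` passage computes by hand for its 'same' padding (`ph`, `pw`) and output shape (`new_h`, `new_w`). Below is a minimal sketch checking that arithmetic for the conv3x3 pattern; it assumes PyTorch is available, and the tensor shapes are chosen purely for illustration:

```python
# Sketch of the conv3x3 pattern quoted in the passages above (assumes PyTorch).
# With kernel_size=3 and padding=1: out = floor((H + 2*1 - 3) / stride) + 1,
# so stride=1 preserves H and W, while stride=2 roughly halves them.
import torch
import torch.nn as nn

def conv3x3(in_planes, out_planes, stride=1):
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                     padding=1, bias=False)

x = torch.randn(2, 16, 32, 32)                                # (N, C, H, W)
assert conv3x3(16, 32)(x).shape == (2, 32, 32, 32)            # 32 -> 32
assert conv3x3(16, 32, stride=2)(x).shape == (2, 32, 16, 16)  # 32 -> 16
```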
3498a7cc29ff5590fe2fde94dffe5450
Save network layouts to self.full_graph. These will be written to the analyzed GML file.
[ { "docid": "c5f34d9464c49c44a9d4c58072641df8", "score": "0.78721035", "text": "def _save_network_layouts(self):\n\n # Set values for network layouts and minimum edge weights\n funcs = [nx.fruchterman_reingold_layout, nx.kamada_kawai_layout]\n min_weights = [0.0, 0.2, 0.4, 0.6, 0.8]\n\n # Iterate through the values of xpath_edge_weight, creating xpath_edge_weight\n for min_weight in min_weights:\n G_copy = self.full_graph.copy()\n G_copy = self._add_xpath_edge_weights(G_copy, min_weight)\n coords_list = []\n titles = []\n # Iterate through the network layout functions\n for func in funcs:\n # Getting x,y positions\n pos = func(G_copy, weight=\"xpath_edge_weight\")\n coords_list.append(pos)\n\n # Saving titles for plots and saved node attribute\n if func == nx.fruchterman_reingold_layout:\n title = f\"fr_{min_weight}\"\n else:\n title = f\"kk_{min_weight}\"\n titles.append(title)\n\n # Saving x,y pairs to the full graph\n x = {k: v[0] for k, v in pos.items()}\n y = {k: v[1] for k, v in pos.items()}\n nx.set_node_attributes(self.full_graph, x,\n f'x_{title.replace(\"0.\", \"\")}')\n nx.set_node_attributes(self.full_graph, y,\n f'y_{title.replace(\"0.\", \"\")}')\n\n # Plotting network layouts\n out_path = pathlib.Path(self.output_path) / \"network_layouts/\"\n out_path.mkdir(parents=True, exist_ok=True)\n for func, pos, title in zip(funcs, coords_list, titles):\n plt.figure()\n plt.title(title)\n nx.draw(self.full_graph, pos=pos, with_labels=True)\n plt.savefig(str(out_path / f\"{title}.png\"))", "title": "" } ]
[ { "docid": "fb02b937a6fd588a1c140c9a3dad723f", "score": "0.746374", "text": "def saveGraph(self):\n\t\tif self.save_folder == None:\n\t\t\tnx.write_gexf(self.DG, \"graph.gexf\")\n\t\telse:\n\t\t\tnx.write_gexf(self.DG, self.save_folder + \"/\" + \"graph.gexf\")", "title": "" }, { "docid": "1bc5934209f7e8cb5f958b512c67d78e", "score": "0.7257206", "text": "def _export_graph(self):\n target_nodes = ','.join(self._process_graph())\n ckpt = tf.train.get_checkpoint_state(self.model_path)\n freeze_graph.freeze_graph(\n input_graph=self.model_path + '/raw_graph_def.pb',\n input_binary=True,\n input_checkpoint=ckpt.model_checkpoint_path,\n output_node_names=target_nodes,\n output_graph=(self.model_path + '/' + self.env_name + '_'\n + self.run_id + '.bytes'),\n clear_devices=True, initializer_nodes='', input_saver='',\n restore_op_name='save/restore_all',\n filename_tensor_name='save/Const:0')", "title": "" }, { "docid": "e9428dcb50f814ada98632b18fffd88c", "score": "0.7168998", "text": "def saveAll(self):\n if not self.graph.allNodesHaveKernels():\n self.app.message(self.graph.getLastError())\n return\n if not self.graph.allInputsAndOutputsMet():\n self.app.message(self.graph.getLastError())\n return\n filename = self.app.getSaveFile()\n if os.path.exists(filename):\n if not messagebox.askokcancel(\"Graph XML Save\",\n \"File %s exists. Overwrite?\" %\n filename): return\n self.graph.saveToXML(filename)\n self.app.message(\"Saved\", filename)\n\n # Also write the corresponding C file\n filename = os.path.splitext(filename)[0]+'.c'\n writer = GraphCodeWriter(\"context\", self.graph)\n writer.writeCfile(filename)", "title": "" }, { "docid": "246ac48589fc0dca46aa3c7416055494", "score": "0.70068705", "text": "def store_graph(self):\n file_name = self.__generate_file_name('dag', '-representation')\n with open(file_name, \"w\") as f:\n f.write('Graph {\\n')\n f.write('\\tId: ')\n f.write(str(self.id))\n f.write('\\n')\n f.write('\\tProcessors: ')\n f.write(str(self.processors))\n f.write('\\n')\n f.write('\\tNodes: ')\n f.write(str(self.nodes))\n f.write('\\n')\n f.write('\\tNodeCosts: ')\n f.write(str(self.nodeCost))\n f.write('\\n')\n f.write('\\tLevels: ')\n f.write(str(self.treelevels))\n f.write('\\n')\n f.write('\\tLinks: ')\n links_str = ';'.join(map(lambda x: '({},{},{})|({},{},{})|({})'.format(x.orig.level,\n x.orig.block,\n x.orig.position,\n x.dest.level,\n x.dest.block,\n x.dest.position,\n x.cost),\n self.treelinks))\n f.write(links_str)\n f.write('\\n')\n f.write('\\tLowerBound: ')\n f.write(str(self.lowerbound))\n f.write('\\n')\n f.write('\\tDeadline: ')\n f.write(str(self.deadline))\n f.write('\\n')\n f.write('}')", "title": "" }, { "docid": "f77b013c052c24fb5cc015788cdf2594", "score": "0.6754013", "text": "def save_graph(self):\n name = \"k-%s.png\" % self.connectedness\n nx.draw(self.network,node_color=list(dict(mcolors.BASE_COLORS, **mcolors.CSS4_COLORS).keys())[self.network_iteration], with_labels=True, font_weight='bold')\n plt.savefig(name)", "title": "" }, { "docid": "b51e6ae8fe18f20cc973a52e94b7f56a", "score": "0.6727926", "text": "def dump_graph(_graph, out='transactions'):\n nx.write_graphml_lxml(_graph, f'output/{out}.graphml')", "title": "" }, { "docid": "5e72919e04e26b3490bca3211d5b6790", "score": "0.6725993", "text": "def export_graph(self, filepath):\n\n if filepath[-1] != \"/\":\n filepath = filepath + \"/\"\n\n plot_model(self.encoder,\n to_file=filepath + \"encoder.png\",\n show_shapes=True,\n expand_nested=False,\n show_layer_names=True)\n\n plot_model(self.decoder,\n 
to_file=filepath + \"decoder.png\",\n show_shapes=True,\n expand_nested=False,\n show_layer_names=True)\n\n plot_model(self.autoencoder,\n to_file=filepath + \"autoencoder.png\",\n show_shapes=True,\n expand_nested=True,\n show_layer_names=True)\n\n print(\"Model graphs saved.\\n\")", "title": "" }, { "docid": "893cec4cd4c67f5b875c90aeab0e4d69", "score": "0.67019856", "text": "def export_graph(self, filepath):\n\n if filepath[-1] != \"/\":\n filepath = filepath + \"/\"\n\n plot_model(self.encoder,\n to_file=filepath + \"encoder.png\",\n show_shapes=True,\n expand_nested=False,\n show_layer_names=True)\n\n plot_model(self.decoder,\n to_file=filepath + \"decoder.png\",\n show_shapes=True,\n expand_nested=False,\n show_layer_names=True)\n\n plot_model(self.autoencoder,\n to_file=filepath + \"autoencoder.png\",\n show_shapes=True,\n expand_nested=True,\n show_layer_names=True)\n\n plot_model(self.discriminator,\n to_file=filepath + \"discriminator.png\",\n show_shapes=True,\n expand_nested=False,\n show_layer_names=True)\n\n print(\"Model graphs saved.\\n\")", "title": "" }, { "docid": "05994214cb51efadc1baa8e1a8aa7b1a", "score": "0.66181046", "text": "def export_graph(self, filepath):\n\n if filepath[-1] != \"/\":\n filepath = filepath + \"/\"\n\n plot_model(self.encoder,\n to_file=filepath + \"encoder.png\",\n show_shapes=True,\n expand_nested=False,\n show_layer_names=True)\n\n plot_model(self.decoder,\n to_file=filepath + \"decoder.png\",\n show_shapes=True,\n expand_nested=False,\n show_layer_names=True)\n\n plot_model(self.autoencoder,\n to_file=filepath + \"autoencoder.png\",\n show_shapes=True,\n expand_nested=True,\n show_layer_names=True)\n\n plot_model(self.discriminator,\n to_file=filepath + \"discriminator.png\",\n show_shapes=True,\n expand_nested=False,\n show_layer_names=True)\n\n plot_model(self.discriminator_cat,\n to_file=filepath + \"discriminator_cat.png\",\n show_shapes=True,\n expand_nested=False,\n show_layer_names=True)\n\n print(\"Model graphs saved.\\n\")", "title": "" }, { "docid": "e1c2c3fc0e08432a9f12eb7d26c86f00", "score": "0.6600136", "text": "def save_graph(self, filename):\n with open(filename, 'wb') as f:\n pickle.dump(self._graph, f)", "title": "" }, { "docid": "7a9dad71c010d5b0a5964775dfed53c7", "score": "0.64828485", "text": "def export_graph(self, filename=None):\n self.graph_executor.export_graph_definition(filename)", "title": "" }, { "docid": "0a92e5ef2174b3962caf01fbb8527d08", "score": "0.6441036", "text": "def writeGraph(self, dirName, fName=None):\n n = self.name + '.graphml' if fName is None else fName\n graphml.write_graphml(\n self._graph, dirName + sep + n)", "title": "" }, { "docid": "719d6197d75a9fcc9167f62482de503f", "score": "0.63905686", "text": "def save(self):\n print('Saving processed complex data...')\n graphs = [self.a2a_graphs, self.b2a_graphs, self.b2b_grpahs_list]\n global_feat = [self.inter_feats_list, self.bond_types_list, self.type_count_list]\n with open(self.graph_path, 'wb') as f:\n pickle.dump((graphs, global_feat, self.labels), f)", "title": "" }, { "docid": "23528a7955bb9866f91845097717581b", "score": "0.6383832", "text": "def write_graph_and_checkpoint(inference_graph_def,\n model_path,\n input_saver_def,\n trained_checkpoint_prefix):\n for node in inference_graph_def.node:\n node.device = ''\n with tf.Graph().as_default():\n tf.import_graph_def(inference_graph_def, name='')\n with tf.Session() as sess:\n saver = tf.train.Saver(\n saver_def=input_saver_def, save_relative_paths=True)\n saver.restore(sess, 
trained_checkpoint_prefix)\n saver.save(sess, model_path)", "title": "" }, { "docid": "c35347fbdb21799b72054b4ffbfb6aa8", "score": "0.6328886", "text": "def save_best_network(self):\n\n filename = input(\"Where do you want to save it? \")\n # Structure the output format\n output = \"\"\n for row in self.networks[0][\"weights\"]:\n output += ','.join(str(weight) for weight in list(row.flatten()))\n output += \"\\n\"\n \n # Write the output\n write_file = open(filename, \"w\")\n write_file.write(output)", "title": "" }, { "docid": "3a1c553d9527e057210ceadbcc9bdd9c", "score": "0.6317635", "text": "def save(self):\n graph_path = os.path.join(self.save_path,\n self.save_name + '.bin')\n info_path = os.path.join(self.save_path,\n self.save_name + '.pkl')\n save_graphs(str(graph_path), self._g)\n save_info(str(info_path), {'num_classes': self.num_classes})", "title": "" }, { "docid": "0fee28a6b6d6724ce162f67c071941b3", "score": "0.62887144", "text": "def save(self, filename):\n g = self.tg.copy()\n for n, data in g.nodes(data=True):\n # Decompose position because tuples are not supported\n data[\"lat\"] = data[\"pos\"][0]\n data[\"lon\"] = data[\"pos\"][1]\n # Must delete or stringify the arrival and departure datetimes for saving\n data[\"arrivalTime\"] = str(data[\"arrivalTime\"])\n data[\"departureTime\"] = str(data[\"departureTime\"])\n\n for n, v, data in g.edges(data=True):\n # Must delete or stringify the duration timedelta for saving\n data[\"duration\"] = str(data[\"duration\"])\n\n base_path = os.path.dirname(os.path.realpath(__file__))\n base_path = os.path.join(base_path, \"../../../graphs\")\n assert os.path.exists(base_path)\n nx.write_gml(g, os.path.join(base_path, \"%s.gml\" % filename))", "title": "" }, { "docid": "343dc56e199c651911ac8575b6c623d2", "score": "0.6281873", "text": "def save_graph(self):\n r = Report(self.id_dds)\n outdir = 'out/cover-progress/%s/' % self.id_dds\n self.draw_graph(r)\n filename = os.path.join(outdir, 'graphs.html')\n logger.info('Writing to %r' % filename)\n r.to_html(filename)", "title": "" }, { "docid": "3665251a5b99238dd784df5da8b0cde9", "score": "0.6264171", "text": "def dump_graph(self, path: str):\n nx.drawing.nx_pydot.write_dot(self._nx_graph, path)", "title": "" }, { "docid": "d2a6ce4ad23ddd2a5cf20e9eb6503807", "score": "0.6230253", "text": "def writeGraph(self, file_):\n def toGraph_(node, graph):\n if node.node.pruned:\n color = \"cyan\"\n elif node.isLeaf:\n color = genre_map[node.node.value]\n else:\n color = feature_map[node.node.col]\n if node.isLeaf:\n desc = node.node.leaf_description\n else:\n desc = node.node.description\n vertex = pydot.Node(desc, style=\"filled\", fillcolor=color)\n graph.add_node(vertex)\n if not node.isLeaf:\n lNode = toGraph_(node.left, graph)\n rNode = toGraph_(node.right, graph)\n graph.add_node(lNode)\n graph.add_node(rNode)\n graph.add_edge(pydot.Edge(vertex, lNode))\n graph.add_edge(pydot.Edge(vertex, rNode))\n return vertex\n graph = pydot.Dot(graph_type='digraph', ordering='out')\n toGraph_(self, graph)\n graph.write_png(file_)", "title": "" }, { "docid": "faf36e5f51745b5879f2e2718fb35afb", "score": "0.6190183", "text": "def export_model(self, filepath):\n\n if filepath[-1] != \"/\":\n filepath = filepath + \"/\"\n\n self.autoencoder.save(filepath + 'autoencoder.h5')\n self.discriminator.save(filepath + 'discriminator.h5')\n self.discriminator_cat.save(filepath + 'discriminator_cat.h5')\n self.encoder.save(filepath + 'encoder.h5')\n self.decoder.save(filepath + 'decoder.h5')\n\n print(\"All networks 
exported in h5 format.\")", "title": "" }, { "docid": "dafc0ebf5ada04236fefd1bb1d16d83b", "score": "0.61864674", "text": "def export_model(self, filepath):\n\n if filepath[-1] != \"/\":\n filepath = filepath + \"/\"\n\n self.autoencoder.save(filepath + 'autoencoder.h5')\n self.discriminator.save(filepath + 'discriminator.h5')\n self.encoder.save(filepath + 'encoder.h5')\n self.decoder.save(filepath + 'decoder.h5')\n\n print(\"All networks exported in h5 format.\")", "title": "" }, { "docid": "95307d2e9067618afd40cb47e94d61e3", "score": "0.6174772", "text": "def print_graph_to_file(G, multi_edge_dic, folder, filename):\n # Print to file\n graph_filename = os.path.join(folder, filename[:-3] + '.txt')\n if DEBUG:\n print('Printing graph to file : ', graph_filename)\n\n with open(graph_filename, 'w') as f:\n\n # GENERAL\n f.write('#nodes: ' + str(G.number_of_nodes()) + '\\n')\n f.write('#edges: ' + str(G.number_of_edges()) + '\\n\\n')\n\n # INFORMATION ON NODES\n # all\n f.write('Nodes (' + str(G.number_of_nodes()) + '):\\n')\n f.write('-' * 80 + '\\n')\n for n, data in sorted(G.nodes(data=True), key=lambda x: sort_key(x)):\n f.write('{n:<60}, {w}\\n'.format(n=n[:60], w=data['id']))\n\n # local\n f.write('\\nLocal identifier nodes: \\n')\n print_node_family_to_file(G, f, 'local')\n\n # block references\n f.write('\\nBlock reference nodes: \\n')\n print_node_family_to_file(G, f, 'label')\n\n # global\n f.write('\\nGlobal nodes: \\n')\n print_node_family_to_file(G, f, 'global')\n\n # immediate value\n f.write('\\nImmediate value nodes: \\n')\n print_node_family_to_file(G, f, 'imm_val')\n\n # ad_hoc\n f.write('\\nAd hoc value nodes: \\n')\n print_node_family_to_file(G, f, 'ad_hoc')\n\n # leaf\n f.write('\\nLeaf nodes: \\n')\n print_node_family_to_file(G, f, 'leaf')\n\n # root\n f.write('\\nRoot nodes: \\n')\n print_node_family_to_file(G, f, 'root')\n\n # isolated\n f.write('\\nIsolated nodes: \\n')\n print_node_family_to_file(G, f, 'isolated')\n f.write('\\n\\n')\n\n # INFORMATION ON EDGES\n # all\n f.write('Edges (' + str(G.number_of_edges()) + ')\\n')\n f.write('-' * 80 + '\\n')\n for a, b, data in sorted(G.edges(data=True), key=lambda x: sort_key(x)):\n f.write('({a:<30}, {b:<30}) {w}\\n'.format(a=a[:30], b=b[:30], w=data['stmt']))\n\n # data flow edges\n dataedges = [(str(n[0]), str(n[1]), str(n[2])) for n in sorted(list(G.edges(data=True)),\n key=lambda x: sort_key(x))\n if n[2]['flow'] == 'data']\n f.write('\\nData flow edges: \\n')\n f.write('#edges: ' + str(len(dataedges)) + ' (' + str(int(len(dataedges)) / G.number_of_edges() * 100)[:5] +\n '%)\\n')\n f.write('-' * 80 + '\\n')\n for e in dataedges:\n f.write('({a:<30}, {b:<30}) {c}\\n'.format(a=e[0][:30], b=e[1][:30], c=e[2]))\n\n # control flow edges\n ctrledges = [(str(n[0]), str(n[1]), str(n[2])) for n in sorted(list(G.edges(data=True)),\n key=lambda x: sort_key(x))\n if n[2]['flow'] == 'ctrl']\n f.write('\\nCtrl flow edges: \\n')\n f.write('#edges: ' + str(len(ctrledges)) + ' (' + str(int(len(dataedges)) / G.number_of_edges() * 100)[:5] +\n '%)\\n')\n f.write('-' * 80 + '\\n')\n for e in ctrledges:\n f.write('({a:<30}, {b:<30}) {c}\\n'.format(a=e[0][:30], b=e[1][:30], c=e[2]))\n\n # multi-edges\n f.write('\\nMulti-edges: \\n')\n multi_edge_list = list()\n for k, v in multi_edge_dic.items(): # Compile the multi-edges\n multi_edge_list += v\n f.write('#multi-edges: ' + str(len(multi_edge_list)) + ' (' +\n str(int(len(multi_edge_list)) / G.number_of_edges() * 100)[:5] + '%)\\n')\n f.write('#node pairs connected by multi-edges: ' + 
str(len(multi_edge_dic.keys())) + ' (' +\n str(int(len(multi_edge_dic)) / G.number_of_edges() * 100)[:5] + '%)\\n')\n f.write('-' * 80 + '\\n')\n for k, v_ in multi_edge_dic.items():\n n = re.match(r'(.*) \\|\\|\\| (.*)', k)\n assert n is not None, \"Could not identify nodes in \" + k\n f.write('{m:<60} {p:<60}\\n'.format(m=n.group(1)[:60], p=n.group(2)[:60]))\n for v in v_:\n f.write('\\t{}\\n'.format(v))\n f.write('\\n')", "title": "" }, { "docid": "df100b18f8b9e58776177840f7180224", "score": "0.61514246", "text": "def export_model(self, filepath):\n\n if filepath[-1] != \"/\":\n filepath = filepath + \"/\"\n\n self.autoencoder.save(filepath + 'autoencoder.h5')\n\n self.encoder.save(filepath + 'encoder.h5')\n\n self.decoder.save(filepath + 'decoder.h5')\n\n print(\"All networks exported in h5 format.\")", "title": "" }, { "docid": "300012601bd2eb6328fa79f160cfb1f5", "score": "0.6132131", "text": "def save_graph_file(G, name_file='map'):\n name_len = len(name_file)\n\n # the length of extension\n if name_len > 7:\n file_extension = name_file[name_len - 8] + name_file[name_len - 7] + \\\n name_file[name_len - 6] + name_file[name_len - 5] + \\\n name_file[name_len - 4] + name_file[name_len - 3] + \\\n name_file[name_len - 2] + name_file[name_len - 1]\n if file_extension != '.graphml':\n name_file += '.graphml'\n else:\n name_file += '.graphml'\n\n ox.io.save_graphml(G, filepath=name_file)\n #ox.io.save_graph_shapefile(G, filepath= directory + 'shapefile')", "title": "" }, { "docid": "27c5b18302b351bf99c5166662a77202", "score": "0.6125915", "text": "def save(self, path):\n gs, ys = zip(*self.ds)\n graph_labels = {'y': torch.stack(ys)}\n save_graphs(path, list(gs), graph_labels)", "title": "" }, { "docid": "f34eb6297a6cf451dc6fe05837abe6c2", "score": "0.6120599", "text": "def add_summary_graph(self, sess: tf.Session) -> None:\n self.file_writer.add_graph(sess.graph)", "title": "" }, { "docid": "a8a12f219d43783921e83b26f03886bb", "score": "0.6097246", "text": "def write_graph(self, dot):\n dot.set_name(\"G\")\n for start_index in range(0, len(self._graph)):\n end = self._graph[start_index]\n for end_index in end:\n dot.add_edge(repr(start_index), repr(end_index[0]), \\\n repr(end_index[1]))\n dot.finish_output()", "title": "" }, { "docid": "06cdd14dbafe88980e8911820e39c032", "score": "0.60867715", "text": "def save_graph_only(caffe_def_path, caffemodel_path, inputs, output_file_path, output_node_names, graph_name='Graph',\n use_padding_same=False):\n with caffe_to_tensorflow_session(caffe_def_path, caffemodel_path, inputs, graph_name=graph_name,\n use_padding_same=use_padding_same) as sess:\n tf_freeze.save_graph_only(sess, output_file_path, output_node_names)", "title": "" }, { "docid": "621aa6bcc3309b0dfac7b88ad8667697", "score": "0.6053789", "text": "def save(self, save_file=None):\n if not save_file:\n save_file = Markov.SAVE_FILE\n with open(save_file, \"w\") as data:\n dump(self.graph, data)", "title": "" }, { "docid": "a648cd4e930fffc87e78c9eac8dedd82", "score": "0.6051887", "text": "def to_dense(self):\n n_graph = self.labels.shape[0]\n for id in tqdm(range(n_graph), desc = 'processing graphs'):\n graph = self.graphs[id]\n n_nodes = graph.num_nodes()\n row = th.arange(n_nodes, dtype = th.long)\n col = th.arange(n_nodes, dtype = th.long)\n\n row = row.view(-1,1).repeat(1, n_nodes).view(-1)\n col = col.repeat(n_nodes)\n\n src = graph.edges()[0]\n dst = graph.edges()[1]\n\n idx = src * n_nodes + dst\n size = list(graph.edata['edge_attr'].size())\n size[0] = n_nodes * n_nodes\n edge_attr = 
graph.edata['edge_attr'].new_zeros(size)\n \n edge_attr[idx] = graph.edata['edge_attr']\n \n pos = graph.ndata['pos']\n dist = th.norm(pos[col] - pos[row], p=2, dim=-1).view(-1, 1)\n \n new_edge_attr = th.cat([edge_attr, dist.type_as(edge_attr)], dim = -1)\n \n new_graph = dgl.graph((row,col))\n \n new_graph.ndata['attr'] = graph.ndata['attr']\n new_graph.edata['edge_attr'] = new_edge_attr\n new_graph = new_graph.remove_self_loop()\n \n self.graphs[id] = new_graph", "title": "" }, { "docid": "694c1a9fa69634f5f95f09e989ae9067", "score": "0.604962", "text": "def Write(self):\n #nodes = self.Nodes()\n #edges = self.Edges()\n pass", "title": "" }, { "docid": "403f3ec319e829e2179d166b28501ef1", "score": "0.6047407", "text": "def create_graphml_file(self, filename, grouping = None):\r\n \r\n file = open(filename+\".graphml\", \"w\")\r\n \r\n if grouping == None:\r\n grouping = Grouping.create_from_vector([0]*self.number_of_nodes)\r\n \r\n n = self.number_of_nodes\r\n m = self.number_of_edges\r\n\r\n nodes = grouping.get_nodes()\r\n non_empty_groups = grouping.get_non_empty_groups()\r\n c = len(non_empty_groups) \r\n A = self.adjacency_matrix\r\n \r\n print n,m,c,len(nodes),len(colors)\r\n \r\n # set palettes...\r\n if c <= len(colors):\r\n if c == 1:\r\n palette = [\"#FFFFFF\"]\r\n else:\r\n palette = graphml_colors\r\n else:\r\n palette = map(lambda x: \"gray\"+str(x), range(0,100))\r\n \r\n \r\n # Preamble stuff...\r\n file.write('<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\"?>\\n<graphml xmlns=\"http://graphml.graphdrawing.org/xmlns/graphml\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xmlns:y=\"http://www.yworks.com/xml/graphml\" xsi:schemaLocation=\"http://graphml.graphdrawing.org/xmlns/graphml http://www.yworks.com/xml/schema/graphml/1.0/ygraphml.xsd\">\\n<key for=\"node\" id=\"d0\" yfiles.type=\"nodegraphics\"/>\\n<key attr.name=\"description\" attr.type=\"string\" for=\"node\" id=\"d1\"/>\\n<key for=\"edge\" id=\"d2\" yfiles.type=\"edgegraphics\"/>\\n<key attr.name=\"description\" attr.type=\"string\" for=\"edge\" id=\"d3\"/>\\n<key for=\"graphml\" id=\"d4\" yfiles.type=\"resources\"/>\\n')\r\n \r\n # Graph header\r\n \r\n file.write(\"\"\"<graph edgedefault=\"directed\" id=\"G\" parse.edges=\"%d\" parse.nodes=\"%d\" parse.order=\"free\">\\n\"\"\" % (m,n))\r\n\r\n\r\n # Now we write the nodes...\r\n for i in xrange(0,n):\r\n file.write(\"\"\"<node id=\"n%d\">\r\n <data key=\"d0\">\r\n <y:ShapeNode>\r\n <y:Geometry height=\"30.0\" width=\"30.0\" x=\"80.56138057743618\" y=\"0.0\"/>\r\n <y:Fill color=\"%s\" transparent=\"false\"/>\r\n <y:BorderStyle color=\"#000000\" type=\"line\" width=\"1.0\"/>\r\n <y:NodeLabel alignment=\"center\" autoSizePolicy=\"content\" fontFamily=\"Dialog\" fontSize=\"12\" fontStyle=\"plain\" hasBackgroundColor=\"false\" hasLineColor=\"false\" height=\"18.701171875\" modelName=\"internal\" modelPosition=\"c\" textColor=\"#000000\" visible=\"true\" width=\"33.0\" x=\"-1.5\" y=\"5.6494140625\">%d</y:NodeLabel>\r\n <y:Shape type=\"ellipse\"/>\r\n </y:ShapeNode>\r\n </data>\r\n <data key=\"d1\"/>\r\n </node>\\n\"\"\" % (i, palette[non_empty_groups.index(nodes[i])], i))\r\n \r\n current_edge = 0\r\n # Now we do the edges...\r\n for i in range(0, n):\r\n for j in range(0,i+1):\r\n if A[i,j] == 1:\r\n file.write(\"\"\"<edge id=\"e%d\" source=\"n%d\" target=\"n%d\">\r\n <data key=\"d2\">\r\n <y:PolyLineEdge>\r\n <y:Path sx=\"0.0\" sy=\"0.0\" tx=\"0.0\" ty=\"0.0\"/>\r\n <y:LineStyle color=\"#000000\" type=\"line\" width=\"1.0\"/>\r\n <y:Arrows 
source=\"none\" target=\"none\"/>\r\n <y:BendStyle smoothed=\"false\"/>\r\n </y:PolyLineEdge>\r\n </data>\r\n <data key=\"d3\"/>\r\n </edge>\\n\"\"\" % (current_edge, i, j))\r\n current_edge += 1\r\n\r\n\r\n # close off tags...\r\n file.write(\"\"\" </graph>\r\n <data key=\"d4\">\r\n <y:Resources/>\r\n </data>\r\n</graphml>\"\"\")\r\n file.close()", "title": "" }, { "docid": "113c0a695a607a20f1692ee1b3257a11", "score": "0.6017589", "text": "def save_network_to_disk(graph):\n graph_json = graph.convert_to_json()\n with open(\"json/save_data.json\", 'wt') as out:\n res = json.dump(graph_json, out, sort_keys=True, indent=4, separators=(',', ': '))", "title": "" }, { "docid": "7201f51d18f84fa4c5a93c9a13c93c7e", "score": "0.600976", "text": "def write_graph(g, filename):\n with open(filename, 'w') as f:\n f.write(repr(g))", "title": "" }, { "docid": "768e6caf545861c4f6fc263143eb3fd6", "score": "0.60063756", "text": "def generate(self):\n self._assemble()\n self.graph.save(self.dotfile)\n self.graph.render(self.fcfile.parent.joinpath(self.fcfile.stem),\n cleanup=True)", "title": "" }, { "docid": "177a1a013290db4acbfe09b7601b676e", "score": "0.59976816", "text": "def save_network(self, path=None):\r\n if path==None and hasattr(self, 'paths') is False:\r\n print('No path is available to load the network.')\r\n elif path is None:\r\n path = self.paths['network_pickle']\r\n try:\r\n io.pickle_links_and_nodes(self.links, self.nodes, path)\r\n print('Links and nodes saved to pickle file: {}.'.format(self.paths['network_pickle']))\r\n except AttributeError:\r\n print('Network has not been computed yet. Use the compute_network() method first.')", "title": "" }, { "docid": "16c057a17d3f95d8eb22d6c3599bf4d2", "score": "0.5968342", "text": "def export_to_graphml(self):\n filename = AskFile(1, \"*.graphml\", \"File to export to?\")\n\n if not filename:\n # For example, file dialog was closed\n print 'Error getting filename'\n return None\n\n if self.cache.trace_d:\n # Already populated\n trace_d = self.cache.trace_d\n\n else:\n print '[!] Could not find trace dictionary on cache'\n return None\n\n # TODO: working with tid 0 for now\n # Maybe just generate several filename_N.graphml?\n t0_list = trace_d[0]\n edge_list = [map(lambda x: GetFunctionName(x), e) for e in t0_list]\n\n return graphing.write_to_graphml(edge_list, filename)", "title": "" }, { "docid": "870e92c797bfb5e77285563da7a6d89b", "score": "0.59642977", "text": "def Save(self, SOut):\n return _snap.PNGraph_Save(self, SOut)", "title": "" }, { "docid": "dfa693c144fc14033606e04a52569df9", "score": "0.5942422", "text": "def store(self) -> None:\n graph = self._generate_graph()\n self._show_graph(graph)", "title": "" }, { "docid": "c9823f78c2c27ff4947bd3e1d0688aea", "score": "0.5931906", "text": "def Save(self, SOut):\n return _snap.TNEGraph_Save(self, SOut)", "title": "" }, { "docid": "2c0bc438f60f0c18d28061577bf80d91", "score": "0.5917545", "text": "def export_network(self, path, networks=None):\n results = None\n try:\n results = self._return_networks(networks)\n if path:\n for network in results:\n name = path + '/' + network + '.graphml'\n nx.write_graphml(results[network], name)\n except Exception:\n logger.error(\"Could not write database graph to GraphML file. 
\\n\", exc_info=True)\n return results", "title": "" }, { "docid": "45c1c951b33768cceb1b711141808513", "score": "0.5913544", "text": "def savegraph(fname = 'graph'):\n plt.savefig(fname)", "title": "" }, { "docid": "d3d210412832f5f648db6a5b059b5674", "score": "0.59046733", "text": "def _graph_save_to_png(GEN_GRAPH, graph):\n if GEN_GRAPH:\n print(\"Printing graph...\")\n graph.layout(prog='dot')\n graph.draw('./tmp/cell_viz.png')", "title": "" }, { "docid": "11f98cdb084afab31f4027e790c2a2e9", "score": "0.5895195", "text": "def graphVizExport(self, filename, size = (8,11)):\n f = open(filename, 'w')\n f.write(\"digraph G {\\nrankdir=LR;\")\n f.write(\"size = \\\"\" + str(size[0]) + \",\" + str(size[1]) + \"\\\";\")\n\t#f.write(\"rotate = 90;\")\n\n if self.finalStates:\n f.write(\"node [shape = doublecircle];\\n\")\n for state in self.finalStates:\n f.write(\"\\\"\" + str(state) + \"\\\" \")\n f.write(\";\\n\")\n\n if self.states:\n f.write(\"node [shape = circle];\\n\")\n\n## if self.startState:\n## f.write(\" -> \" +\n## self.startState +\n## \";\")\n \n for state in list(self.states.values()):\n for epsilon in state.epsilon:\n f.write(\"\\\"\" + str(state.name) +\n \"\\\" -> \\\"\" + \n epsilon +\n \"\\\" [fontname=\\\"Symbol\\\", label=\\\"e\\\"];\\n\")\n for symbol in list(state.transitions.keys()):\n transitions = state.transitions[symbol]\n if transitions.__class__ is not [].__class__:\n transitions = [transitions]\n for transition in transitions:\n f.write(\"\\\"\" + str(state.name) +\n \"\\\" -> \\\"\" +\n str(transition) +\n \"\\\" [ label = \\\"\" +\n str(symbol) +\n \"\\\" ];\\n\")\n f.write(\"}\\n\")\n f.close()", "title": "" }, { "docid": "5056c8cbc8a5dd79dd1c3310186afdf4", "score": "0.5895085", "text": "def write_visualization(self, path):\n return tree.export_graphviz(self._learner, out_file='{}.dot'.format(path))", "title": "" }, { "docid": "67d50aca6d970394ec565cdf094bca84", "score": "0.5887464", "text": "def Save(self, SOut):\n return _snap.TUNGraph_Save(self, SOut)", "title": "" }, { "docid": "2fd4fb24f20c8e8f48d3f6abb02bb535", "score": "0.5884249", "text": "def export(self):\n self._replace_symbolic_related()\n onnx.save(self.onnx_model, self.export_path)", "title": "" }, { "docid": "d5f7178119faebed7e22df6af6ab35c5", "score": "0.58562", "text": "def freeze_graph(model_dir, output_node_names):", "title": "" }, { "docid": "4c30d15fb2be398be6bf4f4c9910f07d", "score": "0.5850406", "text": "def save(self, prefix=None):\n prefix = prefix if prefix is not None else \"inference\"\n self.params.save(prefix=prefix)\n self.save_configuration(prefix + '_configuration.json')\n graphs = [g.as_json()for g in self._graphs]\n FactorGraph.save(prefix + \"_graphs.json\", graphs)", "title": "" }, { "docid": "2d79d531410e04a81ec8d3d717ab902c", "score": "0.58463395", "text": "def Save(self, SOut):\n return _snap.PUNGraph_Save(self, SOut)", "title": "" }, { "docid": "84db506f9de381ec592d52857f710088", "score": "0.5834121", "text": "def saveGraph(gr, fout):\n f = ROOT.TFile(fout, \"update\")\n gr.Write(gr.GetName(), ROOT.TObject.kOverwrite)\n f.Close()", "title": "" }, { "docid": "3a9242cadf80c5b9364a0d1fd34bddd7", "score": "0.583177", "text": "def Save(self, SOut):\n return _snap.TNGraph_Save(self, SOut)", "title": "" }, { "docid": "6e4e2e71690ef95c8fdc19ac2b9d18bc", "score": "0.58218795", "text": "def save(self):\n self.edges.save()", "title": "" }, { "docid": "6960cdd1c52cd8ed65c1d28bea13977f", "score": "0.5818983", "text": "def make_graph(AllLinks):\n MyGraph = 
nx.Graph(name=\"word-graph\")\n for Link in AllLinks:\n Node1 = Link[0]\n Node2 = Link[1]\n Weight = Link[2]*100\n print(Node1, Node2, Weight)\n MyGraph.add_node(Node1, label=Node1)\n MyGraph.add_edge(Node1, Node2, weight=Weight)\n nx.write_gexf(MyGraph,\"mygraph_contemps.gexf\")\n return MyGraph", "title": "" }, { "docid": "8b1eab98e759ae34c75c830c6d182d63", "score": "0.5800677", "text": "def save_layout(self):\n return self.manager.save_layout()", "title": "" }, { "docid": "264cb4400fdc72a2132e865d55e8d1ef", "score": "0.57810074", "text": "def exportGML(self, o, nodes, conf):\n o.write(\" edge [\\n\")\n o.write(\" source %d\\n\" % nodes[self.src].id)\n o.write(\" target %d\\n\" % nodes[self.dest].id)\n o.write(\" label\\n\")\n o.write(\" \\\"%s\\\"\\n\" % self.getLabel(nodes, conf).encode(latinenc, errors=\"ignore\"))\n o.write(\" ]\\n\")", "title": "" }, { "docid": "c11041b60f0ed99973991df6f78723b4", "score": "0.5756969", "text": "def write_graph(graph, filename, append=False):\n mode = 'a' if append else 'w'\n with open(filename, mode) as OUT:\n OUT.write(\"Node1\\tx1\\ty1\\tz1\\tNode2\\tx2\\ty2\\tz2\\n\")\n for edge in graph:\n OUT.write(edge.tabrepr)", "title": "" }, { "docid": "60e5965393e306f89f4ca912dfc6bbed", "score": "0.5754509", "text": "def view(export_dir:str, output_node_names:List[str], frozen:bool):\n writer=tf.summary.FileWriter(get_log_dir(split(export_dir)[-1]+('-f' if frozen else '')))\n g=restore(export_dir, output_node_names, frozen)\n writer.add_graph(g)", "title": "" }, { "docid": "69a933f0a3d07ddcb63c32685773589c", "score": "0.5738027", "text": "def save_networks(self, epoch):\n for name in self.model_names:\n if isinstance(name, str):\n save_filename = '%s_net_%s.pth' % (epoch, name)\n save_path = os.path.join(self.config.weights_dir, save_filename)\n net = getattr(self, 'net' + name)\n torch.save(net.state_dict(), save_path)", "title": "" }, { "docid": "2b51775cffa1e55c1b83553c6a1b98ad", "score": "0.57332456", "text": "def save_flat(self):\n\n lun = open(self.file_flat_save, 'wb')\n\n pickle.dump((self.flattened, \n self.arr_flat, \n self.wcs_flat,\n self.num_planes), lun)\n lun.close()\n print(\"Wrote: \" + self.file_flat_save)", "title": "" }, { "docid": "1a0cd902a64995bd1ab2c45bec70826f", "score": "0.57279974", "text": "def dump_graph(self, external=None):\n\n # Use the parameter graph if provided\n comp_graph = external if external else self.comp_graph\n # Iterate through the edges\n for edge in comp_graph.get_edgelist():\n # we need to use the two vertice id's to get the edge id\n edge_id = comp_graph.get_eid(edge[0], edge[1])\n # log the names of the vertices with the capacity of the edge\n logging.info(\"{}->{} = {}\".format(comp_graph.vs[edge[0]]['name'],\n comp_graph.vs[edge[1]]['name'],\n comp_graph.es[edge_id]['capacity']))", "title": "" }, { "docid": "9508814f32719da02cc9400467746560", "score": "0.5721993", "text": "def save(self, savedir, timestep):\n if not self.finalized:\n raise RuntimeError()\n model_dir = self.model_dir if savedir is None else savedir\n\n # Write structure to file\n with open(os.path.join(model_dir, '{}_{}.nns'.format(self.name, timestep)), \"w+\") as f:\n for layer in self.layers[:-1]:\n f.write(\"%s\\n\" % repr(layer))\n last_layer_copy = self.layers[-1].copy()\n last_layer_copy.set_activation(self.end_act_name)\n if self.is_probabilistic:\n last_layer_copy.set_output_dim(last_layer_copy.get_output_dim() // 2)\n else:\n last_layer_copy.set_output_dim(last_layer_copy.get_output_dim())\n f.write(\"%s\\n\" % 
repr(last_layer_copy))\n\n # Save network parameters (including scalers) in a .mat file\n var_vals = {}\n for i, var_val in enumerate(self.sess.run(self.nonoptvars + self.optvars)):\n var_vals[str(i)] = var_val\n savemat(os.path.join(model_dir, '{}_{}.mat'.format(self.name, timestep)), var_vals)", "title": "" }, { "docid": "d743b03a007ae05cd274a0bce1a99541", "score": "0.57186383", "text": "def graph_viz(self, filename, mode='read'):\n formater = GraphVizFormater(mode)\n with open(filename, 'w') as file:\n file.write('digraph A{\\n')\n for peak in self.peaks.difference(set(self.graph.keys())): # specify label for peaks without edges from them\n formater.peak = peak\n file.write(formater.format_peak())\n file.write('\\n')\n for peak, outs in self.graph.items():\n formater.peak = peak\n file.write(formater.format_peak())\n formater.outs = outs\n for out in outs:\n formater.out = out\n file.write(formater.format_edge())\n file.write('}')\n return filename", "title": "" }, { "docid": "ec09b924d1ee2f4c63fa8896ea557984", "score": "0.5714368", "text": "def save_graph(self, h_gs, h_Ps, t_gs, t_Ps, Hs, Ps):\n with open('{}_{}.pickle'.format(self.filename, self.method), 'wb') as f:\n # pickle.dump(h_g, f)\n # pickle.dump(h_g1, f)\n # pickle.dump(h_g2, f)\n # pickle.dump(h_g3, f)\n # pickle.dump(h_g4, f)\n for h_g in h_gs:\n pickle.dump(h_g, f)\n\n # pickle.dump(h_P10, f)\n # pickle.dump(h_P21, f)\n # pickle.dump(h_P32, f)\n # pickle.dump(h_P43, f)\n for h_P in h_Ps:\n pickle.dump(h_P, f)\n\n # pickle.dump(t_g, f)\n # pickle.dump(t_g1, f)\n # pickle.dump(t_g2, f)\n # pickle.dump(t_g3, f)\n for t_g in t_gs:\n pickle.dump(t_g, f)\n\n # pickle.dump(t_P10, f)\n # pickle.dump(t_P21, f)\n # pickle.dump(t_P32, f)\n for t_P in t_Ps:\n pickle.dump(t_P, f)\n\n # pickle.dump(H_inv, f)\n # pickle.dump(P, f)\n if self.method == 'graclus_hier':\n pickle.dump(Hs, f)\n pickle.dump(Ps, f)\n elif self.method == 'embedding':\n for H in Hs:\n pickle.dump(H, f)\n \n for P in Ps:\n pickle.dump(P, f)\n else:\n raise NotImplementedError", "title": "" }, { "docid": "6b012d92e2c2c56899c2e3a022b55f03", "score": "0.5712257", "text": "def outputGraphInCliquerFormat(graph, output_fname):\n\t\tof = open(output_fname, 'w')\n\t\tof.write(\"p edge %s %s\\n\"%(graph.number_of_nodes(), graph.number_of_edges()))\n\t\tnode_name2index = {}\n\t\tfor n in graph.nodes():\n\t\t\tnode_name2index[n] = len(node_name2index)+1\n\t\tfor e in graph.edges():\n\t\t\tof.write(\"e %s %s\\n\"%(node_name2index[e[0]], node_name2index[e[1]]))\n\t\tdel of", "title": "" }, { "docid": "af31a5275bc600a424b05855df418ae7", "score": "0.57107466", "text": "def Save(self, SOut):\n return _snap.TBPGraph_Save(self, SOut)", "title": "" }, { "docid": "51869d49a08cfcbcc6245dc6707a1eb6", "score": "0.5704901", "text": "def save(self, file: str) -> None:\n path = os.path.split(file)\n self.graph.save(filename=path[-1], directory=os.path.join(*path[:-1]))", "title": "" }, { "docid": "0b9a8f9531199b55d60f8b2e7fa16703", "score": "0.57038003", "text": "def write_gexf(self,\n filepath,\n exclude_node_fields=None,\n exclude_edge_fields=None,\n layout=None,\n ):\n\n layout_key = None\n if layout is not None:\n layout_key = '_layouts/{}'.format(layout)\n if layout not in self.layouts:\n raise ValueError(\"Layout not found, use None for no layout\")\n\n ### filter the node and edge attributes\n\n # to do this we need to get rid of the assignments in the\n # nodes though since this is not really supported or good to\n # store in a gexf file which is more for visualization as an\n # 
XML format, so we copy and modify then write the copy\n gexf_graph = deepcopy(self._graph)\n\n ## Nodes\n\n if exclude_node_fields is None:\n exclude_node_fields = [self.ASSIGNMENTS]\n else:\n exclude_node_fields.append(self.ASSIGNMENTS)\n exclude_node_fields = list(set(exclude_node_fields))\n\n # exclude the layouts, we will set the viz manually for the layout\n exclude_node_fields.extend(['_layouts/{}'.format(layout_name)\n for layout_name in self.layouts])\n\n for node in gexf_graph:\n\n # remove requested fields\n for field in exclude_node_fields:\n del gexf_graph.nodes[node][field]\n\n # also remove the fields which are not valid gexf types\n fields = list(gexf_graph.nodes[node].keys())\n for field in fields:\n\n if (type(gexf_graph.nodes[node][field]) not in\n nx.readwrite.gexf.GEXF.xml_type):\n\n del gexf_graph.nodes[node][field]\n\n if layout_key is not None:\n\n # set the layout as viz attributes to this\n gexf_graph.nodes[node]['viz'] = self._graph.nodes[node][layout_key]\n\n ## Edges\n\n if exclude_edge_fields is None:\n exclude_edge_fields = ['all_transitions']\n else:\n exclude_edge_fields.append('all_transitions')\n exclude_edge_fields = list(set(exclude_edge_fields))\n\n # TODO: viz and layouts not supported for edges currently\n #\n # exclude the layouts, we will set the viz manually for the layout\n # exclude_edge_fields.extend(['_layouts/{}'.format(layout_name)\n # for layout_name in self.layouts])\n\n for edge in gexf_graph.edges:\n\n # remove requested fields\n for field in exclude_edge_fields:\n\n del gexf_graph.edges[edge][field]\n\n # also remove the fields which are not valid gexf types\n fields = list(gexf_graph.edges[edge].keys())\n\n for field in fields:\n\n if (type(gexf_graph.edges[edge][field]) not in\n nx.readwrite.gexf.GEXF.xml_type):\n\n del gexf_graph.edges[edge][field]\n\n # TODO,SNIPPET: we don't support layouts for the edges,\n # but maybe we could\n\n # if layout_key is not None:\n # # set the layout as viz attributes to this\n # gexf_graph.nodes[node]['viz'] = self._graph.nodes[node][layout_key]\n\n # then write this filtered gexf to file\n nx.write_gexf(gexf_graph, filepath)", "title": "" }, { "docid": "d33f9f7e248c2b1df806506fcab8d21f", "score": "0.5689645", "text": "def exportXML(self, filesave='', **kwargs):\n for key in kwargs:\n print('WARNING GraphIO.exportXML: received keyword', key, ':',\n kwargs[key], \", don't know what to do with it.\")\n try:\n from lxml import etree\n except ImportError as e:\n print('Exception', type(e), 'in GraphIO.graphToXML:')\n print(e)\n return\n root = etree.Element(\"graph\")\n # write headers\n for container in ['headers', 'sampleInfo', 'graphInfo']:\n if hasattr(self, container) and len(getattr(self, container)):\n tmp = etree.SubElement(root, container)\n for key in getattr(self, container):\n tmp.set(key, str(getattr(self, container)[key]))\n # write curves\n for c in range(self.length()):\n curve = self.curve(c)\n tmp = etree.SubElement(root, 'curve'+str(c))\n attr = etree.SubElement(tmp, 'attributes')\n for key in curve.getAttributes():\n attr.set(key, str(curve.attr(key)))\n data = etree.SubElement(tmp, 'data')\n x = etree.SubElement(data, 'x')\n y = etree.SubElement(data, 'y')\n x.text = ','.join([str(e) for e in curve.x()])\n y.text = ','.join([str(e) for e in curve.y()])\n # save\n filesave = GraphIO.filesave_default(self, filesave)\n if len(filesave) < 4 or filesave[-4:] != '.xml':\n filesave += '.xml'\n tree = etree.ElementTree(root)\n tree.write(filesave, pretty_print=True, xml_declaration=True, 
encoding=\"utf-8\")\n if self.attr('saveSilent') != True:\n print('Graph data saved as', filesave.replace('/', '\\\\'))\n return filesave", "title": "" }, { "docid": "47f0c5ea994fc7bf4de5210be1a4cb1f", "score": "0.5688072", "text": "def visualize(self,outfiletype='pdf'):\n import pydot\n from networkx.drawing.nx_pydot import write_dot\n from plumbum import local\n filename = os.path.join(self.location,'grid.dot')\n write_dot(self.graph, filename)\n local['neato']('-T'+outfiletype,filename,'-o{}{}'.format(filename[:-3],outfiletype))", "title": "" }, { "docid": "381d19542fdffac5914517fdf05a4995", "score": "0.567607", "text": "def save(self, dot_graph: Digraph, filename:\n str, file_format: str) -> None:\n try:\n DigraphConverter().render(dot_graph, filename)\n except Exception as error:\n raise DigraphSaveException(\"Failed to save digraph\")", "title": "" }, { "docid": "336528546922e22c11f1ad8b98b65bd4", "score": "0.56558627", "text": "def store_graph(graph):\n bounds = reduce(lambda x, y: x + y, (into_interval(node, node, 0.0)\n for node in graph.iter_nodes()))\n\n SCALE = 100\n \n with open(\"ghent.svg\", \"w+\") as f:\n f.write('<svg xmlns=\"http://www.w3.org/2000/svg\" \\\n xmlns:xlink=\"http://www.w3.org/1999/xlink\">\\n')\n for edge in graph.iter_edges():\n f.write('<line x1=\"%f\" y1=\"%f\" x2=\"%f\" y2=\"%f\" style=\"stroke:#000000;\"/>\\n' %\n ((-graph.get(edge.id).x + bounds.maxx) * SCALE,\n (-graph.get(edge.id).y + bounds.maxy) * SCALE,\n (-graph.get(edge.to).x + bounds.maxx) * SCALE,\n (-graph.get(edge.to).y + bounds.maxy) * SCALE))\n f.write(\"</svg>\")\n\n with open(\"edges.txt\", \"w+\") as f:\n for edge in graph.iter_edges():\n f.write(\"{}\".format(edge))\n f.write(\"\\n\")", "title": "" }, { "docid": "f55500dbb0dc20ca05972bd01a826b10", "score": "0.5653662", "text": "def Save(self, SOut):\n return _snap.TNGraphMP_Save(self, SOut)", "title": "" }, { "docid": "9043bf6ab201bd1e9a66084ad9909fb8", "score": "0.56521404", "text": "def render_local(self, filename):\n # Get the graph information\n node_list, edge_list = self.__generate_image()\n # Generate the graph\n from pygraphviz import AGraph\n G=AGraph(strict=True,directed=True) # Create a graph\n for node in node_list:\n G.add_node(node)\n for edge in edge_list:\n G.add_edge(edge[0], edge[1]) \n G.layout('dot') # Set hierarchical layout\n G.draw(filename) # Save the image.", "title": "" }, { "docid": "6984efa98fd278b2abd05ba9c6cceaeb", "score": "0.5646517", "text": "def Save(self, SOut):\n return _snap.PNGraphMP_Save(self, SOut)", "title": "" }, { "docid": "acee2122dc20180571c054e74384ae81", "score": "0.56434375", "text": "def save_net(net):\n\t\t\n\t\"\"\" save network to file \"\"\"\n\tn_file = open(os.path.join('output', net.name, 'Network'), 'w')\n\tpickle.dump(net, n_file)\n\tn_file.close()\n\n\tsave_file = os.path.join('output', net.name, net.name + '_params.txt')\n\tif hasattr(net, 'runtime'):\n\t\tprint_params(vars(net), save_file, runtime=net.runtime)\n\telse:\n\t\tprint_params(vars(net), save_file)", "title": "" }, { "docid": "94a846281b0d1fe008390eb3daa233d7", "score": "0.56300956", "text": "def save_model(self):\n if not os.path.exists(self.model_out_dir):\n os.makedirs(self.model_out_dir)\n out_dir = os.path.join(self.model_out_dir, self.net_type)\n if not os.path.exists(out_dir):\n os.makedirs(os.path.join(out_dir))\n if not os.path.exists(os.path.join(out_dir, \"model_number.txt\")):\n model_number = np.array([0])\n else:\n model_number = np.fromfile(os.path.join(out_dir, \"model_number.txt\"),\n dtype=int)\n 
model_file_name = self.net_type + \"-\" + str(model_number[0])\n with open(os.path.join(self.model_out_dir, self.net_type, model_file_name + \".json\"), \"a+\") as jfile:\n jfile.write(self.model.to_json())\n self.model.save_weights(os.path.join(out_dir, model_file_name + \".h5\"))\n model_number[0] += 1\n model_number.tofile(os.path.join(out_dir, \"model_number.txt\"))", "title": "" }, { "docid": "2bac53f7ffade3d10c58e97226cf8281", "score": "0.5628187", "text": "def draw_network(graph, users, filename):\n plt.figure(figsize=(6,6))\n nx.draw_networkx(graph,with_labels=True,labels=d,alpha=0.3,node_size=30)\n #plt.show(graph)\n plt.savefig(filename)", "title": "" }, { "docid": "f842d4798d97694401fcb4778e5c1c1c", "score": "0.56235766", "text": "def write_graph_mathematica(self, out_file = \"graph.txt\"):\n f = open(out_file,'w')\n my_string_list = []\n for node_id, node_childs in self.graph.items(): # v is a list\n for child in node_childs :\n my_string_list.append(\"%i -> %i\"%(node_id, child))\n f.write(\",\".join(my_string_list))\n f.close()", "title": "" }, { "docid": "31c321076feb3b857cbd234777e358a4", "score": "0.56125456", "text": "def save(self, save_as):\r\n \"\"\":param save_as: boolean paramater for option save as (not just save)\"\"\"\r\n if save_as or self.file_name is None:\r\n file = filedialog.asksaveasfile(\r\n mode='w', defaultextension=\".txt\", filetypes=((\"Textové súbory\", \"*.txt\"), (\"Všetky súbory\", \"*.*\")))\r\n if file is None:\r\n return\r\n self.file_name = file.name\r\n with open(self.file_name, 'w') as file:\r\n for i, graph in enumerate([self.first_graph, self.second_graph]):\r\n file.write(f'Výsledky siete {(i+1)}:\\n\\nZákladné štatistiky:\\n')\r\n for index, result in enumerate(graph.basic_results):\r\n to_write = f'{LABELS[index]} '\r\n to_write += \"-\" if result == -3 else str(result)\r\n file.write(f'{to_write}\\n')\r\n used = len(self.first_graph.basic_results)\r\n file.write('Grafletové štatistiky:\\n')\r\n for index, result in enumerate(graph.graphlets_sum_counts):\r\n to_write = f'{LABELS[index + used]} '\r\n to_write += \"-\" if result == -3 else str(result)\r\n file.write(f'{to_write}\\n')\r\n if to_write[-1] != \"-\" and index != len(graph.graphlets_sum_counts)-1:\r\n to_write = f'{LABELS[index+used][:6] + \"ne\" + LABELS[index+used][6:]} '\r\n if index == 0:\r\n to_write += str(graph.motifs_counts[0])\r\n elif index == 1:\r\n to_write += str(sum(graph.motifs_counts[4:6]))\r\n else:\r\n to_write += str(sum(graph.motifs_counts[12:]))\r\n file.write(f'{to_write}\\n')\r\n file.write(\"-------------------\\n\\n\")\r\n if graph.comparision_results[0] != -3:\r\n file.write('Porovnanie sietí:\\n\\n')\r\n file.write(f'{LABELS[13]} {graph.comparision_results[0]}\\n')\r\n file.write(f'{LABELS[14]} {graph.comparision_results[1]}\\n')", "title": "" }, { "docid": "84d9744d46cd3a9dc2229769c9d42f78", "score": "0.56121105", "text": "def save_networks(self, epoch):\n for name in self.model_names:\n if isinstance(name, str):\n save_filename = '%s_net_%s.pth' % (epoch, name)\n save_path = os.path.join(self.save_dir, save_filename)\n net = getattr(self, 'net' + name)\n\n if len(self.gpu_ids) > 0 and torch.cuda.is_available():\n torch.save(net.module.cpu().state_dict(), save_path)\n net.cuda(self.gpu_ids[0])\n else:\n torch.save(net.cpu().state_dict(), save_path)", "title": "" }, { "docid": "42bae3d7a4e947e1e70a06ed71bf5d46", "score": "0.5610976", "text": "def create_full_graph(self):\n conn = sqlite3.connect(self._database)\n c = conn.cursor()\n out = set() 
#Removes duplicates\n out_graph = []\n out_graph.append('digraph {ranksep=2\\n')\n for row in c.execute ('''SELECT A.SOURCE, A.TARGET FROM DATAFLOWS AS A'''):\n out.add('\"' + row[0] + '\"' + ' -> ' + '\"' + row[1] + '\"' + '\\n')\n for line in out: out_graph.append(line)\n out_graph.append('}')\n return out_graph", "title": "" }, { "docid": "97551bfcab2ae2a89b19b432dce74b11", "score": "0.5605996", "text": "def download(self, graph, outputfile, format_options):", "title": "" }, { "docid": "16bfa8c3312324f145f082000867a57b", "score": "0.5600805", "text": "def write(self, outedgefile):\n\t\tpath.makedirs_file(outedgefile)\n\t\tnp.savetxt(outedgefile, self.net.data, delimiter='\\t')", "title": "" }, { "docid": "7223f5f2410508c6f47b38e419142670", "score": "0.5597986", "text": "def _write_network(self, use_cse=False):\n self.use_cse = use_cse\n\n # Prepare RHS terms\n self.compose_ydot()\n self.compose_jacobian()\n\n # Process template files\n for tfile in self.template_files:\n tfile_basename = os.path.basename(tfile)\n outfile = tfile_basename.replace('.template', '')\n ifile, of = self.io_open(tfile, outfile)\n for l in ifile:\n ls = l.strip()\n foundkey = False\n for k in self.ftags:\n if k in ls:\n foundkey = True\n n_indent = self.get_indent_amt(ls, k)\n self.ftags[k](n_indent, of)\n if not foundkey:\n of.write(l)\n self.io_close(ifile, of)\n\n # Copy any tables in the network to the current directory\n # if the table file cannot be found, print a warning and continue.\n for i_tab in self.tabular_rates:\n tr = self.rates[i_tab]\n tdir = os.path.dirname(tr.rfile_path)\n if tdir != os.getcwd():\n tdat_file = os.path.join(tdir, tr.table_file)\n if os.path.isfile(tdat_file):\n shutil.copy(tdat_file, os.getcwd())\n else:\n print('WARNING: Table data file {} not found.'.format(tr.table_file))", "title": "" }, { "docid": "cbed3e27a11ead653fd94625a9633077", "score": "0.55969536", "text": "def save_weights(self, dir, epoch):\n torch.save(self.netG_A.state_dict(), dir + 'generatorA-{}.pkl'.format(epoch))\n torch.save(self.netG_B.state_dict(), dir + 'generatorB-{}.pkl'.format(epoch))\n torch.save(self.netD_A.state_dict(), dir + 'discriminatorA-{}.pkl'.format(epoch))\n torch.save(self.netD_B.state_dict(), dir + 'discriminatorB-{}.pkl'.format(epoch))", "title": "" }, { "docid": "3a8002771a704ed1052ff4811c370e61", "score": "0.5589631", "text": "def plot_network(self, file_path, **kwargs):\n if self.verbose:\n print(\n f\"Saving Bayesian Network plot to the following PNG file: {file_path}\"\n )\n\n # Identify target variable so we can highlight it in the plot\n target_index = list(self.structure_model).index(self.target_variable)\n node_size_list = [300] * len(list(self.structure_model.nodes))\n node_color_list = [\"#95ABDF\"] * len(list(self.structure_model.nodes))\n node_size_list[target_index] = 1500\n node_color_list[target_index] = \"#F09A9A\"\n\n # Clear any existing pyplot fig, create plot, and save to disk\n plt.clf()\n nx.draw(\n self.structure_model,\n node_size=node_size_list,\n node_color=node_color_list,\n with_labels=True,\n **kwargs,\n )\n plt.savefig(expanduser(file_path), format=\"PNG\", dpi=300)", "title": "" }, { "docid": "35d6ad89434f573ad411dec0fe8bc563", "score": "0.5589559", "text": "def renderGraph(self, graph):\n\n self.assertLayouterPresent()\n\n tool = os.path.join(config['graphviz_path'], self._layouter)\n\n # 2006-08-03 Seperate streams for output and error.\n # Avoids problems with fonts not found.\n cmd = \"%s -Tpng\" %(tool)\n (pout, pin, perr) = popen2.popen3(cmd=cmd, 
mode=\"b\")\n pin.write(graph)\n pin.close()\n\n data = pout.read()\n pout.close()\n error = perr.read()\n perr.close()\n\n if error:\n raise PyolsEnvironmentError(\"The command %r produced text on \"\n \"stderr: %s\" %(cmd, error))\n\n return data", "title": "" }, { "docid": "1b983847f534664fb7dc7f312491ae9e", "score": "0.5587143", "text": "def draw_network(self, layout=None):\n from matador.crystal.network import draw_network\n\n draw_network(self, layout=layout)", "title": "" }, { "docid": "39e1170f7ab05200022a7277a9b2751e", "score": "0.5580948", "text": "def save_net(self, model_file_name):\n pass", "title": "" }, { "docid": "c6099dbf5992cbb0e00a99c2a6a36c3d", "score": "0.55771786", "text": "def save_networks(self, epoch):\n for name in self.net_names:\n if isinstance(name, str):\n save_filename = '{}_net_{}.pth'.format(epoch, name)\n self.save_path = os.path.join(self.save_dir, save_filename)\n net = getattr(self, 'net_' + name)\n\n if len(self.gpu_ids) > 0 and torch.cuda.is_available():\n torch.save(net.module.state_dict(), self.save_path)\n else:\n torch.save(net.state_dict(), self.save_path)", "title": "" }, { "docid": "3f59bfab8463615feef907a289dc5b5a", "score": "0.5567558", "text": "def print_network(self):\n s, n = self.get_network_description(self.model)\n if isinstance(self.model, nn.DataParallel):\n net_struc_str = '{} - {}'.format(self.model.__class__.__name__,\n self.model.module.__class__.__name__)\n else:\n net_struc_str = '{}'.format(self.model.__class__.__name__)\n\n print(\"==================================================\")\n print(\"===> Network Summary\\n\")\n net_lines = []\n line = s + '\\n'\n print(line)\n net_lines.append(line)\n line = 'Network structure: [{}], with parameters: [{:,d}]'.format(net_struc_str, n)\n print(line)\n net_lines.append(line)\n\n if self.is_train:\n with open(os.path.join(self.ckpt_root, 'network_summary.txt'), 'w') as f:\n f.writelines(net_lines)\n\n print(\"==================================================\")", "title": "" }, { "docid": "6e6151191d1aebd8ab1cf448c1fe96cd", "score": "0.5566078", "text": "def create_file(self, filename='out.dot', draw=False, prog='dot',\n include_orphans=False, **kwargs):\n nxgraph = self.get_networkx_graph(include_orphans)\n agraph = nx.nx_agraph.to_agraph(nxgraph)\n agraph.node_attr['style'] = 'filled'\n if draw:\n agraph.draw(filename, prog=prog, **kwargs)\n else:\n agraph.write(filename, **kwargs)", "title": "" }, { "docid": "e72b36e92d64e682da995c1bdab747ca", "score": "0.5562362", "text": "def save_model(self) -> None:\n time_str = str(time.time())\n if self.config.save_dir is not None:\n if self.config.attacker:\n path = self.config.save_dir + \"/\" + time_str + \"_attacker_policy_network.pt\"\n self.config.logger.info(\"Saving policy-network to: {}\".format(path))\n torch.save(self.attacker_policy_network.state_dict(), path)\n if self.config.defender:\n path = self.config.save_dir + \"/\" + time_str + \"_defender_policy_network.pt\"\n self.config.logger.info(\"Saving policy-network to: {}\".format(path))\n torch.save(self.defender_policy_network.state_dict(), path)\n else:\n self.config.logger.warning(\"Save path not defined, not saving policy-networks to disk\")", "title": "" }, { "docid": "90a672bcf004536ee1cbaeaaeab04b93", "score": "0.55504537", "text": "def write(self):\n self.statistical_model.write(self.dataset, self.output_dir)\n self._dump_state_file()", "title": "" }, { "docid": "2f35e85008c11f7a1de52fddf3e10742", "score": "0.5548303", "text": "def save_visualization(self, 
path_to_file):\n graphviz_dot = self._hmm.generate_graphviz_dot_ext_lbl(\n self.save_visualization_helper_decode_labels\n )\n # strip png\n if path_to_file[-4:] == '.png':\n path_to_file = path_to_file[:-4]\n graphviz_dot.format = 'png'\n\n elif path_to_file[-4:] == '.jpg':\n path_to_file = path_to_file[:-4]\n graphviz_dot.format = 'jpg'\n else:\n graphviz_dot.format = 'png'\n\n graphviz_dot.render(filename=path_to_file)", "title": "" }, { "docid": "688e2d6d256261a12080c9b9007d4c5a", "score": "0.5548146", "text": "def print_network(self):\n s, n = self.get_network_description(self.model)\n if isinstance(self.model, nn.DataParallel):\n net_struc_str = '{} - {}'.format(\n self.model.__class__.__name__,\n self.model.module.__class__.__name__)\n else:\n net_struc_str = '{}'.format(self.model.__class__.__name__)\n\n print(\"==================================================\")\n print(\"===> Network Summary\\n\")\n net_lines = []\n line = s + '\\n'\n # print(line)\n net_lines.append(line)\n line = 'Network structure: [{}], with parameters: [{:,d}]'.format(\n net_struc_str, n)\n print(line)\n net_lines.append(line)\n\n if self.is_train:\n with open(os.path.join(self.exp_root, 'network_summary.txt'),\n 'w') as f:\n f.writelines(net_lines)\n\n print(\"==================================================\")", "title": "" }, { "docid": "c27e2b118a6f0d84cf290422868d76cd", "score": "0.5546407", "text": "def build_graph(self):\n self._import_data()\n self._create_embedding()\n self._create_loss()\n self._create_optimizer()\n self._create_summaries()", "title": "" }, { "docid": "83fd86e59546d0028d6d5d8c157e2714", "score": "0.55398065", "text": "def print_network(self):\r\n s, n = self.get_network_description(self.model)\r\n if isinstance(self.model, nn.DataParallel):\r\n net_struc_str = '{} - {}'.format(self.model.__class__.__name__,\r\n self.model.module.__class__.__name__)\r\n else:\r\n net_struc_str = '{}'.format(self.model.__class__.__name__)\r\n\r\n print(\"==================================================\")\r\n print(\"===> Network Summary\\n\")\r\n net_lines = []\r\n line = s + '\\n'\r\n print(line)\r\n net_lines.append(line)\r\n line = 'Network structure: [{}], with parameters: [{:,d}]'.format(net_struc_str, n)\r\n print(line)\r\n net_lines.append(line)\r\n\r\n if self.is_train:\r\n with open(os.path.join(self.exp_root, 'network_summary.txt'), 'w') as f:\r\n f.writelines(net_lines)\r\n\r\n print(\"==================================================\")", "title": "" } ]
be6b3b9b4402fa1846ce5a964f80745a
Returns the comment for the event.
[ { "docid": "d439f706d1e59a83da3ac0e6bc2a6275", "score": "0.73312235", "text": "def get_comment(self):\n return self.__comment", "title": "" } ]
[ { "docid": "b7554a7f4e179014f7b73ab8b7528e3b", "score": "0.75873995", "text": "def comment(self) -> str:\n return pulumi.get(self, \"comment\")", "title": "" }, { "docid": "b7554a7f4e179014f7b73ab8b7528e3b", "score": "0.75873995", "text": "def comment(self) -> str:\n return pulumi.get(self, \"comment\")", "title": "" }, { "docid": "b7554a7f4e179014f7b73ab8b7528e3b", "score": "0.75873995", "text": "def comment(self) -> str:\n return pulumi.get(self, \"comment\")", "title": "" }, { "docid": "b7554a7f4e179014f7b73ab8b7528e3b", "score": "0.75873995", "text": "def comment(self) -> str:\n return pulumi.get(self, \"comment\")", "title": "" }, { "docid": "31b4084429e19585a276c322bfc74669", "score": "0.74238575", "text": "def comment(self):\n return idc.get_func_cmt(self.ea, False)", "title": "" }, { "docid": "7aec1821cbf9f639d97907f318a754bf", "score": "0.7325617", "text": "def get_comment(self):\n return self.comment", "title": "" }, { "docid": "52cef30b1026c122569264c83e993623", "score": "0.7311529", "text": "def comment(self) :\n\t\ttry :\n\t\t\treturn self._comment\n\t\texcept Exception as e:\n\t\t\traise e", "title": "" }, { "docid": "1ae47071f342a6ac215cd3b925310871", "score": "0.72743875", "text": "def comment(self):\n return self._comment", "title": "" }, { "docid": "48ce247b74c9a6b9f02700b240804162", "score": "0.72671384", "text": "def comment(self) -> str:\n return get_comment(\n proto_file=self.source_file, path=self.path, indent=self.comment_indent\n )", "title": "" }, { "docid": "581f55e60dcffbc4c300164511a24a59", "score": "0.72413594", "text": "def format_event_commit_comment(data: typing.Dict[str, typing.Any]) -> str:\n if data['action'] != \"created\":\n return None\n resp = f\"{format_author(data['sender'])} [commented]({data['comment']['html_url']}) on commit \"\n resp += f\"{format_commit_sha(data['comment']['commit_id'], data['repository'])} in \"\n resp += f\"{format_repo(data['repository'])}:\\n\\n{markdownify(data['comment']['body'])}.\"\n return resp", "title": "" }, { "docid": "b4f0bfeec28279212ddeb5dd12f0537f", "score": "0.7156651", "text": "def comment(self) -> Optional[str]:\n return pulumi.get(self, \"comment\")", "title": "" }, { "docid": "b4f0bfeec28279212ddeb5dd12f0537f", "score": "0.7156651", "text": "def comment(self) -> Optional[str]:\n return pulumi.get(self, \"comment\")", "title": "" }, { "docid": "c3e3486ba7f9b9d0a687c9dd653dbe7e", "score": "0.7080039", "text": "def comment(self):\n return str()", "title": "" }, { "docid": "084cf65db0ce9062fc0997da759d8142", "score": "0.70591515", "text": "def audit_comment(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"audit_comment\")", "title": "" }, { "docid": "8a833239d954a8c54f93735ecb9ba0c6", "score": "0.7037788", "text": "def document_comment(self) -> str:\n return pulumi.get(self, \"document_comment\")", "title": "" }, { "docid": "f4120c34f98437add4fe50c056886658", "score": "0.69872046", "text": "def comment(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"comment\")", "title": "" }, { "docid": "f4120c34f98437add4fe50c056886658", "score": "0.69872046", "text": "def comment(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"comment\")", "title": "" }, { "docid": "a6c2dec684f207b06222c756e4163e12", "score": "0.6956287", "text": "def format_event_issue_comment(data: typing.Dict[str, typing.Any]) -> str:\n if data['action'] == \"deleted\":\n return None\n resp = f\"{format_author(data['sender'])} \"\n if data['action'] == \"created\":\n resp += 
f\"[commented]({data['comment']['html_url']}) on issue \"\n else:\n resp += f\"edited [their comment]({data['comment']['html_url']}) on issue \"\n resp += f\"{format_issue_or_pr(data['issue'])} in {format_repo(data['repository'])}:\\n\\n{markdownify(data['comment']['body'])}\"\n return resp", "title": "" }, { "docid": "d05176f5594687fd4ed74d1cdc1666ae", "score": "0.6945515", "text": "def v_comment(self):\n return self._comment", "title": "" }, { "docid": "a81ad46c4a4f89512bbd6ba42efb4ba2", "score": "0.68713975", "text": "def rcomment(self):\n return idc.get_func_cmt(self.ea, True)", "title": "" }, { "docid": "0d41c76108129ab5ea6899d8028f199d", "score": "0.6756056", "text": "def audit_comment(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"audit_comment\")", "title": "" }, { "docid": "0d41c76108129ab5ea6899d8028f199d", "score": "0.6756056", "text": "def audit_comment(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"audit_comment\")", "title": "" }, { "docid": "bb5ddf9ec49e1a4a57806f96d1f22b0f", "score": "0.673737", "text": "def __str__(self):\n return 'Comment {} at {}'.format(self.text, self.image.name)", "title": "" }, { "docid": "39229a32ef9fc24ca67a8699b1464842", "score": "0.67348516", "text": "def comment(self):\n return ida_enum.get_enum_cmt(self._eid, False)", "title": "" }, { "docid": "782b9f591b1c46af4727202d2f1f13c3", "score": "0.6727842", "text": "def comment(self) -> typing.Optional[str]:\n return self._values.get('comment')", "title": "" }, { "docid": "e3ad7157a17dd22d9cbd8e30c279afa8", "score": "0.6692577", "text": "def comment(self):\n output = ''\n output += (constants.SONG_COMMENT.HEADER() + '\\n')\n output += (constants.SONG_COMMENT.COMMENT() + '\\n\\n')\n\n return output", "title": "" }, { "docid": "716162cc1239594bef9fd61ea2f4b15d", "score": "0.66683143", "text": "def comment(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"comment\")", "title": "" }, { "docid": "716162cc1239594bef9fd61ea2f4b15d", "score": "0.66683143", "text": "def comment(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"comment\")", "title": "" }, { "docid": "716162cc1239594bef9fd61ea2f4b15d", "score": "0.66683143", "text": "def comment(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"comment\")", "title": "" }, { "docid": "716162cc1239594bef9fd61ea2f4b15d", "score": "0.66683143", "text": "def comment(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"comment\")", "title": "" }, { "docid": "99d4afedc3fee938ddfd93bf5f833e0a", "score": "0.66484815", "text": "def comment_text(self):\n if self._comment_text_present:\n return self._comment_text_value\n else:\n return None", "title": "" }, { "docid": "99d4afedc3fee938ddfd93bf5f833e0a", "score": "0.66484815", "text": "def comment_text(self):\n if self._comment_text_present:\n return self._comment_text_value\n else:\n return None", "title": "" }, { "docid": "99d4afedc3fee938ddfd93bf5f833e0a", "score": "0.66484815", "text": "def comment_text(self):\n if self._comment_text_present:\n return self._comment_text_value\n else:\n return None", "title": "" }, { "docid": "99d4afedc3fee938ddfd93bf5f833e0a", "score": "0.66484815", "text": "def comment_text(self):\n if self._comment_text_present:\n return self._comment_text_value\n else:\n return None", "title": "" }, { "docid": "99d4afedc3fee938ddfd93bf5f833e0a", "score": "0.66484815", "text": "def comment_text(self):\n if self._comment_text_present:\n return self._comment_text_value\n else:\n return None", "title": "" }, 
{ "docid": "99d4afedc3fee938ddfd93bf5f833e0a", "score": "0.66484815", "text": "def comment_text(self):\n if self._comment_text_present:\n return self._comment_text_value\n else:\n return None", "title": "" }, { "docid": "99d4afedc3fee938ddfd93bf5f833e0a", "score": "0.66484815", "text": "def comment_text(self):\n if self._comment_text_present:\n return self._comment_text_value\n else:\n return None", "title": "" }, { "docid": "99d4afedc3fee938ddfd93bf5f833e0a", "score": "0.66484815", "text": "def comment_text(self):\n if self._comment_text_present:\n return self._comment_text_value\n else:\n return None", "title": "" }, { "docid": "78876d633db59a87894e13607d179086", "score": "0.66466", "text": "def getcomment(self, key):\n ckey = \"comment-%s\" % key\n commentstr = self.get(ckey)\n return commentstr", "title": "" }, { "docid": "f6090f4ce1cf5824c26b5665de2ac5eb", "score": "0.66038364", "text": "def render_comment(self, model):\n pgid = self.get_pgid(model)\n hash = self.get_hash(model)\n table = model._meta.db_table\n return f'COMMENT ON TRIGGER {pgid} ON {table} IS \\'{hash}\\''", "title": "" }, { "docid": "59fd78d36faeb6b28f023fd2eff48a83", "score": "0.65305734", "text": "def comment(self, comment):\n ...", "title": "" }, { "docid": "2d071630c3b1e2d4b8e45c3c6bce4690", "score": "0.6528741", "text": "def comment(self, comment):\n pass", "title": "" }, { "docid": "a3c77e81d50dcb906e85604448447388", "score": "0.6526225", "text": "def name(self) -> str:\n return self.comment", "title": "" }, { "docid": "81840490a518a55e73675ff89b93c45e", "score": "0.65106237", "text": "def render_comment(self, tag_name=None, context=None):\n return ''", "title": "" }, { "docid": "9ecfbbc72e05ff62496fb2067b3f7324", "score": "0.65103936", "text": "def __str__(self):\n return \"{}\".format(self.comment)", "title": "" }, { "docid": "0a2d1ebf5e2a4b6738f752ac128a9da6", "score": "0.6474552", "text": "def on_comment(self, comment):\n logger.info(\"{}\".format(comment))", "title": "" }, { "docid": "7c63710c860c4d7f5942a87beaa589a1", "score": "0.64632094", "text": "def format_event_pull_request_review_comment(data: typing.Dict[str, typing.Any]) -> str:\n if data['action'] == \"deleted\":\n return None\n resp = f\"{format_author(data['sender'])} \"\n if data['action'] == \"created\":\n resp += f\"[commented]({data['comment']['html_url']}) on pull request \"\n else:\n resp += f\"edited [their comment]({data['comment']['html_url']}) on pull request \"\n resp += f\"{format_issue_or_pr(data['pull_request'])} in {format_repo(data['repository'])}:\\n\\n{markdownify(data['comment']['body'])}\"\n return resp", "title": "" }, { "docid": "80dafb4459d7c9df9fa7173d40c6e14a", "score": "0.63892514", "text": "def __str__(self):\n return self.comment", "title": "" }, { "docid": "80dafb4459d7c9df9fa7173d40c6e14a", "score": "0.63892514", "text": "def __str__(self):\n return self.comment", "title": "" }, { "docid": "f58638efca45d4a7899a4be310e46a4a", "score": "0.6384488", "text": "def comment(self):\n return ida_enum.get_enum_member_cmt(self._mid, False)", "title": "" }, { "docid": "7610c3b7c3651ebc2b6d203e0380ebd2", "score": "0.63652825", "text": "def comment(self):\n tags = dict()\n if self.hashtag:\n tags['hashtag'] = '#'+self.hashtag\n\n for k, v in self.bangtags.items():\n fields = v.copy()\n fields.insert(0, k)\n tags['bangtag,'+k] = '!' 
+ ':'.join(fields)\n\n return self._comment.format(**tags)", "title": "" }, { "docid": "260f1c8b1913fc4d78a61df3aa6c67fb", "score": "0.63258696", "text": "def metricCommentText(self) -> CommentText:\n return self.__metricCommentText", "title": "" }, { "docid": "f61a33145ea35fdf30a7ea809783bce0", "score": "0.63044447", "text": "def __str__(self):\n return f'{self.comment}'", "title": "" }, { "docid": "f61a33145ea35fdf30a7ea809783bce0", "score": "0.63044447", "text": "def __str__(self):\n return f'{self.comment}'", "title": "" }, { "docid": "c2d745b09e2623564130e9ce18a03ea7", "score": "0.6213649", "text": "def getCommentCharacter(self) -> unicode:\n ...", "title": "" }, { "docid": "8423bbb87a2417bed9da22bf23d7de5d", "score": "0.6212207", "text": "def description(self, obj):\r\n return _('The latest comments for the entry %s') % obj.title", "title": "" }, { "docid": "12fd5e5e4173e75dec6e4cabd5b468f1", "score": "0.61747515", "text": "def rcomment(self):\n return ida_enum.get_enum_cmt(self._eid, True)", "title": "" }, { "docid": "9755d873bb6d0d628b92ef326b1a542c", "score": "0.61696637", "text": "def get_harvest_comment(task):\n if (task['harvestcomment'] is not None):\n comment = task['harvestcomment']\n else:\n comment = raw_input('Enter a log message: ')\n return comment", "title": "" }, { "docid": "688ebc1f0431f43259a187855dd7c08a", "score": "0.6135696", "text": "def get_issue_close_comment(testcase):\n return ISSUE_ClOSE_COMMENT_TEXT.format(\n bug_information=testcase.bug_information)", "title": "" }, { "docid": "4fbe6a72bd42f21d331ca7623896e30f", "score": "0.607641", "text": "def get_note(self):\n return f'{self.message}'", "title": "" }, { "docid": "00ddb33ccf3e7a5cc1ec643ade07e067", "score": "0.60522956", "text": "def format_comment(self, comment):\n comm_id = str(comment[\"id\"])\n comm_infos = expand(self.config.get(\"comment\", \"infos_format\"),\n ((r\"\\[id\\]\", t.A(self.rewrite(post=str(comment[\"post\"]),\n comment=comm_id), comm_id, name=\"comment_%s\" %\n comm_id)),\n (r\"\\[title\\]\", comment[\"title\"]),\n (r\"\\[author\\]\", comment[\"author\"]),\n (r\"\\[date\\]\", frogify(float(comment[\"date\"]), True))\n ))\n infos = t.Div(**{\"id\":\"\", \"class\": \"comment_infos\",\n \"content\": comm_infos})\n content = t.Div(**{\"id\":\"\", \"class\": \"comment_content\",\n \"content\": comment[\"content\"]})\n return unicode(t.Div(**{\"id\":\"\", \"class\": \"comment\", \"content\": infos +\n content}))", "title": "" }, { "docid": "c03c0f8cabc93349dbc3e0b72760b034", "score": "0.59925556", "text": "def rcomment(self):\n return ida_enum.get_enum_member_cmt(self._mid, True)", "title": "" }, { "docid": "11e25a1776484cf7558afb6d7b78b697", "score": "0.5974213", "text": "def event(self) -> str:\n return self.tags['event']", "title": "" }, { "docid": "fa88dff034a7c7360d15cb0d2bf7c373", "score": "0.597353", "text": "def get_description(self):\n description = u\"{}\".format(self.event_type.description)\n if self.event_type.show_partner_info and self.tracking.container:\n partner = self.tracking.container.partner_to\n if partner.get_description():\n description += u\"\\n<h3>{}</h3>{}\\n<a href='{}' target='blank'>{}</a>\".format(partner.get_name(), partner.get_description(), partner.get_website(), partner.get_website())\n return description", "title": "" }, { "docid": "eb5f9bbc4f51b8bc5f7a3d55a1e125c8", "score": "0.5962355", "text": "def comments():\n return \"create comment!\"", "title": "" }, { "docid": "4d73803696eeb4f6968f710d0722b607", "score": "0.5942258", "text": "def comment(self, 
*labels):\n char = _get_char(labels)\n if not char:\n return ''\n char = char.value\n name = unicode_name(char)\n if self.include_char and is_printable(char):\n return '[{}] {}'.format(char, name)\n return '{}'.format(name)", "title": "" }, { "docid": "fa5262494230550d125d7ef8693e0b46", "score": "0.5934873", "text": "def comment(self, value):\n if value is None:\n value = \"\"\n return idc.set_func_cmt(self.ea, value, False)", "title": "" }, { "docid": "576b1cbd94dd8142d5489f2700e31898", "score": "0.59305507", "text": "def get_comment(self):\n\n if self.comment_id == 0:\n return ''\n for row in self.repos.sql(\"SELECT blobdata FROM tw_blob WHERE primarykey = %i\" % self.comment_id):\n return _strac_decode(row[0])", "title": "" }, { "docid": "a37673429b6d124acd634d5d3a0b527f", "score": "0.59272814", "text": "def sample_event_comment(event, user, comment='test comment', **params):\n default = {\n 'event': event,\n 'user': user,\n 'comment': comment,\n }\n default.update(params)\n\n return EventComment.objects.create(**default)", "title": "" }, { "docid": "56de81e47e635b4e5ade0ae53818b7b0", "score": "0.58720976", "text": "def get_comment(self, token):\n return self.comment.get(token, None)", "title": "" }, { "docid": "09f832c389bf61b4ac4a8a9cfae220a3", "score": "0.5866739", "text": "def format_comment(comment):\n\n author = comment.author[0].name.text\n date = parse_gcode_date(comment.published.text)\n content = prepare_content(comment.content.text)\n\n if comment.updates.mergedIntoUpdate:\n return \"_This issue is a duplicate of #%d_\" % (options.base_id + int(comment.updates.mergedIntoUpdate.text))\n else: return \"_From %s on %s_\\n%s\" % (author, date, content)", "title": "" }, { "docid": "2e62b1c8520bf5c8f398037a1623b2dd", "score": "0.58630925", "text": "def get_comment(self) -> dict:\n all_comment_components = {\"data\": self.__data, \"udfs\": self.__udfs}\n return all_comment_components", "title": "" }, { "docid": "393e315276cc72a523a15f1127798c06", "score": "0.58505166", "text": "def get_comment_msg(comment):\n context = webserver_context.get_context()\n message = comment.message.decode('utf-8')\n sys_comment = comment_kind_from_thrift_type(ttypes.CommentKind.SYSTEM)\n\n if comment.kind == sys_comment:\n try:\n elements = shlex.split(message)\n except ValueError:\n # In earlier CodeChecker we saved system comments\n # without escaping special characters such as\n # quotes. 
This is kept only for backward\n # compatibility reason.\n message = message \\\n .replace(\"'\", \"\\\\'\") \\\n .replace('\"', '\\\\\"')\n\n elements = shlex.split(message)\n\n system_comment = context.system_comment_map.get(elements[0])\n if system_comment:\n for idx, value in enumerate(elements[1:]):\n system_comment = system_comment.replace(\n '{' + str(idx) + '}', html.escape(value))\n return system_comment\n\n return html.escape(message)", "title": "" }, { "docid": "cb5b6806377c996edbd237bb9944d362", "score": "0.5835535", "text": "def get_paper_doc_edit_comment_details(self):\n if not self.is_paper_doc_edit_comment_details():\n raise AttributeError(\"tag 'paper_doc_edit_comment_details' not set\")\n return self._value", "title": "" }, { "docid": "ee2d76006105a8b8847058ed0bbbd7fd", "score": "0.5821992", "text": "def comment(self):\n if self.next_token.symbol != '(:':\n return\n\n comment_level = 1\n comment = []\n while comment_level:\n comment.append(self.raw_advance('(:', ':)'))\n next_token = self.next_token\n if next_token.symbol == ':)':\n comment_level -= 1\n if comment_level:\n comment.append(str(next_token.value))\n elif next_token.symbol == '(:':\n comment_level += 1\n comment.append(str(next_token.value))\n return ''.join(comment)", "title": "" }, { "docid": "ca48d686d4913c8f95f079471aebb335", "score": "0.58180326", "text": "def test_add_event_comment_vs_comments(self):\n runner = CliRunner()\n result = runner.invoke(\n events_group,\n [\n \"annotate\",\n \"--event-id\",\n \"1\",\n \"--comments\",\n \"test foobar\",\n \"--timeline-id\",\n \"1\",\n ],\n obj=self.ctx,\n )\n\n expected_output = \"No such option: --comments Did you mean --comment?\"\n assert expected_output in result.output", "title": "" }, { "docid": "eea2c3967594492fa5c09b50fbff840f", "score": "0.58141506", "text": "def comment(id, **repeatable):\n res = idaapi.get_struc_cmt(id, repeatable.get('repeatable', True))\n return utils.string.of(res)", "title": "" }, { "docid": "9388662e76fc6a772427af415c5a4b5b", "score": "0.5807335", "text": "def get_file_add_comment_details(self):\n if not self.is_file_add_comment_details():\n raise AttributeError(\"tag 'file_add_comment_details' not set\")\n return self._value", "title": "" }, { "docid": "00f4d6618496950d31f53b6b85aaa95e", "score": "0.57967126", "text": "def test_comment_annotation(self):\n self._test_annotation(self.event.comments)\n # pylint: disable=unsubscriptable-object\n self.assertEqual(self.event.comments[0].comment, \"test\")", "title": "" }, { "docid": "abe1a99cc2d6f1ca8e6f095bfef37af9", "score": "0.576216", "text": "def get_paper_doc_add_comment_details(self):\n if not self.is_paper_doc_add_comment_details():\n raise AttributeError(\"tag 'paper_doc_add_comment_details' not set\")\n return self._value", "title": "" }, { "docid": "f1647d4dff23c5d29be26fcacc5f5e0c", "score": "0.5757711", "text": "def get_comments(self):\n raise NotImplementedError", "title": "" }, { "docid": "f1647d4dff23c5d29be26fcacc5f5e0c", "score": "0.5757711", "text": "def get_comments(self):\n raise NotImplementedError", "title": "" }, { "docid": "858bbc56c30abe61a69cb7e09621b73e", "score": "0.5755826", "text": "def test_email_review_comment_details(self):\n SandboxCommentDetailDisplayTestHook(extension=self.extension)\n\n context = Context({'comment': 'this is a comment'})\n\n t = Template(\n \"{% load rb_extensions %}\"\n \"{% comment_detail_display_hook comment 'html-email'%}\")\n\n t.render(context).strip()", "title": "" }, { "docid": "4b2da640aad3cd8f588e8c34f2f7c373", "score": 
"0.5748827", "text": "def comment(self, text):\n return \"%s\\n\" % \"\\n\".join(\"# %s\" % s for s in text.split(\"\\n\"))", "title": "" }, { "docid": "2f218fcdc01a5e53bead94684753b910", "score": "0.57363737", "text": "def creator_comment(self) -> str:\n return pulumi.get(self, \"creator_comment\")", "title": "" }, { "docid": "b1dffe7416a8de2485b4804d29bfd1d8", "score": "0.572982", "text": "def get_title(self, obj):\r\n return _('Comments on %s') % obj.title", "title": "" }, { "docid": "952b389f677996c45bb8df92b308b67d", "score": "0.5724623", "text": "def get_content_comment(self):\n #\n atf_line = self.cAtf_line\n #\n if self.test_line_content() == True:\n content_comment_search = re.search(\"^#.*\", atf_line)\n content_comment = content_comment_search.group(0)\n self.content_comment_line = content_comment\n else:\n pass\n #\n return self.content_comment_line", "title": "" }, { "docid": "5b8dd1ae79b8fa47dc60ab0edee2d048", "score": "0.5703796", "text": "def comment(enum, **repeatable):\n eid = by(enum)\n return idaapi.get_enum_cmt(eid, repeatable.get('repeatable', True))", "title": "" }, { "docid": "58904cff7f65d6039384901eb1c17149", "score": "0.56866884", "text": "def _handle_comment(self, comment):\r\n if not comment:\r\n return ''\r\n start = self.indent_type\r\n if not comment.startswith('#'):\r\n start += self._a_to_u(' # ')\r\n return (start + comment)", "title": "" }, { "docid": "565af145b24f17423071df236787f8d0", "score": "0.5684376", "text": "def note(self) -> str:\n return self.__note", "title": "" }, { "docid": "565af145b24f17423071df236787f8d0", "score": "0.5684376", "text": "def note(self) -> str:\n return self.__note", "title": "" }, { "docid": "565af145b24f17423071df236787f8d0", "score": "0.5684376", "text": "def note(self) -> str:\n return self.__note", "title": "" }, { "docid": "b3c26d2c3c685a4b97bb53f389dea051", "score": "0.567883", "text": "def get_comment(self, topic):\n return self.comments.get(topic, None)", "title": "" }, { "docid": "1c47e2fb65201bc2dd81b200784d7c4b", "score": "0.5674801", "text": "def notify_admin_new_comment(context, event):\n url_aprovacao = '{0}/@@manage-comments-view'.format(get_pratica_url(context))\n\n registry = getUtility(IRegistry)\n text=get_observatorio_config('comentario_info_admin')\n\n message=transform_message(text=text, url_aprovacao=url_aprovacao)\n\n address = get_observatorio_config('adm_observatorio')\n subject = 'Novo comentário pendente na prática %s' %(context.Title())\n\n return simple_send_mail(message,address,subject)", "title": "" }, { "docid": "0b5f0e0431b846ab64b542bc857300b9", "score": "0.5663553", "text": "def comment(\n self, issue: int | str, comment: str, expand: str | None = None\n ) -> Comment:\n return self._find_for_resource(Comment, (issue, comment), expand=expand)", "title": "" }, { "docid": "2e9ae906f207351d0fc51d73291fb7e4", "score": "0.564644", "text": "def get_file_like_comment_details(self):\n if not self.is_file_like_comment_details():\n raise AttributeError(\"tag 'file_like_comment_details' not set\")\n return self._value", "title": "" }, { "docid": "579e054f0d0a380f12fd22e2a97de02f", "score": "0.5645134", "text": "def render_timeline_event(self, context, field, event):\n if field == 'url':\n return event[3]['url']\n elif field == 'title':\n return \"Build %s #%s was %s\" % (event[3]['builder'], event[3]['num'], event[0])\n elif field == 'description':\n data = event[3]\n msg = tag.span()\n if data['source'] and data[\"rev\"]:\n rev_msg = tag.div(\"rev: \",\n tag.a(data['rev'][:7], 
href=context.href(\"/browser/%s\" % data['source'], rev=data['rev'])),\n \" \", \n tag.a(tag.img(src=context.href(\"/chrome/common/changeset.png\")),\n href=context.href(\"/changeset/%s/%s\" % (data['rev'], data['source'])))\n )\n msg.append(rev_msg)\n\n if 'error' in event[3] and event[3]['error']:\n error_msg = tag.div(event[3]['error'], \" \")\n if 'error_log' in event[3] and event[3]['error_log']:\n error_msg.append(tag.a(\"Log\", href=event[3]['error_log']))\n msg.append(error_msg)\n return msg", "title": "" }, { "docid": "fb81c44ab01cc954304f4d05b75759e4", "score": "0.56334907", "text": "def get_comment(self, id):\n return first_or_none('comment', 'commentID', id, strict=True)", "title": "" }, { "docid": "de6feded8b55417f47619be9907092e4", "score": "0.5627537", "text": "def __repr__(self):\n\n return \"<Comment Id: %s User: %s>\" %(self.comment_id, self.user_id)", "title": "" } ]
055e97ca6054cd2c478730be9dd70d7c
Checks that the input is a differential object and is one of the allowed class types.
[ { "docid": "142930a3f80bb1c41c4e5cf438f7686d", "score": "0.61701965", "text": "def convert_input(self, value):\n if value is None:\n return None, False\n\n if not isinstance(value, self.allowed_classes):\n if len(self.allowed_classes) == 1:\n value = self.allowed_classes[0](value)\n else:\n raise TypeError(\n \"Tried to set a DifferentialAttribute with an unsupported\"\n f\" Differential type {value.__class__}. Allowed classes are:\"\n f\" {self.allowed_classes}\"\n )\n\n return value, True", "title": "" } ]
[ { "docid": "0932d9c7992509ae372bbea1cc7e01b6", "score": "0.6821848", "text": "def is_kind_of_class(obj, a_class):", "title": "" }, { "docid": "0b4be828357e11ff2935dfe1cdaff0d3", "score": "0.6765005", "text": "def is_instance_check(cls, obj):\n if isinstance(obj, cls):\n return True\n else:\n raise TypeError(f\"must be subclass of DragItem, not {obj.__class__.__name__}\")", "title": "" }, { "docid": "0a2b731a5a75740e16601b53bd43e672", "score": "0.67162025", "text": "def _check_type(obj, expected_types):\n if not isinstance(obj, expected_types):\n raise TypeError(\"Expected type %s; got type %s\" %\n (expected_types, type(obj)))", "title": "" }, { "docid": "b62382706a41dafbbc205f09836469a8", "score": "0.6648394", "text": "def check_compatibility(self, obj1, obj2):\n if self._debug:\n print 'checking that', obj2.__class__.__name__, 'is compatible with', obj1.__class__.__name__\n if issubclass(obj2.__class__, VariableTree) or \\\n issubclass(obj1.__class__, VariableTree):\n try:\n assert set(obj1.list_vars()).issubset(set(obj2.list_vars()))\n except:\n raise Exception('Variables of the class %s are different from base %s:' % (\n obj2.__class__.__name__, obj1.__class__.__name__), obj2.list_vars(), ', '.join(obj1.list_vars()))\n else: # Assuming it's a Component or Assembly\n try:\n assert set(obj1.list_inputs()).issubset(\n set(obj2.list_inputs()))\n except:\n raise Exception('Inputs of the class %s are different from base %s. The missing input(s) of %s are: %s' % (\n obj2.__class__.__name__, obj1.__class__.__name__, obj2.__class__.__name__, ', '.join(set(obj1.list_inputs()) - set(obj2.list_inputs()))))\n try:\n assert set(obj1.list_outputs()).issubset(obj2.list_outputs())\n except:\n raise Exception('Outputs of the class %s are different from base %s. 
The missing output(s) of %s are: %s' % (\n obj2.__class__.__name__, obj1.__class__.__name__, obj2.__class__.__name__, ', '.join(set(obj1.list_outputs()) - set(obj2.list_outputs()))))\n if self._debug:\n print '--> OK'", "title": "" }, { "docid": "8d99af367258348e4b6d9777eef52ee4", "score": "0.6544697", "text": "def check_type(obj):\n if not isinstance(obj, Number):\n raise TypeError(\"The value \" + str(obj) + \" is not of type clispy.type.Number\")", "title": "" }, { "docid": "a68dafad3a723469b74bb2d9e7cd32e3", "score": "0.6462809", "text": "def test_object_type(self):\n honda = Car('Honda')\n self.assertTrue(isinstance(honda, Car), msg='The object should be a type of `Car`')", "title": "" }, { "docid": "f2d88e91db382e4e9b60bff8f159b1bc", "score": "0.64311695", "text": "def istypeof(cls, obj):\n return isinstance(obj, cls)", "title": "" }, { "docid": "e8f1610a01af0afd0e3bbfdc04ceb173", "score": "0.6425813", "text": "def _CheckType(obj, obj_type, obj_name):\n if not isinstance(obj, obj_type):\n raise TypeError('%s must be a %s, got %s'\n % (obj_name, obj_type, obj.__class__.__name__))\n return obj", "title": "" }, { "docid": "d1ab16a5ba5398438d05b10084cbda7a", "score": "0.6423637", "text": "def is_same_class(obj, a_class):\n\n if not type(obj) == a_class:\n return False\n else:\n return True", "title": "" }, { "docid": "1abd4a66612acb6253dcab039836151b", "score": "0.63895947", "text": "def is_same_class(obj, a_class):\n if type(obj) is not a_class:\n return False\n\n return True", "title": "" }, { "docid": "eea678b91e584a33f426697bf3836f6f", "score": "0.6318412", "text": "def isclass(object):\n return isinstance(object, tuple(_CLASS_TYPES))", "title": "" }, { "docid": "440b0a966146cdd8e823c8231a3f217e", "score": "0.6304573", "text": "def _is_dataclass_instance(obj):\n return is_dataclass(obj) and not isinstance(obj, type)", "title": "" }, { "docid": "64407e1a8c82f0a9f05bdcf8e1c273f0", "score": "0.62472355", "text": "def assert_same_type(*inputs) -> bool:\r\n first, *others = inputs\r\n # single input\r\n if not others:\r\n return True\r\n\r\n _class = type(first)\r\n for ix, obj in enumerate(others):\r\n if not isinstance(obj, _class):\r\n raise TypeError(f\"Input types don't agree. This method accepts multiple inputs, \"\r\n f\"type of the first input: {type(first)}, \"\r\n f\"but {ix+1}-th input: {type(obj)}\")\r\n\r\n return True", "title": "" }, { "docid": "9fa69401d87d0973dc7fe370b0e2e61c", "score": "0.624162", "text": "def validate_type(self):\n if self.depth == 1:\n if isinstance(self.type[0], type):\n return\n elif self.depth > 1:\n if all(isinstance(x, type) for x in self.type):\n return\n raise DependencyInputError('Property dependency type not formatted correctly.')", "title": "" }, { "docid": "8f93e6a93c9ea11455af4f57d678ebd9", "score": "0.6222604", "text": "def check_type(obj, obj_type, message=None):\n if not isinstance(obj, obj_type):\n if message is None:\n message = \"Check failed. 
Object is of type %s, expected %s.\" % (str(type(obj)), str(obj_type))\n check_failed(message)", "title": "" }, { "docid": "7c7d043657533e5fed177257403ab2de", "score": "0.6217251", "text": "def is_same_class(obj, a_class):\n return type(obj) == a_class", "title": "" }, { "docid": "7c7d043657533e5fed177257403ab2de", "score": "0.6217251", "text": "def is_same_class(obj, a_class):\n return type(obj) == a_class", "title": "" }, { "docid": "7c7d043657533e5fed177257403ab2de", "score": "0.6217251", "text": "def is_same_class(obj, a_class):\n return type(obj) == a_class", "title": "" }, { "docid": "42ede7d6bbb81585db3284a96985a4ba", "score": "0.6212023", "text": "def is_same_class(obj, a_class):\n if type(obj) == a_class:\n return True\n else:\n return False", "title": "" }, { "docid": "c29227e9bd32a2d75cf7e66917887b1c", "score": "0.6211062", "text": "def is_same_class(obj, a_class):\n\n if (type(obj) == a_class):\n return True\n return False", "title": "" }, { "docid": "32b7be64d2a1a5ad74254725e0bfebb8", "score": "0.6209559", "text": "def is_type(cls, x):\n return type(x) == cls", "title": "" }, { "docid": "5de99c1c025fc311e7e1a5c82e67d677", "score": "0.6192553", "text": "def IsClass(self) -> bool:", "title": "" }, { "docid": "7b68682579a46f5cc789f2c211e9b5c9", "score": "0.61859024", "text": "def is_same_class(obj, a_class):\n return (type(obj) == a_class)", "title": "" }, { "docid": "ba5a9a449fbc5e9562f2cd4491138e03", "score": "0.6177483", "text": "def is_valid(obj, allowed_class, single=True):\n\n if single:\n if obj == None:\n return True\n return isinstance(obj, allowed_class)\n else:\n if isinstance(obj, list):\n if len(obj) == 0:\n return True\n for element in obj:\n if not isinstance(element, allowed_class):\n return False\n return True\n return False", "title": "" }, { "docid": "4199f0bf042e2770c5fe901ee989aef3", "score": "0.61768085", "text": "def is_same_class(obj, a_class):\n\n return (type(obj) == a_class)", "title": "" }, { "docid": "9c484104c34287fd3e849ad79b8d06ec", "score": "0.61497444", "text": "def _check_string_with_object_type(self):\n try:\n int(self.possible_number)\n float(self.possible_number)\n except ValueError, error:\n raise error\n return True", "title": "" }, { "docid": "8be24e66fef03e4c8ce4604dbbe23d6b", "score": "0.6147874", "text": "def is_same_class(obj, a_class):\n\n if type(obj) == a_class:\n return True\n\n return False", "title": "" }, { "docid": "a8f3f7a3dc0d3c5e7d0ca056188546f5", "score": "0.6131966", "text": "def check_types(self):\n raise NotImplementedError(type(self).__name__ + \" must implement check_types()\")", "title": "" }, { "docid": "70d1c61605fa48084decc7a7ba7619e2", "score": "0.611732", "text": "def is_kind_of_class(obj, a_class):\n if type(obj) == a_class or isinstance(obj, a_class):\n return True\n else:\n return False", "title": "" }, { "docid": "f16d9700511ce7e8297139ecdd36825a", "score": "0.61137116", "text": "def validate(obj, obj_type):\n try:\n obj_type = graph_objs.KEY_TO_NAME[obj_type]\n except KeyError:\n pass\n try:\n test_obj = graph_objs.get_class_instance_by_name(obj_type, obj)\n except KeyError:\n raise exceptions.PlotlyError(\n \"'{0}' is not a recognizable graph_obj.\".\n format(obj_type))", "title": "" }, { "docid": "3e517ec0375aa8714b8b0e1547b4e804", "score": "0.6113055", "text": "def is_same_class(obj, a_class):\n\n return True if type(obj) is a_class else False", "title": "" }, { "docid": "78f411cb1f51f0435828a59e546d0071", "score": "0.610504", "text": "def is_same_class(obj, a_class):\n return type(obj) is a_class", 
"title": "" }, { "docid": "78f411cb1f51f0435828a59e546d0071", "score": "0.610504", "text": "def is_same_class(obj, a_class):\n return type(obj) is a_class", "title": "" }, { "docid": "78f411cb1f51f0435828a59e546d0071", "score": "0.610504", "text": "def is_same_class(obj, a_class):\n return type(obj) is a_class", "title": "" }, { "docid": "bcd6a6afd3a8ee17a3c7525e99dd038c", "score": "0.61023945", "text": "def check_quote_types(obj, *target_tys):\n targ_asset_ty, targ_market_ty, targ_source_ty = target_tys\n obj_asset, obj_market, obj_source = obj.asset, obj.market, obj.source\n\n if not isinstance(obj.asset, targ_asset_ty):\n raise TypeError(\n 'Expected asset of type {0}, got object {1} of type {2}'.format(\n targ_asset_ty, obj_asset, type(obj_asset)))\n if not isinstance(obj.market, targ_market_ty):\n raise TypeError(\n 'Expected market of type {0}, got object {1} of type {2}'.format(\n targ_market_ty, obj_market, type(obj_market)))\n if not isinstance(obj.source, targ_source_ty):\n raise TypeError(\n 'Expected source of type {0}, got object {1} of type {2}'.format(\n targ_source_ty, obj_source, type(obj_source)))\n return None", "title": "" }, { "docid": "ac269ea4a7472ee2605126ee36742e0f", "score": "0.6101883", "text": "def _is_class_instance(obj):\n return isinstance(obj, (nn.Cell, ops.Primitive)) or _is_dataclass_instance(obj)", "title": "" }, { "docid": "9b67c1a0faa92046ff5ae78a863bfb71", "score": "0.6094646", "text": "def is_kind_of_class(obj, a_class):\n return True if isinstance(obj, a_class) else False", "title": "" }, { "docid": "ad331e857eaf824c6dd9f1981da3cb0a", "score": "0.6082795", "text": "def check_isinstance(*info: Tuple[Tuple]):\n for obj, cls in info:\n if not isinstance(obj, cls):\n raise TypeError(\"{} must implement {}\".format(obj, cls))", "title": "" }, { "docid": "b4a267ed4334713814066a2937361d50", "score": "0.60747033", "text": "def is_same_class(obj, a_class):\n if type(obj) is a_class:\n return(True)\n else:\n return(False)", "title": "" }, { "docid": "de06f731a7a73f7888a6d2ff4af96ccb", "score": "0.60710996", "text": "def is_kind_of_class(obj, a_class):\n return isinstance(obj, a_class)", "title": "" }, { "docid": "de06f731a7a73f7888a6d2ff4af96ccb", "score": "0.60710996", "text": "def is_kind_of_class(obj, a_class):\n return isinstance(obj, a_class)", "title": "" }, { "docid": "de06f731a7a73f7888a6d2ff4af96ccb", "score": "0.60710996", "text": "def is_kind_of_class(obj, a_class):\n return isinstance(obj, a_class)", "title": "" }, { "docid": "de06f731a7a73f7888a6d2ff4af96ccb", "score": "0.60710996", "text": "def is_kind_of_class(obj, a_class):\n return isinstance(obj, a_class)", "title": "" }, { "docid": "de06f731a7a73f7888a6d2ff4af96ccb", "score": "0.60710996", "text": "def is_kind_of_class(obj, a_class):\n return isinstance(obj, a_class)", "title": "" }, { "docid": "de06f731a7a73f7888a6d2ff4af96ccb", "score": "0.60710996", "text": "def is_kind_of_class(obj, a_class):\n return isinstance(obj, a_class)", "title": "" }, { "docid": "dbece533a3570e6a9f6b4823fa8dfefd", "score": "0.60648483", "text": "def is_protected_type(obj):\r\n return isinstance(obj, (\r\n types.NoneType,\r\n int, long,\r\n datetime.datetime, datetime.date, datetime.time,\r\n float, Decimal)\r\n )", "title": "" }, { "docid": "c06b250f0a799474b0dedba949042f4d", "score": "0.60459423", "text": "def is_kind_of_class(obj, a_class):\n return (isinstance(obj, a_class))", "title": "" }, { "docid": "c06b250f0a799474b0dedba949042f4d", "score": "0.60459423", "text": "def is_kind_of_class(obj, a_class):\n 
return (isinstance(obj, a_class))", "title": "" }, { "docid": "c06b250f0a799474b0dedba949042f4d", "score": "0.60459423", "text": "def is_kind_of_class(obj, a_class):\n return (isinstance(obj, a_class))", "title": "" }, { "docid": "5529a272779e57ea03a6bc065973f70a", "score": "0.6042952", "text": "def is_kind_of_class(obj, a_class):\n if isinstance(obj, a_class):\n return True\n return False", "title": "" }, { "docid": "5529a272779e57ea03a6bc065973f70a", "score": "0.6042952", "text": "def is_kind_of_class(obj, a_class):\n if isinstance(obj, a_class):\n return True\n return False", "title": "" }, { "docid": "d9cfcac54d012c85938d63162c7f6b85", "score": "0.60367036", "text": "def is_kind_of_class(obj, a_class):\n if isinstance(obj, a_class):\n return True\n else:\n return False", "title": "" }, { "docid": "4fb725eacd3a8b5b0d142b6457dd8697", "score": "0.6030356", "text": "def _isclass(obj):\r\n if sys.version_info < (2, 7):\r\n return isinstance(obj, (type, types.ClassType))\r\n else:\r\n return inspect.isclass(obj)", "title": "" }, { "docid": "79b3b51e2717f022259e6f8aabc09cb8", "score": "0.6025978", "text": "def is_instance(instance, expected_types):\r\n for expected_type in expected_types:\r\n if isinstance(instance, expected_type):\r\n return True\r\n\r\n return False", "title": "" }, { "docid": "75a57c6cd7bff19f3f79839efac95e16", "score": "0.6014151", "text": "def inherits_from(obj, a_class):\n if type(obj) != a_class:\n if isinstance(obj, a_class):\n return True\n else:\n return False\n return False", "title": "" }, { "docid": "71eb7e4ee302fb7eeead8ab91245aafb", "score": "0.6013127", "text": "def check_instance(self):\n self.assertIsInstance(self.user_1, User)\n self.assertIsInstance(self.user_2, User)", "title": "" }, { "docid": "6b92e4e353e2d99fa6006e89ab7a972b", "score": "0.59914047", "text": "def is_kind_of_class(obj, a_class):\n\n return isinstance(obj, a_class)", "title": "" }, { "docid": "6b92e4e353e2d99fa6006e89ab7a972b", "score": "0.59914047", "text": "def is_kind_of_class(obj, a_class):\n\n return isinstance(obj, a_class)", "title": "" }, { "docid": "6b92e4e353e2d99fa6006e89ab7a972b", "score": "0.59914047", "text": "def is_kind_of_class(obj, a_class):\n\n return isinstance(obj, a_class)", "title": "" }, { "docid": "93eef8c769083e857814c8f979953670", "score": "0.59821874", "text": "def test_isclass(self):\n self.assertEqual(inspect.isclass(PygalleBaseClass), True)", "title": "" }, { "docid": "440234441eb539a52cbd5a7523c426dd", "score": "0.5971645", "text": "def is_instance(obj, typelist):\n return any([isinstance(obj, t) for t in typelist])", "title": "" }, { "docid": "91056bdba0652934bbc2720dede4b567", "score": "0.5965745", "text": "def is_same_class(obj, a_class):\r\n if type(obj).__name__ == a_class.__name__:\r\n return True\r\n else:\r\n return False", "title": "" }, { "docid": "3c7ed5af822a53346c3418e9d12366c4", "score": "0.5955906", "text": "def is_same_class(obj, a_class):\n if type(obj).__name__ == a_class.__name__:\n return True\n else:\n return False", "title": "" }, { "docid": "e311a23ab7232c163a63b43964c22a25", "score": "0.5953669", "text": "def inherits_from(obj, a_class):\n if type(obj) == a_class or not isinstance(obj, a_class):\n return False\n else:\n return True", "title": "" }, { "docid": "263a8de06a083b8d02332b4098266746", "score": "0.59484315", "text": "def inherits_from(obj, a_class):\n return isinstance(obj, a_class) and type(obj) != a_class", "title": "" }, { "docid": "263a8de06a083b8d02332b4098266746", "score": "0.59484315", "text": "def inherits_from(obj, 
a_class):\n return isinstance(obj, a_class) and type(obj) != a_class", "title": "" }, { "docid": "4f47ead5c12d01c2872e8e8e34d1b750", "score": "0.59436685", "text": "def is_kind_of_class(obj, a_class):\n return (isinstance(type(obj), a_class) or issubclass(type(obj), a_class))", "title": "" }, { "docid": "c72d88712a5d7675168ffbaa72c92cef", "score": "0.5933938", "text": "def isObject(self) -> bool:\n ...", "title": "" }, { "docid": "b3754911e26d2a99aa8bdb60eb652daa", "score": "0.5933445", "text": "def inherits_from(obj, a_class):\n result = issubclass(type(obj), a_class)\n if result and (type(obj) != a_class):\n return True\n else:\n return False", "title": "" }, { "docid": "80b33fbf4ee697a2e92c85648e3eff29", "score": "0.5911799", "text": "def check(self, obj):\n\n return isinstance(obj, (gridworldenvironment.GridWorldEnvironment))", "title": "" }, { "docid": "63d4fafd642130f4c4b2315468dceaa6", "score": "0.59001523", "text": "def type_check(self, **kw):\n pass", "title": "" }, { "docid": "63d4fafd642130f4c4b2315468dceaa6", "score": "0.59001523", "text": "def type_check(self, **kw):\n pass", "title": "" }, { "docid": "651b5be41b5232246f7bfc5968239f25", "score": "0.58891714", "text": "def inherits_from(obj, a_class):\n return issubclass(type(obj), a_class) and type(obj) != a_class", "title": "" }, { "docid": "651b5be41b5232246f7bfc5968239f25", "score": "0.58891714", "text": "def inherits_from(obj, a_class):\n return issubclass(type(obj), a_class) and type(obj) != a_class", "title": "" }, { "docid": "5222c0f4012e9bfe26a1bc9576d9fbf6", "score": "0.587661", "text": "def inherits_from(obj, a_class):\n if issubclass(type(obj), a_class) and type(obj) != a_class:\n return True\n return False", "title": "" }, { "docid": "897a5bf8c73983cf504ce4862c783a5b", "score": "0.5863801", "text": "def inherits_from(obj, a_class):\n if (type(obj) != a_class):\n return (isinstance(obj, a_class))\n else:\n return (False)", "title": "" }, { "docid": "955b74ea25cadbfa98bd0c05ee8eeb82", "score": "0.58190536", "text": "def is_kind_of_class(obj, a_class):\n if isinstance(obj, a_class):\n return True\n if issubclass(type(obj), a_class):\n return True\n return False", "title": "" }, { "docid": "a2c0d772febceec5d6a0107223a1f712", "score": "0.5806689", "text": "def type_check(self, **kw):\n raise NotImplementedError", "title": "" }, { "docid": "4d4c55ed66eb49fdc8f7722e181d0aba", "score": "0.58029366", "text": "def inherits_from(obj, a_class):\n if type(obj) != a_class:\n return isinstance(obj, a_class)", "title": "" }, { "docid": "9f8d72781d579fa97cd8cb41f673bcb2", "score": "0.58003384", "text": "def isinstance(self, cls):\n return self.cls.issubclass(cls)", "title": "" }, { "docid": "4cd78ba928221cb3d74b0737e854210f", "score": "0.57981366", "text": "def inherits_from(obj, a_class):\n\n return isinstance(obj, a_class) and type(obj) is not a_class", "title": "" }, { "docid": "eee62ed004c33fc0ea4604c5d12bc54b", "score": "0.57960504", "text": "def _verify_not_type_mismatch(second: \"CountingBloomFilter\") -> bool:\n return isinstance(second, (CountingBloomFilter))", "title": "" }, { "docid": "47b7193d79f4156a7f503c7e51d84800", "score": "0.57955", "text": "def test_testobjs(self):\n self.assertTrue(self.tcit_a)\n self.assertTrue(type(self.tcit_a.__class__) is type)\n self.assertTrue(self.tcit_b)\n self.assertTrue(type(self.tcit_b.__class__) is type)", "title": "" }, { "docid": "dc5c086b25f39b7c0cfae8a90f7e1a65", "score": "0.57935536", "text": "def inherits_from(obj, a_class):\n\n if issubclass(obj.__class__, a_class) and 
type(obj) != a_class:\n return True\n return False", "title": "" }, { "docid": "9f7d7ff5e6ae6f7ecc4616e36e94cbd1", "score": "0.57914776", "text": "def __validate(type1, type2):\n if not isinstance(type1, type2):\n raise ExchangeError('Type mismatch {}'.format((type1, type2)))", "title": "" }, { "docid": "dc05dabdb6a971aea216af692c8d308d", "score": "0.5786331", "text": "def inherits_from(obj, a_class):\n\n return (issubclass(type(obj), a_class) and type(obj) != a_class)", "title": "" }, { "docid": "f98473c5cbce006ac7d6264cf0d700db", "score": "0.5754453", "text": "def inherits_from(obj, a_class):\n return issubclass(obj.__class__, a_class) and obj.__class__ != a_class", "title": "" }, { "docid": "cf2c2ce83591de2ee9fe319542fbeaa3", "score": "0.5754218", "text": "def inherits_from(obj, a_class):\n return isinstance(obj, a_class) and not type(obj) == a_class", "title": "" }, { "docid": "82ed45abe36ca76063210d2703eb6b98", "score": "0.5753015", "text": "def inherits_from(obj, a_class):\n return (isinstance(obj, a_class) and obj.__class__ != a_class)", "title": "" }, { "docid": "01fd8a1003b135cef4a04b4e7688e353", "score": "0.5742652", "text": "def validate_obj_type(ws_obj, types):\n ws_type = ws_obj['info'][2]\n if all(t not in ws_type for t in types):\n raise InvalidWSType(given=ws_type, valid_types=types)", "title": "" }, { "docid": "c1cb9c76ba5a3a136db31c55fe7eddd2", "score": "0.5738095", "text": "def test_type(self):\n self.assertIsInstance(self.instance, SetwiseLoss)", "title": "" }, { "docid": "344ccfbc59962f2e11c5fd5f9dccb39d", "score": "0.5736896", "text": "def check_strict_json_compat(\n in_type: Any, expect_type: Type) -> bool: # pylint: disable=g-bare-generic\n check_instance = False\n if getattr(in_type, '__module__', None) not in {'typing', 'builtins'}:\n check_instance = True\n\n def _check(in_type: Any, expect_type: Type) -> bool: # pylint: disable=g-bare-generic\n \"\"\"Check if in_type conforms with expect_type.\"\"\"\n if in_type is Any:\n return expect_type is Any\n elif expect_type is Any:\n return True\n\n in_obj = None\n if check_instance:\n in_obj, in_type = in_type, type(in_type)\n\n in_args = get_args(in_type)\n in_origin = _convert_typing_to_builtin(in_type)\n expect_args = get_args(expect_type)\n expect_origin = _convert_typing_to_builtin(expect_type)\n\n if in_origin is Union:\n return all(_check(arg, expect_type) for arg in in_args)\n if expect_origin is Union:\n if check_instance:\n return any(_check(in_obj, arg) for arg in expect_args)\n else:\n return any(_check(in_type, arg) for arg in expect_args)\n\n if in_origin != expect_origin:\n return False\n elif in_origin in (\n dict, list\n ) and expect_args and expect_args[0].__class__.__name__ == 'TypeVar':\n return True\n elif check_instance:\n if isinstance(in_obj, list):\n return not expect_args or all(\n [_check(o, expect_args[0]) for o in in_obj])\n elif isinstance(in_obj, dict):\n return not expect_args or (\n all(_check(k, expect_args[0]) for k in in_obj.keys()) and\n all(_check(v, expect_args[1]) for v in in_obj.values()))\n else:\n return True\n # For List -> List[X] and Dict -> Dict[X, Y].\n elif len(in_args) < len(expect_args):\n return False\n # For Python 3.7, where Dict and List have args KT, KV, T. 
Return True\n # whenever the expect type is Dict or List.\n else:\n return all(_check(*arg) for arg in zip(in_args, expect_args))\n\n return _check(in_type, expect_type)", "title": "" }, { "docid": "ca190ea3d9a17fae6bbf8a59ce496a5c", "score": "0.5730924", "text": "def test_check_types(self):\n self.assertTrue(attributes.AnyAttr.check_type(''))\n self.assertTrue(attributes.AnyAttr.check_type(2))\n self.assertTrue(attributes.AnyAttr.check_type(2.))\n self.assertTrue(attributes.AnyAttr.check_type(()))", "title": "" }, { "docid": "c8e8ee1066691a4430302a34554154ab", "score": "0.5730014", "text": "def inherits_from(obj, a_class):\n return issubclass(type(obj), a_class) and type(obj) is not a_class", "title": "" }, { "docid": "b0c67074c19bddf6c080b93d1292b548", "score": "0.5723183", "text": "def _check_inputs_type(vertices, edges):\n\n # checking data type\n if not all(isinstance(item, Media) for item in vertices):\n raise TypeError(\"Invalid type for vertices input!\")\n if not all(isinstance(item, Relationship) for item in edges):\n raise TypeError(\"Invalid type for edges input!\")", "title": "" }, { "docid": "196c1581225e26da11a9723fa36d57b1", "score": "0.57171977", "text": "def isOfType(self, *args):\n return _coin.SoError_isOfType(self, *args)", "title": "" }, { "docid": "260c045ac4c1967d5a2accad6c459628", "score": "0.5707575", "text": "def check(obj):\r\n if not isinstance(obj, FunctionType):\r\n raise TypeError(\r\n \"Can't use implementer with classes. \"\r\n \"Use one of the class-declaration functions instead.\")", "title": "" }, { "docid": "824f6ee3a6a9b527d32164b51e0ad654", "score": "0.5706288", "text": "def test_create_input_instance_with_non_input_subclass_value_raises_type_error(\n self,\n ):\n\n self.input_definition.input_class = FileOutputDefinition\n with self.assertRaises(ValidationError):\n self.input_definition.check_input_class_definition()", "title": "" }, { "docid": "97c109610c32328db2b0143dbd9816c5", "score": "0.5700869", "text": "def completely_compatible(to_test_cls, classes):\n for cls in incompatibility_d[to_test_cls]:\n if cls in classes:\n return False\n return True", "title": "" }, { "docid": "87b6afd600d7d62677b321605a6833a8", "score": "0.5699653", "text": "def __is(self, object_instance: WashBase, rule_class) -> bool:\n return textx_isinstance(object_instance, self.__metamodel[rule_class])", "title": "" } ]
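For context, a hedged sketch of the validation logic the positive passage implements: accept None unchanged, pass through instances of the allowed classes, coerce the input when exactly one class is allowed, and raise TypeError otherwise. The positive passage appears to be an astropy-style DifferentialAttribute; the ValidatedAttribute name and the float example below are illustrative assumptions.

class ValidatedAttribute:
    """Validates inputs against a fixed set of allowed classes (sketch)."""

    def __init__(self, allowed_classes):
        self.allowed_classes = tuple(allowed_classes)

    def convert_input(self, value):
        # None is treated as "unset": no conversion is performed.
        if value is None:
            return None, False
        if not isinstance(value, self.allowed_classes):
            if len(self.allowed_classes) == 1:
                # Exactly one allowed class: attempt coercion, mirroring the positive passage.
                value = self.allowed_classes[0](value)
            else:
                raise TypeError(
                    "Unsupported type {}; allowed classes are: {}".format(
                        type(value), self.allowed_classes
                    )
                )
        return value, True


if __name__ == "__main__":
    attr = ValidatedAttribute([float])
    print(attr.convert_input(3))     # -> (3.0, True), coerced to the single allowed class
    print(attr.convert_input(None))  # -> (None, False)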
7c12a8529fc8bd3642c4e5b4b1741c93
Create a new ImageDataGenerator. Receives a path string to a text file, which consists of many lines, where each line has first a path string to an image and, separated by a space, an integer referring to the class number. Using this data, this class will create TensorFlow datasets that can be used to train e.g. a convolutional neural network.
[ { "docid": "c77f3ce864547be70553ac4e5a2d6f26", "score": "0.7018784", "text": "def __init__(self, txt_file, mode, batch_size, num_classes, shuffle=True, buffer_size=1000, img_out_size=224):\n\n self.txt_file = txt_file\n self.num_classes = num_classes\n\n # retrieve the data from the text file\n self._read_txt_file()\n\n # number of samples in the dataset\n self.data_size = len(self.labels)\n\n # initial shuffling of the file and label lists (together!)\n if shuffle:\n self._shuffle_lists()\n\n # the resize img\n self.img_out_size = img_out_size\n\n # convert lists to TF tensor\n self.img_paths = convert_to_tensor(self.img_paths, dtype=dtypes.string)\n self.labels = convert_to_tensor(self.labels, dtype=dtypes.int32)\n\n # create dataset\n data = tf.data.Dataset.from_tensor_slices((self.img_paths, self.labels))\n\n # distinguish between train/infer. when calling the parsing functions\n if mode == 'training':\n data = data.map(self._parse_function_train, num_parallel_calls=20)\n\n elif mode == 'inference':\n data = data.map(self._parse_function_inference, num_parallel_calls=20)\n\n else:\n raise ValueError(\"Invalid mode {}\" .format(mode))\n\n # shuffle the first `buffer_size` elements of the dataset\n if shuffle:\n data = data.shuffle(buffer_size=buffer_size)\n\n # create a new dataset with batches of images\n data = data.batch(batch_size)\n data = data.repeat()\n iterator = data.make_one_shot_iterator()\n self.iterator = iterator", "title": "" } ]
[ { "docid": "d353213e4316bf49056fec55c10bfd59", "score": "0.649757", "text": "def get_train_data_gen(self, train_data_path):\n train_datagen = ImageDataGenerator(rotation_range=180, horizontal_flip=True, vertical_flip=True)\n train_generator = train_datagen.flow_from_directory(\n train_data_path,\n target_size=(200, 200),\n color_mode='rgb',\n batch_size=32,\n class_mode='categorical',\n shuffle = True,\n seed = 15)\n\n return train_generator", "title": "" }, { "docid": "1ea90a806b776929d2308ff173e2893d", "score": "0.64579684", "text": "def generator(self, dirname, ):\n self._img_generator = self._img_datagen.flow_from_directory(\n dirname,\n target_size=(self._img_size, self._img_size),\n batch_size=self._batch_size,\n class_mode='binary')\n\n return self._img_generator", "title": "" }, { "docid": "5a8209695951435851c3440450a9d869", "score": "0.6361587", "text": "def get_test_data_gen(self, test_data_path):\n test_datagen = ImageDataGenerator()\n test_generator = test_datagen.flow_from_directory(\n test_data_path,\n target_size=(200, 200),\n color_mode='rgb',\n batch_size=32,\n class_mode='categorical',\n shuffle = False)\n\n return test_generator", "title": "" }, { "docid": "c213efa478390c12b75d29a558e58b8c", "score": "0.6360125", "text": "def train(self, data_path: str = None, *_, **__):\n\n def _data_verify(p: str):\n p = pathlib.Path(p)\n assert p.is_dir(), f\"{p} is not a valid directory\"\n # validate: at least two classes\n number_of_dir = len([each for each in os.listdir(p) if (p / each).is_dir()])\n assert (\n number_of_dir > 1\n ), f\"dataset only contains one class. maybe some path errors happened: {p}?\"\n\n # more than 6 classes?\n assert number_of_dir <= self.MODEL_DENSE, (\n f\"dataset has {number_of_dir} classes (more than \" + str(self.MODEL_DENSE) + \"), please see \"\n f\"https://github.com/williamfzc/stagesepx/issues/112 \"\n )\n\n _data_verify(data_path)\n\n if not self._model:\n logger.debug(\"no model can be used. 
build a new one.\")\n self._model = self.create_model()\n else:\n logger.debug(\"model found\")\n\n datagen = ImageDataGenerator(\n rescale=1.0 / 16, shear_range=0.2, zoom_range=0.2, validation_split=0.33\n )\n\n train_generator = datagen.flow_from_directory(\n data_path,\n target_size=self.data_size,\n batch_size=self.batch_size,\n color_mode=\"grayscale\",\n class_mode=\"sparse\",\n subset=\"training\",\n )\n\n validation_generator = datagen.flow_from_directory(\n data_path,\n target_size=self.data_size,\n batch_size=self.batch_size,\n color_mode=\"grayscale\",\n class_mode=\"sparse\",\n subset=\"validation\",\n )\n\n self._model.fit(\n train_generator,\n epochs=self.epochs,\n validation_data=validation_generator,\n )\n\n logger.debug(\"train finished\")", "title": "" }, { "docid": "e51472ca6d9135b4781cbfdae0cc1a64", "score": "0.63093054", "text": "def generate_training_data(fname):\n with open(fname, \"rb\") as f:\n for line in f:\n shirt, targets = line.split(\":\")\n shirt_path = \"training_shirts/{image}.jpg\".format(\n image=shirt.strip()\n )\n shirt_path = os.path.abspath(shirt_path)\n\n # parse out the list of targets\n target_list = targets.strip()[1:-1].split(\",\")\n labels = map(lambda target: \"label\" + target.strip(), target_list)\n yield [ (shirt_path, label) for label in labels]\n raise StopIteration", "title": "" }, { "docid": "448c26ff3731febd36069099c3a7c440", "score": "0.6273671", "text": "def train_data_generator(train_datagen, train_data_dir, img_height, img_width,\n batch_size):\n train_generator = train_datagen.flow_from_directory(\n train_data_dir,\n target_size=(img_height, img_width),\n batch_size=batch_size,\n save_to_dir='/tmp/keras',\n class_mode=\"binary\")\n return train_generator", "title": "" }, { "docid": "2431eb4046ca608479255aab3db214d9", "score": "0.6089303", "text": "def generate_data(batch_size, train_path, aug_dict, target_size):\r\n data_gen = trainGenerator(batch_size = batch_size,\r\n train_path = train_path,\r\n image_folder = 'image',\r\n mask_folder = 'label',\r\n aug_dict = aug_dict,\r\n target_size = target_size)\r\n data_n = len([i for i in os.listdir(os.path.join(train_path, 'image')) if i.endswith('.jpg')])\r\n return data_gen, data_n", "title": "" }, { "docid": "5489aed6443b4e8e7e1274acc27d80c1", "score": "0.59989804", "text": "def __init__(self, path, train=True, train_split=0.7, download=True, random_seed=1, transform=None):\n super(TinyImageNetDataset, self).__init__()\n \n self.path = path\n self.train = train\n self.train_split = train_split\n self.transform = transform\n self._validate_params()\n\n # Download dataset\n if download:\n self.download()\n\n self._class_ids = self._get_class_map()\n self.data, self.targets = self._load_data()\n\n self._image_indices = np.arange(len(self.targets))\n\n np.random.seed(random_seed)\n np.random.shuffle(self._image_indices)\n\n split_idx = int(len(self._image_indices) * train_split)\n self._image_indices = self._image_indices[:split_idx] if train else self._image_indices[split_idx:]", "title": "" }, { "docid": "1cd67bd3a344e87d40599057350398dd", "score": "0.59871066", "text": "def generate_dataset(output_dir, num_sequences, length_per_seq, csv_fp, h, w):\n h = 800 if h is None else h\n w = 1024 if w is None else w\n # First make dir structure\n root_path, img_path, new_path, t20_path = make_dir_structure(output_dir)\n if length_per_seq is None:\n length_per_seq = [randint(5, 50) for _ in range(num_sequences)]\n else:\n length_per_seq = [length_per_seq] * num_sequences\n\n # Load classes csv 
file and read categories\n cat_counter = 1\n cats = dict()\n with open(csv_fp) as csv_file:\n reader = DictReader(csv_file)\n for line in reader:\n cats[str(cat_counter)] = {\n 'super_category': line['super_category'],\n 'category': line['category'],\n 'avoidable': line['avoidable']\n }\n cat_counter += 1\n\n # Initialize an empty dictionary for image annotations\n imgs = dict()\n total = sum(length_per_seq)\n p_bar = tqdm(total=total, unit='img')\n\n for i in range(num_sequences):\n b = BinSequence(csv_fp, h, w)\n\n sequence = b.generate_sequence(length_per_seq[i], p_bar)\n\n for j, out in enumerate(sequence):\n img_name = f'{i:04}-{j:04}'\n\n # Save the files\n out['rendered_img'].save(join(img_path, img_name + '.jpg'))\n out['new_object_gt'].save(join(new_path, img_name + '.png'))\n out['top_20_gt'].save(join(t20_path, img_name + '.png'))\n\n # Add the annotation to the annotations dict\n prev_img = None if j == 0 else f'{i:04}-{j - 1:04}.jpg'\n next_img = None if j == length_per_seq[i] - 1 \\\n else f'{i:04}-{j + 1:04}.jpg'\n\n imgs[img_name + '.jpg'] = {\n 'new_obj_mask': join('new_object_masks', img_name + '.png'),\n 'top_20_mask': join('top_20_masks', img_name + '.png'),\n 'prev_img': prev_img,\n 'next_img': next_img,\n }\n\n anns = {'categories': cats, 'images': imgs}\n\n with open(join(root_path, 'annotations.json'), mode='w') as fp:\n json.dump(anns, fp, indent=2)", "title": "" }, { "docid": "d66573aa8d497c0988a2c84a49129a56", "score": "0.5978181", "text": "def generate_numbers_dataset():\n # Font Properties:\n photoSize = (26, 18)\n fontScale = 1\n thickness = [1, 2, 3]\n\n # Fonts:\n fonts = [f for f in range(8)]\n fonts.remove(1), fonts.remove(5), fonts.remove(7)\n print('generating dataset for', len(fonts), 'fonts with', len(thickness), 'different thicknesses...')\n\n dataset = []\n for font in fonts:\n for thick in thickness:\n numbers_gray = []\n # [DEBUG] numbers_image = []\n\n # Generating all numbers:\n for number in [num for num in range(1, 10)]:\n photo = np.zeros((photoSize[0], photoSize[1], 3), np.float32)\n digit_BGR = cv.putText(photo, str(number), org=(-2, photoSize[0] - 3), fontFace=font,\n fontScale=fontScale, color=(255, 255, 255), thickness=thick,\n lineType=cv.LINE_AA)\n digit_gray = cv.cvtColor(digit_BGR, cv.COLOR_BGR2GRAY)\n\n # Make image square\n side = np.zeros((26, 4))\n digit_gray = np.hstack((np.hstack((side, digit_gray)), side))\n\n # Scale image to 32x32 for HOG\n digit_gray = cv.resize(digit_gray, (32, 32))\n\n # Add number to gray list:\n numbers_gray.append(digit_gray)\n\n # [DEBUG] Add number to NumbersImage\n # if len(numbers_image) > 0:\n # numbers_image = np.hstack((numbers_image, digit_gray))\n # else:\n # numbers_image = digit_gray\n\n # Add digits gray to train set list\n dataset.append(numbers_gray)\n\n # [DEBUG]Show Numbers\n # cv.imshow('Numbers', numbers_image)\n # cv.waitKey(0)\n\n # add preloaded font to dataset\n def get_fonts_images(j):\n nums = []\n for i in range(1, 10):\n path = f'Dataset/fonts/Font{j}/{i}.jpg'\n num = cv.imread(path, 0)\n num = cv.resize(num, (32, 32))\n nums.append(num)\n return nums\n\n folders_count = 35\n print('generating dataset for', folders_count, 'fonts...')\n for folder_id in range(1, folders_count+1):\n frame_9nums = get_fonts_images(folder_id)\n dataset.append(frame_9nums)\n\n dataset = np.array(dataset)\n print('generated dataset shape:', dataset.shape)\n return dataset", "title": "" }, { "docid": "1e1eaf333917e2f398004e4cd81c6559", "score": "0.5948972", "text": "def 
validation_data_generator(test_datagen, validation_data_dir, img_height,\n img_width):\n validation_generator = test_datagen.flow_from_directory(\n validation_data_dir,\n target_size=(img_height, img_width),\n class_mode=\"binary\")\n return validation_generator", "title": "" }, { "docid": "35d01222e5a116722b4a8f02ee4b2462", "score": "0.5931262", "text": "def dataset_parser(config, filename, label):\n image_string = tf.read_file(filename)\n image_decoded = tf.image.decode_image(image_string)\n image_decoded.set_shape([None, None, config.data_cfg.image_channel])\n image_resized = tf.image.resize_images(image_decoded, \n [config.data_cfg.image_height, config.data_cfg.image_width])\n \n return image_resized,tf.one_hot(label, config.data_cfg.class_number)", "title": "" }, { "docid": "50792047c085df757dc74965e34d6add", "score": "0.5925856", "text": "def generator(data, batch_size=32):\n # Unroll zip object to a list\n lines = list(data)\n \n lines_nr = len(lines)\n \n while 1:\n \n np.random.shuffle(lines)\n \n # Set step to batch_size/2 in order to account for\n # flipped image, measurement pairs\n for i in range(0, lines_nr, int(batch_size/2)):\n \n batch = lines[i:i+int(batch_size/2)]\n \n images = []\n measurements = []\n \n for line in batch:\n \n image = cv2.imread(line[0])\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n flipped_img= cv2.flip(image, 1)\n images.append(image)\n images.append(flipped_img)\n\n measurement = float(line[1])\n flipped_measure = -measurement\n \n measurements.append(measurement)\n measurements.append(flipped_measure) \n \n features = np.array(images)\n labels = np.array(measurements)\n \n yield features, labels", "title": "" }, { "docid": "b76652bf689fffa49d3d8580c972f479", "score": "0.5902249", "text": "def create_generator(args):\n if args.dataset_type == 'csv':\n validation_generator = CSVGenerator(\n # args.annotations,\n args.classes,\n video_path = args.video_path,\n depth = args.depth,\n image_min_side=args.image_min_side,\n image_max_side=args.image_max_side,\n config=args.config\n )\n else:\n raise ValueError('Invalid data type received: {}'.format(args.dataset_type))\n\n return validation_generator", "title": "" }, { "docid": "691e4cd4cdea1df9ace618983fa6d66c", "score": "0.5899955", "text": "def get_train_data():\n\n images = [] # images\n labels = [] # corresponding labels\n # loop over all 43 classes\n for c in range(0, Parameters.class_count):\n prefix = os.path.join(Parameters.train_set_path, format(c, '05d')) # subdirectory for class\n csv_filename = 'GT-' + format(c, '05d') + '.csv'\n\n with open(os.path.join(prefix, csv_filename)) as csv_file: # annotations file\n csv_reader = csv.reader(csv_file, delimiter=';') # csv parser for annotations file\n next(csv_reader) # skip header\n # loop over all images in current annotations file\n for row in csv_reader:\n # images.append('Class {}, track {}, image {}'.format(c, int(row[0][:5]), int(row[0][6:-4])))\n images.append(plt.imread(os.path.join(prefix, row[0]))) # the 1th column is the filename\n labels.append(int(row[7])) # the 8th column is the label\n\n return images, labels", "title": "" }, { "docid": "7e6d893b006a4de711a9b8dfa6951602", "score": "0.58996755", "text": "def data_generator(dataset, nn_learner, params):\n # Define generator and settings according to the specified data set\n if dataset == \"training\":\n datagen = ImageDataGenerator(rescale=1./255,\n rotation_range=20,\n shear_range=0.3,\n zoom_range=0.3,\n horizontal_flip=True\n )\n dir_data = 
os.path.abspath(params[0]['dir']['training_data'])\n shuffle = True\n elif dataset == \"validation\":\n datagen = ImageDataGenerator(rescale=1./255)\n dir_data = os.path.abspath(params[0]['dir']['validation_data'])\n shuffle = False\n else:\n datagen = ImageDataGenerator(rescale=1./255)\n dir_data = os.path.abspath(params[0]['dir']['testing_data'])\n shuffle = False\n\n # Check for availability of images\n n_images = len(os.listdir(dir_data + \"/images/\"))\n if n_images == 0:\n msg = \"No images found in \" + dir_data + \"/images/\"\n raise OSError(msg)\n\n # Set batch size\n if dataset == \"testing\":\n batch_size = min(n_images, 100)\n else:\n batch_size = params[1]['batch_size']\n\n # Total number of batches\n n_batches = int(np.ceil(np.true_divide(n_images, batch_size)))\n\n # Get image height and width needed for the input of the network\n H = params[0]['input']['height']\n W = params[0]['input']['width']\n\n # Define flow, which takes the path to a directory and generates batches\n flow = datagen.flow_from_directory(dir_data,\n target_size=(H, W),\n batch_size=batch_size,\n color_mode=\"rgb\",\n class_mode=None,\n shuffle=shuffle)\n # Loop through each batch\n for batch_rgb in flow:\n # Convert RGB to LAB\n batch_lab = color.rgb2lab(batch_rgb) # (batch_size, H, W, 3)\n\n # Extract the L-channel.\n batch_l = batch_lab[:, :, :, 0, np.newaxis] # (batch_size, H, W, 1)\n\n # Normalize space from [0, 100] to [0, 1]\n batch_l_norm = batch_l / 100 # (batch_size, H, W, 1)\n\n # Extract the a and b channel\n batch_ab = batch_lab[:, :, :, 1:] # (batch_size, H, W, 2)\n\n # Get probability distribution over possible colors\n # Note: z in [0, 1] where z is an element of batch_probs\n batch_probs = y2z(batch_ab, nn_learner, params[0]) # (batch_size, \n # Ht, Wt, Q)\n # Define input batch\n batch_input = batch_l_norm # (batch_size, H, W, 1)\n\n # Define ground truth batch\n batch_truth = batch_probs # (batch_size, Ht, Wt, Q)\n\n if dataset == \"testing\":\n yield (batch_input, batch_truth, batch_ab, n_batches)\n else:\n yield (batch_input, batch_truth)", "title": "" }, { "docid": "6f2c9386ddb05add233fabf78ee85b1f", "score": "0.5892304", "text": "def generate(batch, size):\n\n # Using the data Augmentation in traning data\n ptrain = 'dataclassify/train'\n pval = 'dataclassify/validation'\n\n datagen1 = ImageDataGenerator(\n rescale=1. / 255,\n shear_range=0.2,\n zoom_range=0.2,\n rotation_range=90,\n width_shift_range=0.2,\n height_shift_range=0.2,\n horizontal_flip=True)\n\n datagen2 = ImageDataGenerator(rescale=1. 
/ 255)\n\n train_generator = datagen1.flow_from_directory(\n ptrain,\n target_size=(size, size),\n batch_size=batch,\n class_mode='categorical')\n\n validation_generator = datagen2.flow_from_directory(\n pval,\n target_size=(size, size),\n batch_size=batch,\n class_mode='categorical')\n\n count1 = 0\n for root, dirs, files in os.walk(ptrain):\n for each in files:\n count1 += 1\n\n count2 = 0\n for root, dirs, files in os.walk(pval):\n for each in files:\n count2 += 1\n\n return train_generator, validation_generator, count1, count2", "title": "" }, { "docid": "2e83e8a27fd2707b712963385fb24fba", "score": "0.58673316", "text": "def make_dataset(filepaths, n_steps=100, batch_size=32):\n\n with open(filepaths) as f:\n text = f.read()\n\n tokenizer = keras.preprocessing.text.Tokenizer(char_level=True)\n tokenizer.fit_on_texts([text])\n max_id = len(tokenizer.word_index)\n\n [encoded] = np.array(tokenizer.texts_to_sequences([text])) - 1\n dataset = tf.data.Dataset.from_tensor_slices(encoded)\n\n window_length = n_steps + 1\n dataset = dataset.window(window_length, shift=1, drop_remainder=True)\n\n dataset = dataset.flat_map(lambda window: window.batch(window_length))\n dataset = dataset.shuffle(10000).batch(batch_size)\n dataset = dataset.map(lambda windows: (windows[:, :-1], windows[:, 1:]))\n dataset = dataset.map(lambda X_batch, Y_batch: (tf.one_hot(X_batch, depth=max_id), Y_batch))\n dataset = dataset.prefetch(1)\n\n return dataset", "title": "" }, { "docid": "bacdbbde0f01f29f127a36df0c92cde3", "score": "0.58636874", "text": "def _input_gen():\n import cv2\n i=0\n for image_file in image_file_list:\n log.info('Image %d of %d: %s' % (\n i,\n len(image_file_list) - 1,\n image_file,\n ))\n cv2_image = cv2.imread(os.path.join(args.dataset_split_path, image_file))\n preprocessed_image = model_preprocessor_fn(cv2_image)\n image = np.array(preprocessed_image)\n yield [[image]]\n i+=1", "title": "" }, { "docid": "7727e82feff869c124948779ff07c710", "score": "0.5862453", "text": "def gen(fontfaces, lines, useAllFonts=False, repeats=1, batch_size=16, imshape=(225, 2200, 1), dataLen=200, maxLabelLen=80, height=150, noiseChance=0.5, lightingChance=0.5, shuffle=True, split=True, auxOutput=False):\n if useAllFonts:\n idxs = make_idx([len(fontfaces), len(lines)], repeats)\n else:\n idxs = np.stack([np.empty(len(lines)*repeats, dtype=int),\n np.tile(np.arange(len(lines)), repeats)],\n axis=-1)\n x = np.zeros((batch_size,) + imshape, dtype=int)\n y = np.zeros((batch_size, maxLabelLen, charUtils.componentCount), dtype=int)\n xLen = np.zeros((batch_size,), dtype=int)\n yLen = np.zeros((batch_size,), dtype=int)\n loss = [np.zeros((batch_size,)) for _ in range(charUtils.componentCount)]\n while True:\n if not useAllFonts:\n idxs[:, 0] = np.random.randint(len(fontfaces), size=idxs.shape[0])\n if shuffle:\n np.random.shuffle(idxs)\n for i in range(idxs.shape[0]):\n f, l = idxs[i]\n face = fontfaces[f]\n s = lines[l]\n canvas = np.zeros(imshape, dtype='uint8')\n tempCanvas, _, _ = gen_text_image(s, face, height, None, 7, 5, 3, safePad=150)\n tempCanvas = tempCanvas[..., None]\n if tempCanvas.shape[1] > imshape[1]:\n scale = imshape[1] / tempCanvas.shape[1]\n tempCanvas = cv2.resize(tempCanvas, dsize=(0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_AREA)[..., None]\n yOffset = np.random.randint(imshape[0]-tempCanvas.shape[0]+1)\n xOffset = np.random.randint(imshape[1]-tempCanvas.shape[1]+1)\n canvas[yOffset:yOffset+tempCanvas.shape[0],\n xOffset:xOffset+tempCanvas.shape[1]] = tempCanvas\n x[i%batch_size] = 
(255-canvas)\n if np.random.random() < noiseChance:\n x[i%batch_size] = noiseUtils.random_background(x[i%batch_size])\n if np.random.random() < lightingChance:\n x[i%batch_size] = noiseUtils.random_lighting(x[i%batch_size], gain_range=(0.5, 1.0))\n xLen[i%batch_size] = dataLen\n \n encoded_labels = charUtils.str2idx(s, split=split)\n y[i%batch_size] = np.pad(encoded_labels, [(0, max(0, maxLabelLen - encoded_labels.shape[0])), (0, 0)], 'constant')\n yLen[i%batch_size] = encoded_labels.shape[0]\n \n if (i+1)%batch_size == 0:\n inp = [x, y, xLen, yLen]\n outp = (loss + [charUtils.idx2auxout(y)]) if auxOutput else loss\n yield inp, outp\n if idxs.shape[0]%batch_size != 0:\n inp = [x, y, xLen, yLen]\n outp = (loss + [charUtils.idx2auxout(y)]) if auxOutput else loss\n yield [z[:idxs.shape[0]%batch_size] for z in inp], [z[:idxs.shape[0]%batch_size] for z in outp]", "title": "" }, { "docid": "0264aad17a7efb32640f63758781f2b6", "score": "0.5861028", "text": "def run(image_path, dataset_dir):\n\n if not tf.gfile.Exists(image_path):\n print(\"Image Path doesn't exist.\")\n \n if not tf.gfile.Exists(dataset_dir):\n tf.gfile.MakeDirs(dataset_dir)\n\n image_list = glob.glob(os.path.join(image_path, '*/*'))\n random.shuffle(image_list)\n total_cnt = len(image_list)\n test_cnt = int(total_cnt/4) if int(total_cnt/4) > 0 else 1\n train_cnt = total_cnt - test_cnt\n\n train_img_list = image_list[:train_cnt]\n test_img_list = image_list[train_cnt:]\n\n # Finally, write the labels file:\n class_name = []\n for label_item in glob.glob(os.path.join(image_path, '*')):\n class_name.append(label_item.split('/')[-1])\n\n labels_to_class_names = dict(zip(range(len(class_name)), class_name))\n\n class_names_to_labels = {}\n for i in labels_to_class_names.keys():\n class_names_to_labels[labels_to_class_names[i]] = i\n \n dataset_utils.write_label_file(labels_to_class_names, dataset_dir)\n\n\n training_filename = _get_output_filename(dataset_dir, 'train')\n testing_filename = _get_output_filename(dataset_dir, 'test')\n\n if tf.gfile.Exists(training_filename) and tf.gfile.Exists(testing_filename):\n print('Dataset files already exist. 
Exiting without re-creating them.')\n return\n\n # First, process the training data:\n with tf.python_io.TFRecordWriter(training_filename) as tfrecord_writer:\n offset = 0\n # for i in range(_NUM_TRAIN_FILES):\n # filename = os.path.join(dataset_dir,\n # 'captured_img',\n # 'data_batch_%d' % (i + 1)) # 1-indexed.\n offset = _add_to_tfrecord(train_img_list, class_names_to_labels, tfrecord_writer, offset)\n\n # Next, process the testing data:\n with tf.python_io.TFRecordWriter(testing_filename) as tfrecord_writer:\n # filename = os.path.join(dataset_dir,\n # 'captured_img',\n # 'test_batch')\n _add_to_tfrecord(test_img_list, class_names_to_labels, tfrecord_writer)\n\n# _clean_up_temporary_files(dataset_dir)\n print('\\nFinished converting the image dataset!')\n return train_cnt, test_cnt", "title": "" }, { "docid": "ca51bcd30ba2794c48cef2c491c2f168", "score": "0.5835604", "text": "def generator(z, txt, img_height, img_width, img_depth=3, s_dim=128, gf_dim=128, is_train=True, reuse=tf.AUTO_REUSE):\n H, W, D = img_height, img_width, img_depth\n w_init = tf.random_normal_initializer(stddev=0.02)\n gamma_init = tf.random_normal_initializer(mean=1.0, stddev=0.02)\n H2, H4, H8, H16 = int(H / 2), int(H / 4), int(H / 8), int(H / 16)\n W2, W4, W8, W16 = int(W / 2), int(W / 4), int(W / 8), int(W / 16)\n with tf.variable_scope('Generator', reuse=reuse):\n txt = Layer.dense(txt, s_dim, act=tf.nn.leaky_relu,\n W_init=w_init, name='txt/dense')\n code = tf.concat([z, txt], axis=1, name='code')\n h0 = Layer.dense(code, gf_dim * 8 * H16 * W16, act=tf.identity,\n W_init=w_init, b_init=None, name='h0/dense')\n h0 = tf.reshape(\n h0, shape=[-1, H16, W16, gf_dim * 8], name='h0/reshape')\n h0 = Layer.batch_norm(h0, act=tf.nn.relu, is_train=is_train,\n gamma_init=gamma_init, name='h0/batch_norm')\n # 1\n h1 = Layer.deconv2d(h0, act=tf.identity, filter_shape=[4, 4, gf_dim * 4, gf_dim * 8],\n output_shape=[tf.shape(h0)[0], H8, W8, gf_dim * 4], strides=[1, 2, 2, 1], padding='SAME',\n W_init=w_init, name='h1/deconv2d')\n h1 = Layer.batch_norm(h1, act=tf.nn.relu, is_train=is_train,\n gamma_init=gamma_init, name='h1/batch_norm')\n # 2\n h2 = Layer.deconv2d(h1, act=tf.identity, filter_shape=[4, 4, gf_dim * 2, gf_dim * 4],\n output_shape=[tf.shape(h1)[0], H4, W4, gf_dim * 2], strides=[1, 2, 2, 1], padding='SAME',\n W_init=w_init, name='h2/deconv2d')\n h2 = Layer.batch_norm(h2, act=tf.nn.relu, is_train=is_train,\n gamma_init=gamma_init, name='h2/batch_norm')\n # 3\n h3 = Layer.deconv2d(h2, act=tf.identity, filter_shape=[4, 4, gf_dim, gf_dim * 2],\n output_shape=[tf.shape(h1)[0], H2, W2, gf_dim], strides=[1, 2, 2, 1], padding='SAME',\n W_init=w_init, name='h3/deconv2d')\n h3 = Layer.batch_norm(h3, act=tf.nn.relu, is_train=is_train,\n gamma_init=gamma_init, name='h3/batch_norm')\n # output\n h4 = Layer.deconv2d(h3, act=tf.identity, filter_shape=[4, 4, D, gf_dim],\n output_shape=[tf.shape(h3)[0], H, W, D], strides=[1, 2, 2, 1], padding='SAME',\n W_init=w_init, name='h10/deconv2d')\n logits = h4\n outputs = tf.div(tf.nn.tanh(logits) + 1, 2)\n return outputs", "title": "" }, { "docid": "e27e6f7b5593055ad00aee4ae32332e8", "score": "0.58307236", "text": "def _read_txt_file(self):\n self.img_paths = []\n self.labels = []\n with open(self.txt_file, 'r') as f:\n lines = f.readlines()\n for line in lines:\n items = line.split(' ')\n self.img_paths.append(items[0])\n self.labels.append(int(items[1]))", "title": "" }, { "docid": "83151c8d8dc9ebed3849eb28248db67e", "score": "0.582847", "text": "def dataloder(path):\n \n ## Initialize an 
empty list to store all the images and their associated labels !!\n data = []\n \n ## Define classes !!\n image_classes = ['day', 'night']\n \n ## Iterate over each class folder !!\n for img_class in image_classes:\n \n ## Iterate over all the images to read them !!\n for img_path in glob.glob(os.path.join(path, img_class, '*')):\n \n ## Reading image !!\n image = mplimg.imread(img_path)\n \n ## Check if the image exists !!\n if image is not None:\n ## Store all the images and their associated labels inside a list in the form of tuples !!\n data.append((image, img_class))\n \n \n return data", "title": "" }, { "docid": "72b389e4b4df06ca565a9611f91a238c", "score": "0.5824619", "text": "def ProvideData(self, batch_size):\n label_bytes = 1\n label_offset = 0\n\n image_bytes = IMAGE_SIZE * IMAGE_SIZE * NUM_CHANNELS\n record_bytes = label_bytes + label_offset + image_bytes\n\n file_names = tf.gfile.Glob(self.data_files)\n file_queue = tf.train.string_input_producer(file_names, shuffle=True)\n # Read examples from files in the filename queue.\n reader = tf.FixedLengthRecordReader(record_bytes=record_bytes)\n _, value = reader.read(file_queue)\n\n # Convert these examples to dense labels and processed images.\n record = tf.reshape(tf.decode_raw(value, tf.uint8), [record_bytes])\n label = tf.cast(\n tf.strided_slice(record, [label_offset], [label_offset + label_bytes]),\n tf.int32)\n # Convert from string to [depth * height * width] to [depth, height, width].\n depth_major = tf.reshape(\n tf.strided_slice(record, [label_bytes], [label_bytes + image_bytes]),\n [NUM_CHANNELS, IMAGE_SIZE, IMAGE_SIZE])\n # Convert from [depth, height, width] to [height, width, depth].\n image = tf.cast(tf.transpose(depth_major, [1, 2, 0]), tf.float32)\n\n if self.split_name == 'train':\n # Randomly crop a [height, width] section of the image.\n if FLAGS.random_crop:\n image = tf.random_crop(image, [FLAGS.crop_size, FLAGS.crop_size, 3])\n else:\n # Crop the central [FLAGS.crop_size, FLAGS.crop_size] of the image.\n image = tf.image.resize_image_with_crop_or_pad(\n image, FLAGS.crop_size, FLAGS.crop_size)\n\n if FLAGS.data_augmentation:\n # Randomly flip the image horizontally.\n image = tf.image.random_flip_left_right(image)\n\n # Randomize the pixel values.\n # Most images = 0 if random_brightness applied, so test before using.\n #image = tf.image.random_brightness(image, max_delta=63./255.)\n image = tf.image.random_saturation(image, lower=0.5, upper=1.5)\n image = tf.image.random_contrast(image, lower=0.2, upper=1.8)\n\n if FLAGS.per_image_whitening:\n image = tf.image.per_image_standardization(image)\n else:\n image = image / 255.0\n\n example_queue = tf.RandomShuffleQueue(\n capacity=16 * batch_size,\n min_after_dequeue=8 * batch_size,\n dtypes=[tf.float32, tf.int32],\n shapes=[[FLAGS.crop_size, FLAGS.crop_size, NUM_CHANNELS], [1]])\n num_threads = 16\n else:\n image = tf.image.resize_image_with_crop_or_pad(\n image, FLAGS.crop_size, FLAGS.crop_size)\n if FLAGS.per_image_whitening:\n image = tf.image.per_image_standardization(image)\n else:\n image = image / 255.0\n\n example_queue = tf.FIFOQueue(\n 3 * batch_size,\n dtypes=[tf.float32, tf.int32],\n shapes=[[FLAGS.crop_size, FLAGS.crop_size, NUM_CHANNELS], [1]])\n num_threads = 1\n\n example_enqueue_op = example_queue.enqueue([image, label])\n tf.train.add_queue_runner(tf.train.queue_runner.QueueRunner(\n example_queue, [example_enqueue_op] * num_threads))\n\n # Read 'batch' labels + images from the example queue.\n images, labels = 
example_queue.dequeue_many(batch_size)\n labels = tf.one_hot(tf.squeeze(labels), self.config.number_of_classes)\n\n assert len(images.get_shape()) == 4\n assert images.get_shape()[0] == batch_size\n assert images.get_shape()[-1] == NUM_CHANNELS\n assert len(labels.get_shape()) == 2\n assert labels.get_shape()[0] == batch_size\n assert labels.get_shape()[1] == self.config.number_of_classes\n\n return images, self.NormalizeData(images, 3), labels", "title": "" }, { "docid": "853d28df6e7f2af7bf06f92e2d318577", "score": "0.58063453", "text": "def _provide_custom_dataset(image_file_pattern, batch_size, shuffle=True, num_threads=1, img_size=256):\n filename_queue = tf.train.string_input_producer(\n tf.train.match_filenames_once(image_file_pattern), shuffle=shuffle, capacity=5 * batch_size)\n image_reader = tf.WholeFileReader()\n\n _, image_bytes = image_reader.read(filename_queue)\n image = tf.image.decode_image(image_bytes, channels=1)\n image_norm = normalize_image(image, (img_size, img_size))\n\n if shuffle:\n return tf.train.shuffle_batch([image_norm],\n batch_size=batch_size,\n num_threads=num_threads,\n capacity=5 * batch_size,\n min_after_dequeue=batch_size)\n else:\n return tf.train.batch(\n [image_norm],\n batch_size=batch_size,\n num_threads=1, # no threads so it's deterministic\n capacity=5 * batch_size)", "title": "" }, { "docid": "890240cfd43d3e948994f983b5349de9", "score": "0.580485", "text": "def test_gen(self, test_dir: str, batch_size: int):\n datagen = ImageDataGenerator(preprocessing_function=preprocess_input)\n files = [str(p.name) for p in (Path(test_dir) / 'test_data').glob('*.*') if p.suffix not in ['.gif', '.GIF']]\n metadata = pd.DataFrame({'filename': files})\n gen = datagen.flow_from_dataframe(metadata, directory=f'{test_dir}/test_data', x_col='filename',\n class_mode=None, shuffle=False, batch_size=batch_size)\n return gen, files", "title": "" }, { "docid": "1e54c225cb4d4fe8889399285236f4f8", "score": "0.5800393", "text": "def load_inference_data(path, is_2D= False):\n\n dataset = np.loadtxt(path, delimiter=',')\n\n if is_2D:\n dataset_2d = []\n for series in dataset[:, 1:]:\n # readings are in format: (x1,y1,z1, x2,y2,z2, x3,y3,z3 ...)\n reshaped_series = series.reshape((-1,3)).T\n reshaped_series = np.expand_dims(reshaped_series, axis=-1)\n dataset_2d.append(reshaped_series)\n\n dataset_2d = np.array(dataset_2d)\n return utils.DataSetGenerator(dataset_2d, dataset[:, 0].astype(np.int32)), dataset_2d\n\n else:\n return utils.DataSetGenerator(dataset[:, 1:], dataset[:, 0].astype(np.int32)), dataset", "title": "" }, { "docid": "5a275ceb4725e9e19d53327c54577be6", "score": "0.5773162", "text": "def __init__(self, txt_path='filelist.txt', img_dir='data', transform=None, test=False):\n\n df = pd.read_csv(txt_path, sep=' ', index_col=0)\n self.img_names = df.index.values\n self.txt_path = txt_path\n self.img_dir = img_dir\n self.transform = transform\n self.to_tensor = ToTensor()\n self.to_pil = ToPILImage()\n self.get_image_selector = True if img_dir.__contains__('tar') else False\n self.tf = tarfile.open(self.img_dir) if self.get_image_selector else None\n self.transform_gt = transform if test else Compose(self.transform.transforms[:-1]) # omit noise of ground truth", "title": "" }, {
"docid": "e7b1289be7a64a49e52e0f7d5447a6fc", "score": "0.5761168", "text": "def make_training_dataset(self, images):\n pass", "title": "" }, { "docid": "45931923fdada12658e23bfdfe3fe17b", "score": "0.57561195", "text": "def create_training_dataset(self):\n logging.info(\"Creating training dataset from saved images.\")\n src = (SegmentationItemList.from_folder(self.data_dir)\n .split_by_rand_pct()\n .label_from_func(self.get_label_name, classes=self.codes))\n self.data = (src.transform(get_transforms(), size=self.image_size, tfm_y=True)\n .databunch(bs=self.batch_size)\n .normalize(imagenet_stats))", "title": "" }, { "docid": "803c7652f94fc619b19a062e1ed84c2e", "score": "0.57492286", "text": "def _make_train_generator(params: Params, images_reader: ImagesReader, masks_reader: MasksReader):\n print(\"Loading data\")\n X_train, Y_train = make_train_df(params)\n xtr, xval, ytr, yval = train_test_split(X_train, Y_train, test_size=params.validation_size)\n train_generator, val_generator = generator(xtr, xval, ytr, yval, params.batch_size, images_reader, masks_reader)\n print(\"Data loaded\")\n return train_generator, val_generator", "title": "" }, { "docid": "b108a93c34b7d65f7c9ef8a0ec4b6f4a", "score": "0.57484466", "text": "def __init__(self, input_file_pattern, is_training):\n\n self.reader = tf.TFRecordReader()\n self.input_file_pattern = input_file_pattern\n self.batch_size = 32\n self.values_per_input_shard = 16\n self.input_queue_capacity_factor = 16\n self.num_input_reader_threads = 4\n self.num_preprocess_threads = 4\n\n self.video_id_feature = \"image/video_id\"\n self.image_id_feature = \"image/image_id\"\n self.xs_feature = \"image/xs\"\n self.ys_feature = \"image/ys\"\n self.image_feature = \"image/image\"\n self.shape_feature = \"image/shape\"", "title": "" }, { "docid": "41f2ccd7a5a623718ccba8ec17ce197f", "score": "0.5748031", "text": "def gen_batch_function(data_folder, image_shape):\n middle_shape = (int(image_shape[0]/2), int(image_shape[1]/2))\n low_shape = (int(image_shape[0]/4), int(image_shape[1]/4))\n gt2_shape = (int(image_shape[0]/8), int(image_shape[1]/8))\n gt1_shape = (int(image_shape[0]/16), int(image_shape[1]/16))\n\n def get_batches_fn(batch_size):\n \"\"\"\n Create batches of training data\n :param batch_size: Batch Size\n :return: Batches of training data\n \"\"\"\n image_paths = glob(os.path.join(data_folder, 'image_2', '*.png'))\n label_paths = {\n re.sub(r'_(lane|road)_', '_', os.path.basename(path)): path\n for path in glob(os.path.join(data_folder, 'gt_image_2', '*_road_*.png'))}\n\n random.shuffle(image_paths)\n for batch_i in range(0, len(image_paths), batch_size):\n images = []\n gt_images = []\n for image_file in image_paths[batch_i:batch_i+batch_size]:\n #reading images\n gt_image_file = label_paths[os.path.basename(image_file)]\n image = scipy.misc.imresize(scipy.misc.imread(image_file), image_shape)\n gt= scipy.misc.imresize(scipy.misc.imread(gt_image_file), image_shape)\n gt_image = gt_process(gt)\n\n #add to batch list\n 
images.append(image)\n gt_images.append(gt_image)\n\n yield np.array(images), np.array(gt_images)\n return get_batches_fn", "title": "" }, { "docid": "d509b82a7b980a6d617f0ae1ae055919", "score": "0.57284755", "text": "def load_dataset(self):\n _dataset = []\n\n with open(self.dataset_path, \"r\") as fd:\n txt = fd.readlines()\n if self.dataset_type == \"converted_coco\":\n for line in txt:\n # line: \"<image_path> class_id,x,y,w,h ...\"\n bboxes = line.strip().split()\n image_path = bboxes[0]\n if self.image_path_prefix:\n image_path = path.join(\n self.image_path_prefix, image_path\n )\n xywhc_s = np.zeros((len(bboxes) - 1, 5))\n for i, bbox in enumerate(bboxes[1:]):\n # bbox = class_id,x,y,w,h\n bbox = list(map(float, bbox.split(\",\")))\n xywhc_s[i, :] = (\n *bbox[1:],\n bbox[0],\n )\n _dataset.append([image_path, xywhc_s])\n\n elif self.dataset_type == \"yolo\":\n for line in txt:\n # line: \"<image_path>\"\n image_path = line.strip()\n if self.image_path_prefix:\n image_path = path.join(\n self.image_path_prefix, image_path\n )\n root, _ = path.splitext(image_path)\n with open(root + \".txt\") as fd2:\n bboxes = fd2.readlines()\n xywhc_s = np.zeros((len(bboxes), 5))\n for i, bbox in enumerate(bboxes):\n # bbox = class_id x y w h\n bbox = bbox.strip()\n bbox = list(map(float, bbox.split(\" \")))\n xywhc_s[i, :] = (\n *bbox[1:],\n bbox[0],\n )\n _dataset.append([image_path, xywhc_s])\n\n if len(_dataset) == 0:\n raise FileNotFoundError(\"Failed to find images\")\n\n return _dataset", "title": "" }, { "docid": "2755f8e4d4dd16d77506aea31124578e", "score": "0.57261395", "text": "def __init__(\n self,\n meta_data_path: str,\n tokenizer_path: str,\n batch_size: int = 32,\n ):\n self.meta_df = pd.read_json(\n meta_data_path, orient=\"records\", lines=True\n )\n self.meta_df = self.meta_df.dropna(\n subset=[\"file_path_64\", \"file_path_300\"]\n )\n self.meta_df = self.meta_df.dropna(subset=[\"artist_genre\"])\n self.tokenizer = AutoTokenizer.from_pretrained(tokenizer_path)\n self.label_binarizer = MultiLabelBinarizer()\n self.label_binarizer.fit(self.meta_df[\"artist_genre\"].to_list())\n train_set = self.meta_df.sample(frac=0.7)\n data = self.meta_df[~self.meta_df.index.isin(train_set.index)]\n self.val_set = data.sample(frac=2 / 3)\n self.test_set = data[~data.index.isin(self.val_set.index)]\n self.train_generator = GenreTrainGenerator(\n data=train_set,\n batch_size=batch_size,\n tokenizer=self.tokenizer,\n binarizer=self.label_binarizer,\n )\n self.val_generator = GenreDatasetGenerator(\n data=self.val_set,\n batch_size=256,\n tokenizer=self.tokenizer,\n binarizer=self.label_binarizer,\n )\n self.test_generator = GenreDatasetGenerator(\n data=self.test_set,\n batch_size=256,\n tokenizer=self.tokenizer,\n binarizer=self.label_binarizer,\n )", "title": "" }, { "docid": "433d94fdb44e1a26367acd998aa741ad", "score": "0.57244885", "text": "def __init__(self, in_dir, exts='.jpg'):\n\n # Extend the input directory to the full path.\n in_dir = os.path.abspath(in_dir)\n\n # Input directory.\n self.in_dir = in_dir\n\n # Convert all file-extensions to lower-case.\n self.exts = tuple(ext.lower() for ext in exts)\n\n # Names for the classes.\n self.class_names = []\n\n # Filenames for all the files in the training-set.\n self.filenames = []\n\n # Filenames for all the files in the test-set.\n self.filenames_test = []\n\n # Class-number for each file in the training-set.\n self.class_numbers = []\n\n # Class-number for each file in the test-set.\n self.class_numbers_test = []\n\n # Total number of 
classes in the data-set.\n self.num_classes = 0\n\n # For all files/dirs in the input directory.\n for name in os.listdir(in_dir):\n # Full path for the file / dir.\n current_dir = os.path.join(in_dir, name)\n\n # If it is a directory.\n if os.path.isdir(current_dir):\n # Add the dir-name to the list of class-names.\n self.class_names.append(name)\n\n # Training-set.\n\n # Get all the valid filenames in the dir (not sub-dirs).\n filenames = self._get_filenames(current_dir)\n\n # Append them to the list of all filenames for the training-set.\n self.filenames.extend(filenames)\n\n # The class-number for this class.\n class_number = self.num_classes\n\n # Create an array of class-numbers.\n class_numbers = [class_number] * len(filenames)\n\n # Append them to the list of all class-numbers for the training-set.\n self.class_numbers.extend(class_numbers)\n\n # Test-set.\n\n # Get all the valid filenames in the sub-dir named 'test'.\n filenames_test = self._get_filenames(os.path.join(current_dir, 'test'))\n\n # Append them to the list of all filenames for the test-set.\n self.filenames_test.extend(filenames_test)\n\n # Create an array of class-numbers.\n class_numbers = [class_number] * len(filenames_test)\n\n # Append them to the list of all class-numbers for the test-set.\n self.class_numbers_test.extend(class_numbers)\n\n # Increase the total number of classes in the data-set.\n self.num_classes += 1", "title": "" }, { "docid": "3d3db7f9d4768644a00183cd44e569ee", "score": "0.57202256", "text": "def read_file(self, file_path):\n\n def read_file(file, data):\n def skip():\n pass\n\n class_ids_and_counters = []\n\n while True:\n line = file.readline()\n if not line:\n break\n\n if line.startswith(\"GRT_LABELLED_TIME_SERIES_CLASSIFICATION_DATA_FILE_V1.0\\n\"):\n skip()\n if line.startswith(\"DatasetName\"):\n data.name = line[line.index(':') + 2:-1]\n if line.startswith(\"InfoText\"):\n data.info = line[line.index(':') + 2:-1]\n if line.startswith(\"TotalNumTrainingExamples\"):\n skip()\n if line.startswith(\"NumberOfClasses\"):\n num_classes_expected = int(line[line.index(':') + 2:-1])\n if line.startswith(\"ClassIDsAndCounters\"):\n for i in range(num_classes_expected):\n line = file.readline()\n class_ids_and_counters.append(int(line.split(\"\\t\")[1]))\n data.add_gesture(\"Gesture\", uuid.uuid4())\n if line.startswith(\"ClassIDsAndNames\"):\n for i in range(num_classes_expected):\n line = file.readline()\n data.gestures[i].name = line.split(\"\\t\")[1]\n if line.startswith(\"ClassIDsAndDescriptions\"):\n for i in range(num_classes_expected):\n line = file.readline()\n data.gestures[i].description = line.split(\"\\t\")[1]\n if line.startswith(\"LabelledTimeSeriesTrainingData\"):\n for class_id in range(num_classes_expected):\n for sample in range(class_ids_and_counters[class_id]):\n file.readline() # **** TIME SERIES ****\n file.readline() # ClassID: ...\n line = file.readline()\n if line.startswith(\"SampleName\"):\n data.add_sample(line[line.index(':') + 2:-1], uuid.uuid4(),\n data.gestures[class_id])\n line = file.readline()\n else:\n data.add_sample(\"Sample\", uuid.uuid4(),\n data.gestures[class_id])\n time_series_length = int(line[line.index(':') + 2:-1])\n file.readline() # TimeSeriesData:\n for time_serie in range(time_series_length):\n line = file.readline()\n nums = line.split()\n data.add_time_state(uuid.uuid4(),\n data.get_selected_sample(),\n float(nums[0]) / 100.0,\n (float(nums[1]) / 100.0, float(nums[2]) / 100.0, float(nums[3]) / 100.0),\n (float(nums[4]) / 100.0, float(nums[5]) 
/ 100.0, float(nums[6]) / 100.0))\n\n file = open(file_path)\n data = Data.Data(self.service_locator)\n read_file(file, data)\n file.close()\n return data", "title": "" }, { "docid": "2ed0f5b5a97c9c09ea638400fa7810c0", "score": "0.5701203", "text": "def __init__(self, images, cls, set_ids):\n\n self._num_images = images.shape[0]\n\n # Convert shape from [num examples, rows, columns, depth]\n # to [num examples, rows*columns] (assuming depth == 1)\n # Convert from [0, 255] -> [0.0, 1.0].\n\n # images = images.astype(np.uint8)\n # images = np.multiply(images, 1.0 / 255.0)\n\n self._images = images\n self._cls = cls\n self._set_ids = set_ids\n\n # Set the labels based on cls. \n labels = np.zeros((self._num_images, 10))\n for i, cls_ in enumerate(cls):\n \tlabels[i][cls_] = 1\n self._labels = labels\n\n self._epochs_completed = 0\n self._index_in_epoch = 0", "title": "" }, { "docid": "27e340fe671dae2883f690788af9f1ce", "score": "0.5684839", "text": "def load_data():\n # download the dataset and extract it\n \n data_dir = pathlib.Path(\"C:/Users/eugsa/Tensorflow-GPU/freiburg_groceries_dataset/images\")\n # count how many images are there\n image_count = len(list(data_dir.glob('*/*.png')))\n print(\"Number of images:\", image_count)\n \n CLASS_NAMES = np.array(sorted([item.name for item in data_dir.glob('*') if item.name != \"LICENSE.txt\"]))\n # 20% validation set 80% training set\n image_generator = ImageDataGenerator(rescale=1/255, \n validation_split=0.1)\n # make the training dataset generator\n train_data_gen = image_generator.flow_from_directory(directory=str(data_dir), batch_size=batch_size,\n classes=list(CLASS_NAMES), target_size=(IMAGE_SHAPE[0], IMAGE_SHAPE[1]),\n shuffle=True, subset=\"training\")\n # make the validation dataset generator\n test_data_gen = image_generator.flow_from_directory(directory=str(data_dir), batch_size=batch_size, \n classes=list(CLASS_NAMES), target_size=(IMAGE_SHAPE[0], IMAGE_SHAPE[1]),\n shuffle=True, subset=\"validation\")\n return train_data_gen, test_data_gen, CLASS_NAMES", "title": "" }, { "docid": "e1c47157daa4b8a9b7d2f6b4aaaed3aa", "score": "0.56772566", "text": "def read_training_image_labels(path, classes=range(0, 43)):\n images = []\n labels = []\n # iterate over classes (dirs)\n for dirname in os.listdir(path):\n class_id = int(dirname)\n if class_id not in classes:\n continue\n # iterate over images (files)\n for filename in os.listdir(path + dirname):\n if filename.endswith('.csv'):\n continue\n img_id = filename.partition('.')[0]\n images.append(img_id)\n labels.append(class_id)\n return images, labels", "title": "" }, { "docid": "1111e62e326c6c89fcd48db595f05cdb", "score": "0.567219", "text": "def create_dataset(self, path, size):\n\t\tif not os.path.exists(path):\n\t\t\tos.makedirs(path)\n\n\t\tpath_bbtxt = os.path.join(path, 'annotations.bbtxt')\n\t\twith open(path_bbtxt, 'w') as outfile:\n\t\t\t# Generate the images\n\t\t\tfor i in range(size):\n\t\t\t\timage, labels = self._generate_image(path)\n\n\t\t\t\t# Write image\n\t\t\t\tcv2.imwrite(labels['path'], image)\n\n\t\t\t\t# Write out labels\n\t\t\t\tfor bb in labels['bbs']:\n\t\t\t\t\toutfile.write(labels['path'] + ' 1 1 ' + str(bb['x_min']) + ' ' + str(bb['y_min']) \n\t\t\t\t\t\t+ ' ' + str(bb['x_max']) + ' ' + str(bb['y_max']) + '\\n')", "title": "" }, { "docid": "f35538ba5e00a4dd51007d45b5f3114e", "score": "0.5666955", "text": "def prepare(self):\n with open(self.root / 'annotations' / 'trainval.txt', 'r') as datadesc:\n for line in datadesc:\n if line.startswith('#'):\n continue\n 
fields = line.split(' ')\n self.dataX.append(\n str(self.root / 'images' / (fields[0] + '.jpg'))\n )\n self.dataY.append(int(fields[1]) - 1)\n clsname = fields[0].rsplit('_', 1)[0]\n if not self.dataY[-1] in self.classnames:\n self.classnames[self.dataY[-1]] = clsname\n assert self.classnames[self.dataY[-1]] == clsname\n self.numclasses = len(self.classnames)\n with open(self.root / 'annotations' / 'test.txt', 'r') as datadesc:\n for line in datadesc:\n if line.startswith('#'):\n continue\n fields = line.split(' ')\n self.testX.append(\n str(self.root / 'images' / (fields[0] + '.jpg'))\n )\n self.testY.append(int(fields[1]) - 1)\n self.reset_metrics()\n self.mean, self.std = self.get_input_mean_std()\n self.onehotvectors = np.eye(self.numclasses)", "title": "" }, { "docid": "7fbf71bdd961b317f5781cd0c387c37b", "score": "0.5660028", "text": "def batch_generator(data_dir, image_paths, shutter_speeds, is_os, batch_size, is_training):\n# global image\n images = np.empty([batch_size, IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_CHANNELS])\n shss = np.empty(batch_size)\n isos = np.empty(batch_size)\n while True:\n i = 0\n for index in np.random.permutation(image_paths.shape[0]):\n shutter_speed = shutter_speeds[index]\n is_o = is_os[index]\n scene = image_paths[index]\n if is_training:\n image = load_image(data_dir, scene)\n images[i] = preprocess(image)\n shss[i] = shutter_speed\n isos[i] = is_o\n i += 1\n if i == batch_size:\n break\n labels = [np.array(shss), np.array(isos)]\n yield images, labels", "title": "" }, { "docid": "66da4a9685d0a4e384bdd85fa62ea9ac", "score": "0.5657103", "text": "def __init__(self, input_file_pattern, is_training):\n\n self.reader = tf.TFRecordReader()\n self.input_file_pattern = input_file_pattern\n self.batch_size = 32\n self.values_per_input_shard = 16\n self.input_queue_capacity_factor = 16\n self.num_input_reader_threads = 4\n self.num_preprocess_threads = 4\n\n self.video_id_feature = \"image/video_id\"\n self.image_id_feature = \"image/image_id\"\n self.xs_feature = \"image/xs\"\n self.ys_feature = \"image/ys\"\n self.image_feature = \"image/image\"\n self.shape_feature = \"image/shape\"", "title": "" }, { "docid": "e0b8791f6a311983ebb66ad9a4a0d0b0", "score": "0.5656039", "text": "def train_dataloader():\n\n image_list = os.listdir(IMG_DIR)\n train_dataset = ImageTrainData(IMG_DIR, image_list, SPLIT)\n train_dataloader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True)\n\n return train_dataloader", "title": "" }, { "docid": "96afe8320664aa27b7e9b1aa1f6fdee4", "score": "0.56540585", "text": "def make_dataset(self, dir, paths, class_to_idx):\n\n images = []\n\n for path in paths:\n split = path.split('/')\n product_id = split[0]\n item = (os.path.join(dir, path), class_to_idx[product_id])\n images.append(item)\n\n return images", "title": "" }, { "docid": "33c6d10513dfc44ad4cd3f62c5bfaa1e", "score": "0.5651258", "text": "def data_generator(dataset, config, shuffle=True, augment=False, augmentation=None,\n random_rois=0, batch_size=1, detection_targets=False):\n b = 0 # batch item index\n image_index = -1\n image_ids = np.copy(dataset.image_ids)\n error_count = 0\n\n # Keras requires a generator to run indefinitely.\n while True:\n try:\n # Increment index to pick next image. 
Shuffle if at the start of an epoch.\n image_index = (image_index + 1) % len(image_ids)\n if shuffle and image_index == 0:\n np.random.shuffle(image_ids)\n\n # Get GT bounding boxes and masks for image.\n image_id = image_ids[image_index]\n image, gt_masks = \\\n load_image_gt(dataset, config, image_id, augment=augment,\n augmentation=augmentation,\n use_mini_mask=config.USE_MINI_MASK)\n\n # Init batch arrays\n if b == 0:\n batch_images = np.zeros(\n (batch_size,) + image.shape, dtype=np.float32)\n batch_gt_masks = np.zeros(\n (batch_size, gt_masks.shape[0], gt_masks.shape[1]), dtype=gt_masks.dtype)\n\n # Add to batch\n batch_images[b] = mold_image(image.astype(np.float32), config)\n batch_gt_masks[b] = gt_masks\n\n b += 1\n\n # Batch full?\n if b >= batch_size:\n inputs = [batch_images, batch_gt_masks]\n outputs = []\n\n yield inputs, outputs\n\n # start a new batch\n b = 0\n except (GeneratorExit, KeyboardInterrupt):\n raise\n except:\n # Log it and skip the image\n logging.exception(\"Error processing image {}\".format(\n dataset.image_info[image_id]))\n error_count += 1\n if error_count > 5:\n raise", "title": "" }, { "docid": "cb75ae0b7263a062a011ccb33a8c06f4", "score": "0.56490004", "text": "def read_dataset(data_dir):\n\n train_dataset = list(tfds.as_numpy(tfds.load('cifar100', split='train')))\n test_dataset = list(tfds.as_numpy(tfds.load('cifar100', split='test')))\n dataset = train_dataset + test_dataset\n\n class_images = [[] for _ in range(100)]\n\n for obj in dataset:\n class_images[int(obj['label'])].append(obj['image'])\n\n for cur_images in class_images:\n yield Character(np.asarray(cur_images))", "title": "" }, { "docid": "b6c0f0cd7943e1950774f665d0e2a655", "score": "0.56484634", "text": "def __init__(self, directory, trainset_size=3000):\r\n file = \"ressources/dataset/CATH_info.txt\"\r\n self.seq = []\r\n #cath = open(directory + \"CATH_info.txt\", \"r\")\r\n cath = open(file, \"r\")\r\n for line in cath:\r\n #self.parse(directory + \"dssp/\" + line.split()[0])\r\n self.parse(directory + '/' + line.split()[0])\r\n\r\n self.train = self.seq[:trainset_size]\r\n self.test = self.seq[trainset_size:]\r\n cath.close()", "title": "" }, { "docid": "f33dacef366ed5d1997245cf8048e924", "score": "0.5647407", "text": "def generate_batch_(self, type='train'):\n\n # Convert lists of paths to tensors for tensorflow\n if type == 'train':\n images = tf.convert_to_tensor(self.image_list, dtype=tf.string)\n labels = tf.convert_to_tensor(self.label_list, dtype=tf.float32)\n bbox = tf.convert_to_tensor(self.bbox_list, dtype=tf.int32)\n \n data = tf.data.Dataset.from_tensor_slices((images, labels, bbox))\n data = data.shuffle(buffer_size=self.train_data_len) \n \n else:\n images = tf.convert_to_tensor(self.test_image_list, dtype=tf.string)\n labels = tf.convert_to_tensor(self.test_label_list, dtype=tf.float32)\n bbox = tf.convert_to_tensor(self.test_bbox_list, dtype=tf.int32)\n\n data = tf.data.Dataset.from_tensor_slices((images, labels, bbox))\n data = data.shuffle(buffer_size=self.test_data_len)\n\n\n # Parse images and label\n data = data.map(self._parse_data,\n num_parallel_calls=self.num_threads).prefetch(self.num_prefetch)\n\n # If augmentation is to be applied\n '''if 'flip_lr' in self.augment:\n #print 'flip_lr'\n data = data.map(self._flip_left_right,\n num_parallel_calls=self.num_threads).prefetch(self.num_prefetch)'''\n if 'contrast' in self.augment:\n #print 'contrast'\n data = data.map(self._corrupt_brightness,\n num_parallel_calls=self.num_threads).prefetch(self.num_prefetch)\n 
if 'saturation' in self.augment:\n #print 'saturation'\n data = data.map(self._corrupt_saturation,\n num_parallel_calls=self.num_threads).prefetch(self.num_prefetch)\n if 'brightness' in self.augment:\n #print 'brightness'\n data = data.map(self._corrupt_brightness,\n num_parallel_calls=self.num_threads).prefetch(self.num_prefetch)\n if 'rotate' in self.augment:\n #print 'rotate'\n data = data.map(self._rotate,\n num_parallel_calls=self.num_threads).prefetch(self.num_prefetch)\n \n # Batch, epoch, shuffle the data\n data = data.batch(self.batch_size, drop_remainder=True)\n data = data.repeat(self.epoch)\n\n # Create iterator\n iterator = data.make_one_shot_iterator()\n\n # Next element Op\n next_element = iterator.get_next()\n #init_op = iterator.make_initializer(data)\n return next_element", "title": "" }, { "docid": "095b796759ec6eb132308922d436e9a1", "score": "0.56399953", "text": "def load_training_data(self):\n # Number of files for the training-set.\n _num_files_train = 5\n\n # Number of images for each batch-file in the training-set.\n _images_per_file = 10000\n\n # Total number of images in the training-set.\n # This is used to pre-allocate arrays for efficiency.\n _num_images_train = _num_files_train * _images_per_file\n \n \n # Pre-allocate the arrays for the images and class-numbers for efficiency.\n images = np.zeros(shape=[_num_images_train, self.img_size, self.img_size, self.num_channels], dtype=float)\n cls = np.zeros(shape=[_num_images_train], dtype=int)\n\n # Begin-index for the current batch.\n begin = 0\n\n # For each data-file.\n for i in range(_num_files_train):\n # Load the images and class-numbers from the data-file.\n images_batch, cls_batch = self._load_data(filename=\"data_batch_\" + str(i + 1))\n\n # Number of images in this batch.\n num_images = len(images_batch)\n\n # End-index for the current batch.\n end = begin + num_images\n\n # Store the images into the array.\n images[begin:end, :] = images_batch\n\n # Store the class-numbers into the array.\n cls[begin:end] = cls_batch\n\n # The begin-index for the next batch is the current end-index.\n begin = end\n\n return images, self._one_hot_encoded(class_numbers=cls, num_classes=self.num_classes)", "title": "" }, { "docid": "23d64a0fc33dc3422988a65516f70161", "score": "0.5633206", "text": "def create_dataset_cfg(data_name, dataset_type, class_num, ratio):\n if dataset_type == 'yolo':\n if ratio is None:\n raise TypeError(\"Please indicate ratio arg\")\n img_dir = data_name + \"/images/*.jpg\"\n #label_dir = data_name + \"/labels/*\"\n images = sorted(glob.glob(img_dir))\n random.shuffle(images)\n \n #labels = sorted(glob.glob(label_dir))\n\n train_len = int(len(images)*ratio)\n with open(data_name + '/config/' + '/train.txt', 'w') as f:\n for text in images[:train_len]:\n f.write('data/' + text + \"\\n\")\n\n with open(data_name + '/config/' + '/valid.txt', 'w') as f:\n for text in images[train_len:]:\n f.write('data/' + text + \"\\n\")\n\n with open(data_name + '/config' + f'/{data_name}.data', 'w') as f:\n text = f'class = {class_num} \\ntrain = data/{data_name}/config/train.txt \\nvalid = data/{data_name}/config/valid.txt \\nnames = data/{data_name}/config/classes.txt \\nbackup = data/{data_name}/config/backup'\n f.write(text)\n \n else:\n if ratio is None:\n raise TypeError(\"Please indicate ratio arg\")\n img_dir = data_name + \"/JPEGImages/*.jpg\"\n #label_dir = data_name + \"/labels/*\"\n images = sorted(glob.glob(img_dir))\n random.shuffle(images)\n \n #labels = sorted(glob.glob(label_dir))\n\n train_len = 
int(len(images)*ratio)\n with open(data_name + '/ImageSets/Main/' + '/trainval.txt', 'w') as f:\n for text in images[:train_len]:\n text = text.split('/')[-1]\n text = text.split('.')[0]\n f.write(text + \"\\n\")\n\n with open(data_name + '/ImageSets/Main/' + '/test.txt', 'w') as f:\n for text in images[train_len:]:\n text = text.split('/')[-1]\n text = text.split('.')[0]\n f.write('data/' + text + \"\\n\")", "title": "" }, { "docid": "e43c17f7bec64b65204b06b93b68c1f4", "score": "0.56272846", "text": "def make_batch(self):\n dataset = tf.data.TFRecordDataset(self.get_filenames())\n dataset = dataset.map(self.parser, num_parallel_calls=self.batch_size)\n\n min_queue_examples = int(\n Cifar10DataSet.num_examples_per_epoch(True) * 0.4)\n # Ensure that the capacity is sufficiently large to provide good random shuffling.\n dataset = dataset.shuffle(buffer_size=min_queue_examples + 3 * self.batch_size)\n dataset = dataset.prefetch(buffer_size=2 * self.batch_size)\n\n # Batch it up.\n dataset = dataset.batch(self.batch_size)\n dataset = dataset.repeat()\n self.iterator = dataset.make_initializable_iterator()\n self.image_batch, self.label_batch = self.iterator.get_next()", "title": "" }, { "docid": "d8fdfc238ad0c8ffa826cdeec5d3b63a", "score": "0.5625257", "text": "def __init__(self, DATASET_PATH, BATCH_SIZE=32):\n\n self.batch_size = BATCH_SIZE\n self.load_data(DATASET_PATH)\n self.create_data_batches()", "title": "" }, { "docid": "b550ff003a8b39f9c42b0a949b35fe9b", "score": "0.5601537", "text": "def generate_dataset(batch_size, dir_name=\"inaturalist_12K\"):\n imagegen_without_aug = ImageDataGenerator(\n rescale=1./255,\n validation_split=0.1\n )\n test_datagen = ImageDataGenerator(rescale=1./255)\n\n imagegen = imagegen_without_aug\n\n train_ds = imagegen.flow_from_directory(\n dir_name+\"/train/\",\n subset=\"training\",\n seed=1337,\n class_mode=\"categorical\",\n batch_size=batch_size,\n target_size=MODEL_INPUT[:-1]\n )\n\n val_ds = imagegen.flow_from_directory(\n dir_name+\"/train/\",\n subset=\"validation\",\n seed=1337,\n class_mode=\"categorical\",\n batch_size=batch_size,\n target_size=MODEL_INPUT[:-1]\n\n )\n\n test_ds = test_datagen.flow_from_directory(\n dir_name+\"/val/\",\n seed=1337,\n batch_size=batch_size,\n target_size=MODEL_INPUT[:-1]\n\n )\n\n return train_ds, val_ds, test_ds", "title": "" }, { "docid": "84d8329c337922c9ed6ac97a61c1a228", "score": "0.55932796", "text": "def build_generator(self):\n\n self.train_generator = \\\n DataGenerator(args=self.args,\n dictionary=self.dictionary,\n n_classes=self.n_classes,\n feature_shapes=self.feature_shapes,\n n_anchors=self.n_anchors,\n shuffle=True)", "title": "" }, { "docid": "24a6a5be858e99f4a440cce08268c5e3", "score": "0.5593011", "text": "def _define_train_test_fold_orig(dataset_path, file_name=\"dataset_specs.txt\"):\n n_groups = 10\n groups = list(range(1, n_groups + 1))\n \n n_images_per_group = 12\n # images = list(range(1, n_images_per_group + 1))\n\n d = dict()\n d[\"n_groups\"] = n_groups\n d[\"n_images_per_group\"] = n_images_per_group\n d[\"train_groups\"] = groups[0:8] # 1...8\n d[\"test_groups\"] = groups[8:n_groups] # 9...10\n \n # provide other info such as image_shape\n # TODO: make sure to check that all images / groups are used\n \n # write to txt\n with open(path.join(dataset_path, file_name), 'x') as file:\n file.write(json.dumps(d))", "title": "" }, { "docid": "f3648618d7eaab1d04c108b7d68163c4", "score": "0.5592695", "text": "def train_dataset() -> Dataset:\n source = (\n Path(__file__).parent.parent\n / 
\"resources\"\n / \"data\"\n / \"emotions_with_transformers.txt\"\n )\n\n train_dataset = Dataset.from_csv(\n paths=str(source), delimiter=\";\", column_names=[\"text\", \"label\"]\n )\n return train_dataset", "title": "" }, { "docid": "e43ec09777543ca1de559d101ecc40d8", "score": "0.55710644", "text": "def data_generator(dataset,config,shuffle=True, augment=False):\n b = 0 # batch item index\n image_index = -1\n \n image_ids = np.copy(dataset.image_ids)\n \n error_count = 0\n # Keras requires a generator to run indefinately.\n while True:\n try:\n # Increment index to pick next image. Shuffle if at the start of an epoch.\n image_index = (image_index + 1) % len(image_ids)\n if shuffle and image_index == 0:\n np.random.shuffle(image_ids)\n image_id = image_ids[image_index]\n #image_meta:image_id,image_shape,windows.active_class_ids\n image,label=load_image_label(dataset,image_id)\n # Init batch arrays\n if b == 0:\n batch_images = np.zeros((config.BATCH_SIZE ,)+ image.shape, dtype=np.float32)\n batch_label = np.zeros((config.BATCH_SIZE),dtype=np.float32)\n batch_images[b] = image\n batch_label[b] = label\n b += 1\n # Batch full?\n # input_image,input_labels\n if b >= config.BATCH_SIZE:\n batch_label=np.reshape(batch_label,[config.BATCH_SIZE,1])\n inputs = (batch_images,batch_label)\n yield inputs\n # start a new batch\n b = 0\n except (GeneratorExit, KeyboardInterrupt):\n raise\n except:\n # Log it and skip the image\n logging.exception(\"Error processing image {}\".format(dataset._images_pathes[image_id]))\n error_count += 1\n if error_count > 5:\n raise", "title": "" }, { "docid": "2c63386100b79f30d8d20d10a1fe72c5", "score": "0.55688435", "text": "def _parse_data_classification(image_paths, labels, target_size, class_len):\n image_content = tf.read_file(image_paths)\n\n images = tf.image.decode_jpeg(image_content, channels=3)\n images = tf.image.resize_images(images, target_size)\n labels = tf.one_hot(labels, class_len)\n # print (images)\n\n return images, labels", "title": "" }, { "docid": "63407a9edeb70032b910491a7e45f912", "score": "0.55673033", "text": "def _single_class_reader(directory):\n fnames = [os.path.join(directory, fname) for fname in os.listdir(directory)\n if re.search(r'jpe?g', fname)]\n tf.logging.info('class %s, %d files',\n directory, len(fnames))\n fnames = tf.constant(fnames)\n fname_queue = tf.train.string_input_producer(fnames)\n reader = tf.WholeFileReader()\n\n key, value = reader.read(fname_queue)\n img_value = tf.image.decode_jpeg(value, channels=3)\n\n return img_value", "title": "" }, { "docid": "794925d826a34637f531cb4b22cf4657", "score": "0.5552317", "text": "def train(data_path, split_num, is_classification):\n global TRAIN_DATA\n load_data(data_path, split_num)\n\n def reader():\n for d in TRAIN_DATA:\n if is_classification:\n yield d[:-1], int(d[-1])\n else:\n yield d[:-1], d[-1:]\n\n return reader", "title": "" }, { "docid": "a78a01d9db6c03655633a7ee2e2ca2c0", "score": "0.5552247", "text": "def __init__(self, data, tgff_filename):\n # print \"I'm in TGFFGenerator __int__!\"\n self.data = data\n #self.tgff_path = \"/home/rashad/Documents/tgff-3.6/\"\n #self.tgff_path_example = self.tgff_path + 'examples/'\n self.tgff_path_example = './'\n self.tgff_filename = tgff_filename + '.tgffopt'\n self.write_file()", "title": "" }, { "docid": "76711315f9db184480992d8b09ca98a5", "score": "0.55500835", "text": "def read_images(data_dir, tensor_shape=(256, 256),\n filter_by_class=None, verbose=1):\n images_arrays = []\n masks_arrays = []\n for i in 
glob.glob(os.path.join(data_dir, '*image.tif')):\n tiled = tile(i, i.replace('image.tif', 'label.tif'),\n tensor_shape, filter_by_class)\n images_arrays.extend(tiled[0])\n masks_arrays.extend(tiled[1])\n\n if len(images_arrays) == 0:\n raise DatasetError('No training samples created. Check the size of '\n 'the images in the data_dir or the appearance of '\n 'the classes you are interested in in labels')\n\n if masks_arrays[0].ndim == 2:\n masks_arrays = [np.expand_dims(i, -1) for i in masks_arrays]\n\n # create TF datasets\n images_dataset = tf.data.Dataset.from_tensor_slices(images_arrays)\n masks_dataset = tf.data.Dataset.from_tensor_slices(masks_arrays)\n\n im_nr = len(images_arrays)\n if verbose > 0:\n print('Created {} training samples from the provided '\n 'image.'.format(im_nr))\n\n return images_dataset, masks_dataset", "title": "" }, { "docid": "9ec4164b96ae075c423bce0db23d5dfe", "score": "0.5545518", "text": "def create_generator(config, data_shape):\n sampler = create_sampler(config)\n attention = create_attention(\n config.attention_type, data_shape,\n read_size=config.attention_read_size, write_size=config.attention_write_size)\n decoder = Cell(config.num_units, config.num_layers, scope='Decoder')\n\n model = Generator(decoder, sampler, attention)\n\n for step in xrange(config.num_steps):\n with tf.name_scope('step{0}'.format(step)):\n model(outputs_collections=graph_utils.GraphKeys.RNN_OUTPUTS)\n\n return [tf.sigmoid(output) for output in model.outputs]", "title": "" }, { "docid": "e0b9251f654c43a4364d1779e642e4cb", "score": "0.55430055", "text": "def make_sequence_example(data, file_properties):\n\n features = {\n 'example_id': tf.train.Feature(bytes_list=tf.train.BytesList(value=[file_properties.uid])),\n 'label': tf.train.Feature(int64_list=tf.train.Int64List(value=[file_properties.label])),\n 'network_depth': tf.train.Feature(int64_list=tf.train.Int64List(value=[len(data)]))\n }\n \n\n\n for layer, am_layer in enumerate(data):\n features['num_rows/{:02d}'.format(layer + 1)] = tf.train.Feature(int64_list=tf.train.Int64List(value=[am_layer.shape[0]]))# model spec\n features['num_columns/{:02d}'.format(layer + 1)] = tf.train.Feature(int64_list=tf.train.Int64List(value=[am_layer.shape[1]]))# model spec\n features['img/{:02d}'.format(layer + 1)] = tf.train.Feature(bytes_list=tf.train.BytesList(value=[am_layer.tostring()]))\n\n \n\n example = tf.train.Example(features=tf.train.Features(feature=features))\n\n return example", "title": "" }, { "docid": "a55399a7c8a1ea0bb210278a4ad48eb4", "score": "0.55276066", "text": "def __data_generation(self, batch):\n X_img = []\n y = []\n # Generate data\n for frames, label in batch.values:\n # Read image\n x_1 = np.array(Image.open(data_prefix + '/rgb_0/' + str(frames[0]) + '.jpg').resize(RESHAPE))\n x_2 = np.array(Image.open(data_prefix + '/rgb_0/' + str(frames[1]) + '.jpg').resize(RESHAPE))\n\n # Normalization\n\n # preprocess in the image_net preprocess (linear combination so doesn't matter which one as long as its consistent with the training\n x_1 = preprocess_input(x_1)\n x_2 = preprocess_input(x_2)\n # Some image augmentation codes\n if self.augment:\n key = random.choice(list(available_transformations))\n if np.random.randint(2):\n x_1 = available_transformations[key](x_1)\n else:\n x_2 = available_transformations[key](x_2)\n\n X_img.append(np.concatenate([x_1, x_2], axis=-1))\n y.append(label)\n if self.n_classes > 1:\n output = keras.utils.to_categorical(y, num_classes=self.n_classes)\n else:\n output = y\n return 
np.array(X_img), output", "title": "" }, { "docid": "9d676e09607cd02b2271aa71f6dcede2", "score": "0.5527555", "text": "def data_reader(train_input_dir, shuffle):\n train_file_paths = []\n train_image_ids = []\n train_image_widths = []\n train_image_heights = []\n\n for img_file in scandir(train_input_dir):\n if img_file.name.endswith('.jpg') and img_file.is_file():\n train_file_paths.append(img_file.path)\n train_image_ids.append(img_file.name[:-4])\n img = cv2.imread(img_file.path, cv2.IMREAD_COLOR)\n height, width, _ = img.shape\n train_image_heights.append(height)\n train_image_widths.append(width)\n\n if shuffle is True:\n # Shuffle the ordering of all image files in order to guarantee\n # random ordering of the images with respect to label in the\n # saved TFRecord files. Make the randomization repeatable.\n shuffled_index = list(range(len(train_file_paths)))\n random.seed(12345)\n random.shuffle(shuffled_index)\n\n train_file_paths = [train_file_paths[i] for i in shuffled_index]\n train_image_ids = [train_image_ids[i] for i in shuffled_index]\n train_image_heights = [train_image_heights[i] for i in shuffled_index]\n train_image_widths = [train_image_widths[i] for i in shuffled_index]\n\n return train_file_paths, train_image_ids, train_image_heights, train_image_widths", "title": "" }, { "docid": "d65b27dd8531c7588c203d8158038e4a", "score": "0.5521945", "text": "def make_batch(self, batch_size):\n filenames = self.get_filenames()\n # Repeat infinitely.\n dataset = tf.contrib.data.TFRecordDataset(filenames).repeat()\n\n # Parse records.\n dataset = dataset.map(\n self.parser, num_threads=4, output_buffer_size=2 * batch_size)\n\n # Potentially shuffle records.\n if self.subset == 'train':\n min_queue_examples = int(\n DataSet.num_examples_per_epoch(self.subset) * 0.1)\n # Ensure that the capacity is sufficiently large to provide good random\n # shuffling.\n dataset = dataset.shuffle(buffer_size=min_queue_examples + 3 * batch_size)\n\n # Batch it up.\n #dataset = dataset.prefetch(buffer_size=2 * batch_size)\n dataset = dataset.batch(batch_size)\n \n\n iterator = dataset.make_one_shot_iterator()\n image_batch, label_batch, subejct_id, ind = iterator.get_next()\n\n return image_batch, label_batch, subejct_id, ind", "title": "" }, { "docid": "b40fc4af45c2324992b0f5602dc510ab", "score": "0.5520936", "text": "def __init__(self, img_rows=512, img_cols=512, weight_filepath=None):\n \n # Settings\n self.weight_filepath = weight_filepath\n self.img_rows = img_rows\n self.img_cols = img_cols\n assert self.img_rows >= 256, 'Height must be >256 pixels'\n assert self.img_cols >= 256, 'Width must be >256 pixels'\n\n # Set current epoch\n self.current_epoch = 0\n \n # Create UNet-like model\n self.input, self.output, self.model = self.build_pconv_unet()", "title": "" }, { "docid": "32207800337226c8a7c9addd0cdf68e5", "score": "0.5516457", "text": "def create_data_loader(main_folder, min_traj, max_traj,\n num_actions, stack=4, data_ext='npz',\n image_ext='png', batch_size=32, shuffle=True,\n preprocess_fcn=None, grayscale=True):\n if isinstance(main_folder, list):\n frame_path_list = []\n actions_one_hot = []\n for i in range(len(main_folder)):\n frame_paths, actions = _gather_frame_paths_and_actions(\n main_folder[i], min_traj[i], max_traj[i], num_actions, stack=4,\n data_ext='npz', image_ext='png'\n )\n frame_path_list.extend(frame_paths)\n actions_one_hot.extend(actions)\n else:\n frame_path_list, actions_one_hot = _gather_frame_paths_and_actions(\n main_folder, min_traj, max_traj, num_actions, 
stack=4,\n data_ext='npz', image_ext='png'\n )\n\n data_gen = DataGenerator(\n path_list=frame_path_list,\n targets=actions_one_hot,\n batch_size=batch_size,\n shuffle=shuffle,\n ext=image_ext,\n preprocess_fcn=preprocess_fcn,\n stack=stack,\n grayscale=grayscale\n )\n return data_gen", "title": "" }, { "docid": "c2cab2ac020a7f32f220108045f2b8d5", "score": "0.5511252", "text": "def test_dataloader():\n\n image_list = os.listdir(IMG_DIR)\n test_dataset = ImageTrainData(IMG_DIR, image_list, SPLIT)\n test_dataloader = DataLoader(test_dataset, batch_size=BATCH_SIZE, shuffle=True)\n\n return test_dataloader", "title": "" }, { "docid": "41ec7459b6a0bf0f8297d9ab76114475", "score": "0.54982424", "text": "def __init__(self, filepath, train=True, test=False):\n self.isTrain = train\n self.isTest = test\n\n # load images from filepath\n if self.isTest and not self.isTrain: # only test mode\n imgs = [os.path.join(filepath + '/test', img) for img in os.listdir(filepath + '/test')]\n imgs = sorted(imgs, key=lambda x: int(x.split('.')[-2].split('/')[-1]))\n else: # train or valid mode\n imgs = [os.path.join(filepath+'/train', img) for img in os.listdir(filepath+'/train')]\n imgs = sorted(imgs, key=lambda x: int(x.split('.')[-2]))\n\n imgs_num = len(imgs)\n\n if self.isTest:\n self.imgs = imgs\n elif self.isTrain:\n self.imgs = imgs[:int(0.8 * imgs_num)] # train: valid = 8: 2\n else:\n self.imgs = imgs[int(0.8 * imgs_num):]\n\n # normalize each channel of the input by (input[channel] = (input[channel] - mean[channel]) / std[channel])\n normalize = T.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n\n if self.isTest or not self.isTrain: # for test image\n self.transforms = T.Compose([\n T.Resize(224),\n T.CenterCrop(224),\n T.ToTensor(),\n normalize\n ])\n else: # for train or valid image\n self.transforms = T.Compose([\n T.Resize(256),\n T.RandomResizedCrop(224),\n T.RandomHorizontalFlip(),\n T.ToTensor(),\n normalize\n ])", "title": "" }, { "docid": "6d9744d587ff6916515d66b4e58c67d3", "score": "0.5495102", "text": "def LoadData(self):\n\t\tImages = np.load('./Flowers/flower_imgs.npy')\n\t\tLabels = np.load('./Flowers/flower_labels.npy')\n\t\tn = Labels.size\n\t\tindices = list(range(n))\n\t\tsplit = int(np.floor(0.1 * n))\n\t\t# np.random.seed(1234) # fix the training and test datasets for bettering tuning the parameters.\n\t\tnp.random.shuffle(indices)\n\t\ttrain_indices, test_indices = indices[split:], indices[:split]\n\t\tself.img_mean = np.mean(np.swapaxes(Images/255.0,0,1).reshape(3,-1),1)\n\t\tself.img_std = np.std(np.swapaxes(Images/255.0,0,1).reshape(3,-1),1)\n\n\t\tnormalize = transforms.Normalize(mean=list(self.img_mean), std=list(self.img_std))\n\n\t\tTrainTrans = transforms.Compose([\n\t\t\ttransforms.ToPILImage(),\n \ttransforms.RandomCrop(28),\n \ttransforms.Resize(32),\n \ttransforms.RandomHorizontalFlip(),\n \ttransforms.RandomRotation(10),\n \ttransforms.ToTensor(),\n \tnormalize,\n\t\t\t])\n\t\tTestTrans = transforms.Compose([\n\t\t\ttransforms.ToPILImage(),\n\t\t\ttransforms.ToTensor(),\n\t\t\tnormalize,\n\t\t\t])\n\t\ttrain_dataset = MyDataset(Images[train_indices],Labels[train_indices],TrainTrans)\n\t\ttest_dataset = MyDataset(Images[test_indices], Labels[test_indices], TestTrans)\n\n\t\tself.Train_dataloader = DataLoader(dataset=train_dataset, batch_size = 128, shuffle=True)\n\t\tself.Test_dataloader = DataLoader(dataset=test_dataset, batch_size = 128, shuffle=False)", "title": "" }, { "docid": "f7f0685076ac0ddc4b30d3ed96840d55", "score": "0.5491918", "text": "def 
make_dataset(\n directory: str,\n classes: List[str],\n extensions: Optional[Tuple[str, ...]] = IMG_EXTENSIONS,\n is_valid_file: Optional[Callable[[str], bool]] = None,\n limit_classes: Optional[Dict[str, int]] = None,\n shuffle: bool = False,\n) -> List[Tuple[str, int]]:\n instances = []\n directory = os.path.expanduser(directory)\n both_none = extensions is None and is_valid_file is None\n both_something = extensions is not None and is_valid_file is not None\n if both_none or both_something:\n raise ValueError(\"Both extensions and is_valid_file cannot be None or not None at the same time\")\n if extensions is not None:\n\n def is_valid_file(x: str) -> bool:\n return has_file_allowed_extension(x, cast(Tuple[str, ...], extensions))\n\n is_valid_file = cast(Callable[[str], bool], is_valid_file)\n for target_class in classes:\n target_dir = os.path.join(directory, target_class)\n if not os.path.isdir(target_dir):\n continue\n for root, _, fnames in sorted(os.walk(target_dir, followlinks=True)):\n limit_counter = 0\n if shuffle:\n np.random.shuffle(fnames)\n else:\n fname = sorted(fnames)\n for fname in fnames:\n if limit_classes:\n if target_class in limit_classes:\n if limit_counter < limit_classes[target_class]:\n limit_counter += 1\n else:\n break\n path = os.path.join(root, fname)\n if is_valid_file(path):\n instances.append(path)\n return instances", "title": "" }, { "docid": "37b86743165675256a1ebd26686d2bec", "score": "0.5489631", "text": "def get_training_datagen(generator, training, img_directory, targetsize, batchsize):\n\n return generator.flow_from_dataframe(dataframe=training,\n directory=img_directory,\n x_col=\"filename\",\n y_col=\"steering_angle\",\n target_size=targetsize,\n batch_size=batchsize,\n class_mode='other')", "title": "" }, { "docid": "5fae2046fa09a015f94bd92502cff17a", "score": "0.54870003", "text": "def generator(self, image_id):\n # print(\"=========prepare for gt=========\")\n gt = self.prepare_image(image_id, augment=self.augment, augmentation=self.augmentation)\n if gt is None:\n return None\n else:\n image, class_ids, bbox, gt_y, gt_x = gt\n\n mean = np.array([0.485, 0.456, 0.406])\n std = np.array([0.229, 0.224, 0.225])\n mean = np.reshape(mean, [1, 1, 3])\n std = np.reshape(std, [1, 1, 3])\n image = (image / 255. 
- mean) / std\n # bbox: [num_instances, (y1, x1, y2, x2)]\n gt_top = np.expand_dims((gt_y - bbox[..., 0]), axis=-1)\n gt_bot = np.expand_dims((bbox[..., 2] - gt_y), axis=-1)\n gt_left = np.expand_dims((gt_x - bbox[..., 1]), axis=-1)\n gt_right = np.expand_dims((bbox[..., 3] - gt_x), axis=-1)\n gt_y = np.expand_dims(gt_y, axis=-1)\n gt_x = np.expand_dims(gt_x, axis=-1)\n class_ids = np.expand_dims(class_ids, axis=-1)\n # print(\"picture class_ids: \", class_ids)\n gt_basic = [gt_y, gt_x, gt_top, gt_left, gt_bot, gt_right, class_ids]\n gt = np.concatenate(gt_basic, axis=-1)\n instance_num = np.shape(gt)[0]\n if instance_num <= self.config.MAX_GT_INSTANCES:\n gt = np.pad(gt, ((0, self.config.MAX_GT_INSTANCES - instance_num), (0, 0)), mode='constant')\n else:\n gt = gt[:self.config.MAX_GT_INSTANCES, ...]\n # Resize masks to 1/4 smaller size for stride 4 feature\n # resize_mask(mask, scale, padding, ordernum=0, crop=None):\n # padding = [(0, 0), (0, 0), (0, 0)]\n # stride_mask = resize_mask(vector_mask, 0.25, padding, 0)\n # print(np.sum(stride_mask[..., 0::2], axis=(0, 1)))\n # print(np.amax(stride_mask, axis=(0, 1)))\n # print(\"========get one gt===========\")\n return image, gt", "title": "" }, { "docid": "828483539e2d4d5c4d393071dd07d590", "score": "0.5482282", "text": "def generate(self, iterations=150):\n initial_learning_rate = 6\n for i in range(1, iterations):\n # Process image and return variable\n self.processed_image = preprocess_image(self.created_image, False)\n\n # Define optimizer for the image\n optimizer = SGD([self.processed_image], lr=initial_learning_rate)\n # Forward\n output = self.model(self.processed_image)\n # Target specific class\n class_loss = -output[0, self.target_class]\n\n if i % 10 == 0 or i == iterations-1:\n print('Iteration:', str(i), 'Loss',\n \"{0:.2f}\".format(class_loss.data.numpy()))\n # Zero grads\n self.model.zero_grad()\n # Backward\n class_loss.backward()\n # Update image\n optimizer.step()\n # Recreate image\n self.created_image = recreate_image(self.processed_image)\n if i % 10 == 0 or i == iterations-1:\n # Save image\n im_path = '../generated/class_'+str(self.target_class)+'/c_'+str(self.target_class)+'_'+'iter_'+str(i)+'.png'\n save_image(self.created_image, im_path)\n\n return self.processed_image", "title": "" }, { "docid": "1c2f164fc4b8c07a4253f4cbe900dcec", "score": "0.54793566", "text": "def read_dataset_file(dataset_path, file_path, img_dir, split, class_type, model_dir):\n\n out_img_dir = \"\"\n\n lines = open(file_path).readlines()\n for line in lines:\n in_json_file = line.strip()\n\n labels = []\n for line in lines:\n json_file = line.strip()\n with open(join(dataset_path, json_file)) as f:\n d = json.load(f)\n img_path = join(img_dir, d.get(\"image_path\"))\n bboxes = d.get(\"bboxes\")\n # read the image\n\n for box in bboxes:\n # crop\n\n # save\n\n # call model to label\n # format is x0,y0,x1,y1\n bboxes = np.array([np.array(box[:4]) for box in bboxes])\n\n # Remove invalid bboxes where w or h are 0\n invalid = np.where(np.logical_or(bboxes[:, 2] < bboxes[:, 0],\n bboxes[:, 3] < bboxes[:, 1]))\n bboxes = np.delete(bboxes, invalid, 0)\n datum = {\n \"img_path\": img_path,\n \"bboxes\": bboxes\n }\n data.append(datum)", "title": "" }, { "docid": "ec35832cea431778d13cb225eb606a34", "score": "0.5476094", "text": "def gen_batch_function(data, image_shape):\n def get_batches_fn(batch_size):\n \"\"\"\n Create batches of training data\n :param batch_size: Batch Size\n :return: Batches of training data\n \"\"\"\n #random.shuffle(data)\n 
new_data = equalize_distribution(data)\n for batch_i in range(0, 10*batch_size, batch_size):\n images = []\n labels = []\n while(len(images) < batch_size):\n #for image_file, image_label in new_data[batch_i:batch_i+batch_size]:\n img_index = np.random.randint(len(new_data))\n image_file = new_data[img_index][0]\n image_label = new_data[img_index][1]\n image = cv2.cvtColor(cv2.imread(image_file), cv2.COLOR_BGR2RGB)\n resize_img = cv2.resize(image,dsize=image_shape,interpolation= cv2.INTER_AREA)\n # apply image augmentation\n translate_limit = [-10, 10]\n rotate_limit = [-30, 30]\n noise_sigma = np.random.randint(5,20)\n image = image_augmentation(resize_img, translate_limit,rotate_limit,noise_sigma)\n #image = resize_img\n #normalizing image\n image = normalizing(image)\n if image_label == 'red':\n labels.append(0)\n else:\n labels.append(1)\n images.append(image)\n #gt_images.append(gt_image)\n\n yield np.array(images), np.array(labels)\n return get_batches_fn", "title": "" }, { "docid": "c005012d5d0af468c2429adea9dba0e9", "score": "0.5472469", "text": "def __init__(self, metadata_file):\n with open(metadata_file, 'r') as fp:\n metadata = json.load(fp)\n input_image_dims = tuple(metadata[\"input_image_dims\"])\n models_dir = metadata[\"models_dir\"]\n training_set = metadata[\"training_set\"]\n number_of_classes = len(training_set)\n\n self.models_dir = models_dir\n self.number_of_classes = number_of_classes\n self.input_image_dims = input_image_dims", "title": "" }, { "docid": "02eb9fa3344225c24473973e1d6b4eea", "score": "0.5460684", "text": "def gen_batch_function(data_folder, image_shape):\n image_paths = glob(os.path.join(data_folder, 't*', '*.jpg'))\n for image_file in image_paths:\n original = scipy.misc.imread(image_file)\n image = scipy.misc.imresize(original, image_shape)\n if original.shape != image.shape:\n scipy.misc.imsave(image_file, image)\n\n def get_batches_fn(batch_size):\n \"\"\"\n Create batches of training data\n :param batch_size: Batch Size\n :return: Batches of training data\n \"\"\"\n\n random.shuffle(image_paths)\n for batch_i in range(0, len(image_paths), batch_size):\n images = []\n labels = []\n for image_file in image_paths[batch_i:batch_i+batch_size]:\n\n image = scipy.misc.imread(image_file)\n label = int(os.path.dirname(image_file)[-1])\n\n #augment the image\n image = distort_image(image)\n images.append(image)\n labels.append(label)\n\n images_np = np.array(images)\n labels_np = np.array(labels)\n #print(\"images_size:\"+str(images_np.shape))\n #print(\"labels_size:\" + str(labels_np.shape))\n yield images_np, labels_np\n\n return get_batches_fn", "title": "" }, { "docid": "801c360cdc8576bd4f589b1a9efe4538", "score": "0.54495484", "text": "def __init__(self, config):\n self.input_fn = dataloader.InputReader(\n FLAGS.file_pattern,\n is_training=not FLAGS.eval,\n use_fake_data=False,\n max_instances_per_image=config.max_instances_per_image)\n\n self.params = dict(\n config.as_dict(), batch_size=FLAGS.samples, model_name=FLAGS.model_name)\n logging.info(self.params)\n self.cls_to_label = config.label_map\n os.makedirs(FLAGS.save_samples_dir, exist_ok=True)", "title": "" }, { "docid": "1152c0b0a204b2e751f4b90bf1fba8a1", "score": "0.54473263", "text": "def __init__(self, input_files, number_images_in_batch, number_rois_per_image,\n max_foreground_rois_per_image):\n self.files = input_files\n self.current_file = 0\n\n self.number_images_in_batch = number_images_in_batch\n self.number_rois_per_image = number_rois_per_image\n self.max_foreground_rois_per_image = 
max_foreground_rois_per_image\n\n with open(input_files[self.current_file], 'rb') as fo:\n # Each item (dictionary) in the list returned by load method contains the\n # following fields:\n #\n # image - Pixels of the resized input image\n # gt_bboxes - Information about the ground truth boxes for the image (not needed here)\n # rois - Information about foreground rois. Contains bbox, class and reg target\n # rois_background - Information about background rois. Contains bbox and class\n self.data = pickle.load(fo, encoding='latin1')\n\n # next_records represents the row that marks the beginning of the next batch\n self.next_record = 0\n self.total_records = len(self.data)", "title": "" }, { "docid": "7e4f3f17d1119ae8875a1a9118a47616", "score": "0.54418004", "text": "def train(text_filepath, textgen, num_epochs=50, gen_epochs=1, batch_size=1024, dropout=0.05, train_size=0.8,\n verbose=1, validation=True, gen_text_length=500, train_new_model=True, **kwargs):\n\n with open(text_filepath, 'r', encoding='utf8', errors='ignore') as f:\n texts = [f.read()]\n\n print(\"Training a {}LSTM model with {}-layers each with {} cells\".format(\n 'Bidirectional ' if textgen.config['bidirectional'] else '',\n textgen.config['rnn_layers'], textgen.config['rnn_size']\n ))\n\n if train_new_model:\n print('Training a new model...')\n if textgen.vocab_filepath is None:\n textgen.build_vocab(texts)\n textgen.model = chargen_model(textgen.num_of_classes,\n dropout=dropout,\n cfg=textgen.config)\n textgen.save_files()\n\n # calculate all of the combinations of token indices and text indices\n list_of_indices = [np.meshgrid(np.array(i), np.arange(\n len(text) + 1)) for i, text in enumerate(texts)]\n list_of_indices = np.block(list_of_indices)\n\n # Remove the two extra indices\n # Remove initial sequence with padding\n list_of_indices = list_of_indices[textgen.config['input_length']:-2, :]\n\n indices_mask = np.random.rand(list_of_indices.shape[0]) < train_size\n\n gen_val = None\n val_steps = None\n if train_size < 1.0 and validation:\n list_of_indices_val = list_of_indices[~indices_mask, :]\n gen_val = generate_sequences_from_texts(\n texts, list_of_indices_val, textgen, batch_size)\n val_steps = max(\n int(np.floor(list_of_indices_val.shape[0] / batch_size)), 1)\n\n list_of_indices = list_of_indices[indices_mask, :]\n\n num_tokens = list_of_indices.shape[0]\n assert num_tokens >= batch_size, \"Less tokens than the batch_size.\"\n\n print(\"Training on {:,} {} sequences.\".format(num_tokens, 'Character'))\n\n steps_per_epoch = max(int(np.floor(num_tokens / batch_size)), 1)\n\n gen = generate_sequences_from_texts(\n texts, list_of_indices, textgen, batch_size)\n\n base_lr = 4e-3\n\n # inline definition of LearningRateScheduler function\n def lr_linear_decay(epoch):\n return (base_lr * (1 - (epoch / num_epochs)))\n\n textgen.model.fit_generator(gen, steps_per_epoch=steps_per_epoch,\n epochs=num_epochs,\n callbacks=[\n LearningRateScheduler(lr_linear_decay),\n GenerateAfterEpoch(textgen, gen_epochs, gen_text_length), save_model_weights(\n textgen.config['name'])],\n verbose=verbose,\n max_queue_size=2,\n validation_data=gen_val,\n validation_steps=val_steps\n )", "title": "" }, { "docid": "782add3da8c45342fefccff98931e5c0", "score": "0.5438889", "text": "def generate_dataset(cls, watermark, images):\n # Create a dataset, and save it.\n training_set = []\n for image in images:\n raw_image = copy.deepcopy(image)\n watermarked_image = cls._add_watermark(\n watermark=watermark,\n image=raw_image\n )\n\n datapoint = 
cls._create_datapoint(\n watermarked=watermarked_image,\n original=image\n )\n training_set.append(datapoint)\n\n cls._save_dataset(dataset=training_set, fname=cls.TRAINING_FNAME)", "title": "" }, { "docid": "754c96a65320c18b77dcd9a5a62f63c1", "score": "0.54353064", "text": "def __init__(self, root_dir='../datasets/', stage_name='stage1', data_format='channels_first', use_edges=False,\n use_pix2pix=False, training_batch_size=1):\n train_path = Path('{}/{}_train/'.format(root_dir, stage_name))\n test_path = Path('{}/{}_test/'.format(root_dir, stage_name))\n\n self.train_images = list(Path(train_path).glob('*/images/*.png'))\n self.train_masks = list(Path(train_path).glob('*/masks'))\n self.test_images = list(Path(test_path).glob('*/images/*.png'))\n self.data_format = data_format\n self.use_edges = use_edges\n self.use_pix2pix = use_pix2pix\n # Note: Setting batch size to 1 can lead to problmes when training for long iterations\n # See https://discuss.pytorch.org/t/nan-when-i-use-batch-normalization-batchnorm1d/322/16\n self.train_batch_size = training_batch_size\n\n self.tfrecords_train_path = '{}/tfrecords/{}_train.tfrecords'.format(root_dir, stage_name)\n self.tfrecords_test_path = '{}/tfrecords/{}_test.tfrecords'.format(root_dir, stage_name)", "title": "" }, { "docid": "5ec7d8743676a2253a3ba1957f0b9dda", "score": "0.5430186", "text": "def _create_data_pipeline(self):\n with tf.name_scope(FileListProcessor_Semantic_Segmentation.dataprovider_namescope+'_gen_crops_then_transform'):\n \"\"\"create a first \"queue\" (actually a list) of pairs of image filenames\n and generate data samples (whole read images)\n \"\"\"\n #1. let the dataset load the raw images and associates possible metadata as last channel(s)\n self._create_dataset_filenames()\n\n\n #2. 
transform the dataset samples convert raw images into crops\n if self.full_frame_mode == True:\n self.dataset=self.dataset.map(map_func=self._load_raw_images_from_filenames, num_parallel_calls=self.num_reader_threads)\n with tf.name_scope('full_raw_frame_prefetching'):\n if self.apply_whitening: # Subtract off the mean and divide by the variance of the pixels.\n self.dataset=self.dataset.map(self._whiten_sample)\n else:\n if self.shuffle_samples:\n reader_threads=1\n else:\n #parallel samples interleaving plays the role of shuffling\n reader=self.num_reader_threads\n self.dataset=self.dataset.interleave(cycle_length=self.num_reader_threads, map_func=self._generate_crops, num_parallel_calls=self.num_reader_threads)\n\n self.dataset=self.dataset.map(self._image_transform, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n #finalise the dataset pipeline : filterout\n #finalize dataset (set nb epoch and batch size and prefetch)\n self.dataset=self.dataset.batch(self.batch_size, drop_remainder=True)\n self.dataset=self.dataset.prefetch(tf.data.experimental.AUTOTUNE)#int(self.batch_size*20))\n print('Input data pipeline graph is now defined')", "title": "" }, { "docid": "1e86bae3e8d962334d957002849c15c3", "score": "0.5430101", "text": "def __init__(self, records_list, image_options={}, batch_size=1):\n self.batch_size = batch_size\n self.image_options = image_options\n self.records = {}\n self.records[\"image\"] = [record['image'] for record in records_list]\n self.records[\"filename\"] = [record['filename'] for record in records_list]\n if not self.image_options.get(\"predict_dataset\", False):\n self.records[\"annotation\"] = [record['annotation'] for record in records_list]\n\n #tf_records_placeholder = tf.placeholder(self.records)\n if 'annotation' in self.records:\n self.dataset = Dataset.from_tensor_slices((self.records['image'], self.records['filename'],\n self.records['annotation']))\n else:\n self.dataset = Dataset.from_tensor_slices((self.records['image'], self.records['filename']))\n\n self.dataset = self.dataset.map(self._input_parser)\n self.dataset = self.dataset.batch(batch_size)\n self.dataset = self.dataset.repeat()", "title": "" }, { "docid": "79609d4ecad876d56a76b4c1d100e9eb", "score": "0.54298395", "text": "def createMNISTDataset(path):\n\n start = time()\n\n data = []\n characters = os.listdir(path)\n for character in characters:\n if character == '.ipynb_checkpoints':\n continue\n imagePath = os.path.join(path, character)\n characterIndex = characters.index(character)\n for image in os.listdir(imagePath):\n if character == '.ipynb_checkpoints':\n continue\n try:\n imageArray = cv2.imread(os.path.join(\n imagePath, image), cv2.IMREAD_GRAYSCALE)\n data.append([imageArray, characterIndex])\n except Exception as e:\n pass\n random.shuffle(data)\n\n print('Operation completed in ', time() - start, 'seconds')\n return data", "title": "" }, { "docid": "20972b98a12f5e600b454385279a9837", "score": "0.5429761", "text": "def training_data():\n sequences = []\n classes = []\n seqname = 'training_sets.txt'\n classname = 'training_class.txt'\n\n with open(os.path.join(file_path, seqname)) as f:\n for line in f:\n sequences.append(line.strip())\n\n with open(os.path.join(file_path, classname)) as f:\n for line in f:\n classes.append(line.strip())\n\n return (string_to_array(sequences), np.asarray(classes, dtype=int))", "title": "" }, { "docid": "52223583b4b5b238de61191f726fea64", "score": "0.5422367", "text": "def read(dataset = \"training\", path = \".\"):\n\n if dataset is 
\"training\":\n fname_img = os.path.join(path, 'train-images-idx3-ubyte')\n fname_lbl = os.path.join(path, 'train-labels-idx1-ubyte')\n elif dataset is \"testing\":\n fname_img = os.path.join(path, 't10k-images-idx3-ubyte')\n fname_lbl = os.path.join(path, 't10k-labels-idx1-ubyte')\n else:\n raise ValueError(\"dataset must be 'testing' or 'training'\")\n\n # Load everything in some numpy arrays\n with open(fname_lbl, 'rb') as flbl:\n magic, num = struct.unpack(\">II\", flbl.read(8))\n lbl = np.fromfile(flbl, dtype=np.int8)\n\n with open(fname_img, 'rb') as fimg:\n magic, num, rows, cols = struct.unpack(\">IIII\", fimg.read(16))\n img = np.fromfile(fimg, dtype=np.uint8).reshape(len(lbl), rows, cols)\n\n get_img = lambda idx: (lbl[idx], img[idx])\n\n # Create an iterator which returns each image in turn\n for i in range(len(lbl)):\n yield get_img(i)", "title": "" }, { "docid": "856d0f574b864a8ec2db29c8a5fc4651", "score": "0.54197866", "text": "def get_image_data_generator(labels, width, height, augmenter=None, area_threshold=0.5, min_area=None, shuffle=True):\n labels = labels.copy()\n for index in itertools.cycle(range(len(labels))):\n if index == 0 and shuffle:\n random.shuffle(labels)\n image_filepath, lines, confidence = labels[index]\n image = tools.read(image_filepath)\n if augmenter is not None:\n image, lines = tools.augment(boxes=lines, boxes_format='lines', image=image, area_threshold=area_threshold, min_area=min_area, augmenter=augmenter)\n\n image, scale = tools.fit(image,\n width=width,\n height=height,\n mode='letterbox',\n return_scale=True)\n lines = tools.adjust_boxes(boxes=lines, boxes_format='lines', scale=scale)\n yield image, lines, confidence", "title": "" }, { "docid": "a53e52f11a5e438df88a36b774a11999", "score": "0.5419786", "text": "def read(dataset=\"training\", path=\".\"):\n\n if dataset is \"training\":\n fname_img = os.path.join(path, 'train-images-idx3-ubyte')\n fname_lbl = os.path.join(path, 'train-labels-idx1-ubyte')\n elif dataset is \"testing\":\n fname_img = os.path.join(path, 't10k-images-idx3-ubyte')\n fname_lbl = os.path.join(path, 't10k-labels-idx1-ubyte')\n else:\n raise ValueError(\"dataset must be 'testing' or 'training'\")\n\n # Load everything in some numpy arrays\n with open(fname_lbl, 'rb') as flbl:\n magic, num = struct.unpack(\">II\", flbl.read(8))\n lbl = np.fromfile(flbl, dtype=np.int8)\n\n with open(fname_img, 'rb') as fimg:\n magic, num, rows, cols = struct.unpack(\">IIII\", fimg.read(16))\n img = np.fromfile(fimg, dtype=np.uint8).reshape(len(lbl), rows, cols)\n\n get_img = lambda idx: (lbl[idx], img[idx])\n\n # Create an iterator which returns each image in turn\n for i in xrange(len(lbl)):\n yield get_img(i)", "title": "" }, { "docid": "0a0fd30564ecd27604429273b6d62658", "score": "0.5416893", "text": "def __init__(self, file_path, one_hot=True, shuffle_data=True, xywh=True, num_cells=7):\r\n self.data_list = []\r\n self.idx = -1\r\n self.shuffle_data = shuffle_data\r\n self.one_hot = one_hot\r\n file = open(file_path, \"r\")\r\n image_path, x1, y1, x2, y2, label = file.readline().split(',')\r\n #image = self._load_image(image_path)\r\n coordinates = np.array([[0, 0, 0, 0, int(label)]])\r\n if not self.one_hot:\r\n if not x1 == '':\r\n coordinates = np.array([[int(x1), int(y1), int(x2), int(y2), int(label)]])\r\n while True:\r\n try:\r\n while True:\r\n new_image_path, x1, y1, x2, y2, new_label = file.readline().split(',')\r\n if new_image_path != image_path:\r\n if not self.one_hot:\r\n if xywh:\r\n coordinates = 
convert_xyxy_to_xywh((224, 224), coordinates, num_cells)\r\n self.data_list.append((image_path, coordinates)) # label stored in coordinates\r\n if x1 != '':\r\n coordinates = np.array([[int(x1), int(y1), int(x2), int(y2), int(new_label)]])\r\n else:\r\n coordinates = np.array([[0, 0, 0, 0, int(new_label)]])\r\n else:\r\n self.data_list.append((image_path, label))\r\n image_path = new_image_path\r\n #image = self._load_image(image_path)\r\n label = new_label\r\n break\r\n if not self.one_hot and x1 != '':\r\n coordinates = np.append(coordinates, [[int(x1), int(y1), int(x2), int(y2), int(new_label)]], axis=0)\r\n except ValueError:\r\n if not self.one_hot:\r\n if xywh:\r\n coordinates = convert_xyxy_to_xywh((224, 224), coordinates, num_cells)\r\n self.data_list.append((image_path, coordinates))\r\n else:\r\n self.data_list.append((image_path, label))\r\n break\r\n file.close()\r\n if self.shuffle_data:\r\n shuffle(self.data_list)", "title": "" }, { "docid": "b4ddeee9562fc12a423ae34eb7c28410", "score": "0.54150254", "text": "def __init__(self):\n with h5py.File(\"cell_data.h5\", \"r\") as data:\n self.train_images = \\\n [data[\"/train_image_{}\".format(i)][:] for i in range(28)]\n self.train_labels = \\\n [data[\"/train_label_{}\".format(i)][:] for i in range(28)]\n self.test_images = \\\n [data[\"/test_image_{}\".format(i)][:] for i in range(3)]\n self.test_labels = \\\n [data[\"/test_label_{}\".format(i)][:] for i in range(3)]\n self.input_resolution = 300\n self.label_resolution = 116\n self.offset = (300 - 116) // 2", "title": "" }, { "docid": "027c6b76622ed1f3c3ff89f60f79581a", "score": "0.5405722", "text": "def __init__(self,\n images, # [total_labels, h, w, 3]\n labels, # should contain [total_labels, 6]\n fake_data=False,\n one_hot=False,\n dtype=dtypes.float32,\n reshape=False,\n seed=None):\n seed1, seed2 = random_seed.get_seed(seed)\n # If op level seed is not set, use whatever graph level seed is returned\n np.random.seed(seed1 if seed is None else seed2)\n dtype = dtypes.as_dtype(dtype).base_dtype\n if dtype not in (dtypes.uint8, dtypes.float32):\n raise TypeError(\n 'Invalid image dtype %r, expected uint8 or float32' % dtype)\n if fake_data:\n self._num_examples = 10000\n self.one_hot = one_hot\n else:\n assert images.shape[0] == labels.shape[0], (\n 'images.shape: %s labels.shape: %s' % (images.shape, labels.shape))\n self._num_examples = images.shape[0]\n\n # Convert shape from [num examples, rows, columns, depth]\n # to [num examples, rows*columns] (assuming depth == 1)\n if reshape:\n assert images.shape[3] == 1\n images = images.reshape(images.shape[0],\n images.shape[1] * images.shape[2])\n if dtype == dtypes.float32:\n # Convert from [0, 255] -> [0.0, 1.0].\n images = images.astype(np.float32)\n images = np.multiply(images, 1.0 / 255.0)\n\n self._images = images\n self._labels = labels\n self._epochs_completed = 0\n self._index_in_epoch = 0", "title": "" }, { "docid": "6c6cb97edd0b76c6f01178943b3d5a88", "score": "0.54039186", "text": "def nli_dataset_generator(raw_path, setname):\n cnt=0\n fullpath = raw_path.format(setname)\n assert os.path.exists(fullpath)\n read_headline = False\n with open(fullpath) as f:\n for line in f:\n if cnt % 1000 == 0: print(setname, cnt)\n split_line = line.strip().split('\\t')\n if split_line[0] not in ['neutral', 'contradiction', 'entailment']:\n if read_headline: continue\n print(split_line)\n print(setname)\n sent1Idx, sent2Idx, labelIdx = split_line.index('sentence1'), split_line.index(\n 'sentence2'), split_line.index('gold_label')\n 
read_headline = True\n continue\n cnt += 1\n yield [split_line[labelIdx], preprocess_sentence(split_line[sent1Idx]), preprocess_sentence(split_line[sent2Idx])]", "title": "" } ]
c6481e9c7727bea1df9e5d4020d05fe3
Documentation for the function.
[ { "docid": "4df190a302a56c2b94df3ff61a12d69d", "score": "0.0", "text": "def poblar_mapa(self, obts=0):\n\t\tif obts==0 or obts<((self.filas*self.columnas)//3) or obts>(((self.filas*self.columnas)//3)*2):\n\t\t\tobts = (self.filas * self.columnas)//3\n\t\twhile True:\n\t\t\ta_x, a_y = random.randint(0,self.filas-1), random.randint(0,self.columnas-1)\n\t\t\tm_x, m_y = random.randint(0,self.filas-1), random.randint(0,self.columnas-1)\n\t\t\tif a_x!=m_x and a_y!= m_y:\n\t\t\t\tself.axy[0], self.axy[1] = a_x, a_y\n\t\t\t\tself.mxy[0], self.mxy[1] = m_x, m_y\n\t\t\t\tbreak\n\t\tself.mapa[self.axy[0]][self.axy[1]] = self.simbolos[2]\n\t\tself.mapa[self.mxy[0]][self.mxy[1]] = self.simbolos[3]\n\t\tcntdr = 0\n\t\twhile cntdr < obts:\n\t\t\tx, y = random.randint(0,self.filas-1), random.randint(0,self.columnas-1)\n\t\t\tif self.mapa[x][y] == self.simbolos[0]:\n\t\t\t\tself.mapa[x][y] = self.simbolos[1]\n\t\t\t\tcntdr += 1", "title": "" } ]
[ { "docid": "70f58445cd1f2d39a33a90573343aef0", "score": "0.75110817", "text": "def function(self):", "title": "" }, { "docid": "cfa74ecf76709ca612bed25272d7c559", "score": "0.7438437", "text": "def explaination(self):", "title": "" }, { "docid": "0c6b50969ed269d10634f34ef37b0530", "score": "0.7359935", "text": "def document_func():", "title": "" }, { "docid": "055270363635086aa425083c46f8e7a3", "score": "0.71768546", "text": "def fn(self):", "title": "" }, { "docid": "9b2dc83bfb20d21cfec9a81e00f2ae8d", "score": "0.7169835", "text": "def fun_doc():\n return None", "title": "" }, { "docid": "04e9d75985240a993c1d98b7c3e1e5a3", "score": "0.69649935", "text": "def af(self):", "title": "" }, { "docid": "ae1f8638d5c5af2a58884a1f717137e4", "score": "0.6908594", "text": "def __call__(self, doc):\n raise NotImplementedError", "title": "" }, { "docid": "8bec32d4734c283b29ea484b9d38a1ab", "score": "0.67496544", "text": "def parameters(self):", "title": "" }, { "docid": "953014ed509b87d1c79d74937918dfdd", "score": "0.66731805", "text": "def do(self):", "title": "" }, { "docid": "953014ed509b87d1c79d74937918dfdd", "score": "0.66731805", "text": "def do(self):", "title": "" }, { "docid": "2e2984181e827cc563c80c5333b4bf2c", "score": "0.6640214", "text": "def fun():\r\n print 'func doc'", "title": "" }, { "docid": "a998418a5b30924702e046bb400f11ec", "score": "0.66167605", "text": "def Reactants(self):", "title": "" }, { "docid": "e6aec7c6412834628d338f69d56f95a3", "score": "0.6615819", "text": "def express(self):", "title": "" }, { "docid": "0723a9f57c89b95fa859a6d9e8bacdff", "score": "0.65562505", "text": "def docstring():", "title": "" }, { "docid": "fe038e0be7616b9cfa6122cb34655ca2", "score": "0.6553726", "text": "def docstrings():", "title": "" }, { "docid": "0b80a68a775eb1a309fdad9337c29a1b", "score": "0.6515944", "text": "def __call__():", "title": "" }, { "docid": "0b80a68a775eb1a309fdad9337c29a1b", "score": "0.6515944", "text": "def __call__():", "title": "" }, { "docid": "0b80a68a775eb1a309fdad9337c29a1b", "score": "0.6515944", "text": "def __call__():", "title": "" }, { "docid": "0b80a68a775eb1a309fdad9337c29a1b", "score": "0.6515944", "text": "def __call__():", "title": "" }, { "docid": "0b80a68a775eb1a309fdad9337c29a1b", "score": "0.6515944", "text": "def __call__():", "title": "" }, { "docid": "0b80a68a775eb1a309fdad9337c29a1b", "score": "0.6515944", "text": "def __call__():", "title": "" }, { "docid": "0b80a68a775eb1a309fdad9337c29a1b", "score": "0.6515944", "text": "def __call__():", "title": "" }, { "docid": "0b80a68a775eb1a309fdad9337c29a1b", "score": "0.6515944", "text": "def __call__():", "title": "" }, { "docid": "0b80a68a775eb1a309fdad9337c29a1b", "score": "0.6515944", "text": "def __call__():", "title": "" }, { "docid": "0b80a68a775eb1a309fdad9337c29a1b", "score": "0.6515944", "text": "def __call__():", "title": "" }, { "docid": "e034ea853a7336efb3b79086dd954790", "score": "0.6498816", "text": "def describe(self):", "title": "" }, { "docid": "e034ea853a7336efb3b79086dd954790", "score": "0.6498816", "text": "def describe(self):", "title": "" }, { "docid": "7376c6070bd45a9f7cbb12ebcedc5b63", "score": "0.6498506", "text": "def func(self):\n pass", "title": "" }, { "docid": "12ef351568920016d6e61772431c9e04", "score": "0.6453903", "text": "def __documented_special_method__(self):\n pass", "title": "" }, { "docid": "78e33815d207e4aa34d4e52320435483", "score": "0.64506996", "text": "def func():", "title": "" }, { "docid": "78e33815d207e4aa34d4e52320435483", "score": "0.64506996", "text": 
"def func():", "title": "" }, { "docid": "610f4d56f48709c12ed9fad49109d881", "score": "0.64337355", "text": "def __funcion_analizar_desc():", "title": "" }, { "docid": "7c31a1afc776339992c0ae079ef9eaf3", "score": "0.6403126", "text": "def Do(self):", "title": "" }, { "docid": "79622c3ba53c2398bb43106a6b3befdc", "score": "0.636568", "text": "def method(self):", "title": "" }, { "docid": "744cdb9c3379342ebcc924145c144f1e", "score": "0.63656205", "text": "def retour():\n pass", "title": "" }, { "docid": "f0f72abc460055d57a106eb4be922eaf", "score": "0.6365434", "text": "def explore(self):", "title": "" }, { "docid": "12a90fb46a858910daeb6fa17375222d", "score": "0.6345793", "text": "def metageneration(self):\n ...", "title": "" }, { "docid": "d388939ba349a300a7a20adb599c127d", "score": "0.634101", "text": "def __call__(self):\n pass", "title": "" }, { "docid": "d388939ba349a300a7a20adb599c127d", "score": "0.634101", "text": "def __call__(self):\n pass", "title": "" }, { "docid": "6e195c858e219017205a4d7a8f1b96e4", "score": "0.6338538", "text": "def public(self):\n\t\tpass", "title": "" }, { "docid": "52e3d6121e1b619fbf9d43997c81ae4a", "score": "0.63189363", "text": "def sampleFunction():", "title": "" }, { "docid": "4b3bb219839ecfaf06b86f3112e6535c", "score": "0.63095206", "text": "def function(self, *args):\n return", "title": "" }, { "docid": "1efd3d5fccaf99d4cce6d5085e615545", "score": "0.6301404", "text": "def doc_prop():\n pass", "title": "" }, { "docid": "d50c429ae463fda01f73beb782d7b313", "score": "0.62752575", "text": "def test_func_doc(self):\n\n self.assertTrue(len(Base.to_json_string.__doc__) > 0)\n self.assertTrue(len(Base.save_to_file.__doc__) > 0)\n self.assertTrue(len(Base.from_json_string.__doc__) > 0)\n self.assertTrue(len(Base.create.__doc__) > 0)\n self.assertTrue(len(Base.load_from_file.__doc__) > 0)", "title": "" }, { "docid": "f8bdc3852b556d96cc3d68f0e4e286c4", "score": "0.62651145", "text": "def __doc__(self):\n return self.fget.__doc__", "title": "" }, { "docid": "10b3fd49f537277d9f5b9be39f6dc847", "score": "0.62589407", "text": "def get(self):", "title": "" }, { "docid": "10b3fd49f537277d9f5b9be39f6dc847", "score": "0.62589407", "text": "def get(self):", "title": "" }, { "docid": "10b3fd49f537277d9f5b9be39f6dc847", "score": "0.62589407", "text": "def get(self):", "title": "" }, { "docid": "10b3fd49f537277d9f5b9be39f6dc847", "score": "0.62589407", "text": "def get(self):", "title": "" }, { "docid": "0c255cea79a45b14e1f08474e6395546", "score": "0.6256364", "text": "def Function(self):\n raise NotImplementedError()", "title": "" }, { "docid": "53a309858b39ef92bfa2a69552d091ea", "score": "0.6238472", "text": "def dummy_fun(doc):\n return doc", "title": "" }, { "docid": "0c5b83a1048c472604c397dde93a99cc", "score": "0.62314236", "text": "def fun():", "title": "" }, { "docid": "84be3f86e67fc91dad74a06bfa3df334", "score": "0.62204045", "text": "def respond(self):", "title": "" }, { "docid": "2f3f56e17906e180ec146610945de5e0", "score": "0.622038", "text": "def Get(self):", "title": "" }, { "docid": "2f3f56e17906e180ec146610945de5e0", "score": "0.62203056", "text": "def Get(self):", "title": "" }, { "docid": "2f3f56e17906e180ec146610945de5e0", "score": "0.62203056", "text": "def Get(self):", "title": "" }, { "docid": "af9f7a778f588a2020296a6c2734ea56", "score": "0.6218948", "text": "def _function(self):\n pass # implement in subclasses to achieve any results", "title": "" }, { "docid": "2f3f56e17906e180ec146610945de5e0", "score": "0.62181854", "text": "def Get(self):", "title": 
"" }, { "docid": "2f3f56e17906e180ec146610945de5e0", "score": "0.62181854", "text": "def Get(self):", "title": "" }, { "docid": "2f3f56e17906e180ec146610945de5e0", "score": "0.62181854", "text": "def Get(self):", "title": "" }, { "docid": "2f3f56e17906e180ec146610945de5e0", "score": "0.62181854", "text": "def Get(self):", "title": "" }, { "docid": "2f3f56e17906e180ec146610945de5e0", "score": "0.62181854", "text": "def Get(self):", "title": "" }, { "docid": "2f3f56e17906e180ec146610945de5e0", "score": "0.62181854", "text": "def Get(self):", "title": "" }, { "docid": "2f3f56e17906e180ec146610945de5e0", "score": "0.62181854", "text": "def Get(self):", "title": "" }, { "docid": "2f3f56e17906e180ec146610945de5e0", "score": "0.62181854", "text": "def Get(self):", "title": "" }, { "docid": "2f3f56e17906e180ec146610945de5e0", "score": "0.62181854", "text": "def Get(self):", "title": "" }, { "docid": "f6833ada7137ee02133be6a447b6d9fe", "score": "0.62176937", "text": "def function1(self):", "title": "" }, { "docid": "a852926cbf9d472c08e93fb84f095933", "score": "0.6217637", "text": "def detail(self):", "title": "" }, { "docid": "81b5d3a004703c374667270f514843ba", "score": "0.62054706", "text": "def thorcam():\n\n pass", "title": "" }, { "docid": "afcc7539975c5fb98961524cb483550e", "score": "0.6192775", "text": "def data(self):", "title": "" }, { "docid": "ebe0abd083b5eb46f807eb06819dd477", "score": "0.6191556", "text": "def define_parameters(self):", "title": "" }, { "docid": "0d1716c437f8e2399775022f5286b4a7", "score": "0.6186427", "text": "def Run(self):", "title": "" }, { "docid": "a141af5d6e495e355b89720b7917b343", "score": "0.6169954", "text": "def documented_inner_func():\n pass", "title": "" }, { "docid": "4d62a363909eddc0c6c56d1c6550c8d2", "score": "0.616723", "text": "def func_google():", "title": "" }, { "docid": "57da7fab7fa0c8632808367a4c701616", "score": "0.6161936", "text": "def func_rst():", "title": "" }, { "docid": "55bd11d0d65c267ae7f4ab7f712faf39", "score": "0.61615163", "text": "def z(self):", "title": "" }, { "docid": "5e7959839d507a95c88da0c794f4760c", "score": "0.61492026", "text": "def params(self):\n pass", "title": "" }, { "docid": "cc08b6b9fc8fb6c1a32b8f021cefb25c", "score": "0.6134874", "text": "def ro(self):", "title": "" }, { "docid": "849302a8a8efbe2e50f3a7954ba32489", "score": "0.6109316", "text": "def Pica(self):\n pass", "title": "" }, { "docid": "df9f728a5b3df976e40e16f3b89b178f", "score": "0.61023057", "text": "def gi(self):", "title": "" }, { "docid": "1b8480024f274d526b724d3b24e7c5d8", "score": "0.61017936", "text": "def test_docstring(self):\n self.assertTrue(len(Amenity.__doc__) > 0)\n for funct in dir(Amenity):\n self.assertTrue(len(funct.__doc__) > 0)", "title": "" }, { "docid": "f2254a4e976dd1a07afff9be2fa8fa4a", "score": "0.60907453", "text": "def custom(self):\n pass", "title": "" }, { "docid": "0f484374b006293ee455731ec02a7d74", "score": "0.6088892", "text": "def pseudodb():\n pass", "title": "" }, { "docid": "944fb2df87fb227ccee6471df3301cdd", "score": "0.60845906", "text": "def __call__(self):\n raise NotImplementedError", "title": "" }, { "docid": "944fb2df87fb227ccee6471df3301cdd", "score": "0.60845906", "text": "def __call__(self):\n raise NotImplementedError", "title": "" }, { "docid": "944fb2df87fb227ccee6471df3301cdd", "score": "0.60845906", "text": "def __call__(self):\n raise NotImplementedError", "title": "" }, { "docid": "7ad7767ab6207fecab5c62491cdc5065", "score": "0.6083089", "text": "def func():\n pass", "title": "" }, { "docid": 
"7ad7767ab6207fecab5c62491cdc5065", "score": "0.6083089", "text": "def func():\n pass", "title": "" }, { "docid": "7ad7767ab6207fecab5c62491cdc5065", "score": "0.6083089", "text": "def func():\n pass", "title": "" }, { "docid": "b7d764926405dd87ad6ab59e6a30a0cf", "score": "0.6080686", "text": "def simpleMethod():\n pass", "title": "" }, { "docid": "7528e9366ab244d1b83660919f9f40dd", "score": "0.6079921", "text": "def _add_doc(func, doc):\r\n func.__doc__ = doc", "title": "" }, { "docid": "7528e9366ab244d1b83660919f9f40dd", "score": "0.6079921", "text": "def _add_doc(func, doc):\r\n func.__doc__ = doc", "title": "" }, { "docid": "7528e9366ab244d1b83660919f9f40dd", "score": "0.6079921", "text": "def _add_doc(func, doc):\r\n func.__doc__ = doc", "title": "" }, { "docid": "7528e9366ab244d1b83660919f9f40dd", "score": "0.6079921", "text": "def _add_doc(func, doc):\r\n func.__doc__ = doc", "title": "" }, { "docid": "7528e9366ab244d1b83660919f9f40dd", "score": "0.6079921", "text": "def _add_doc(func, doc):\r\n func.__doc__ = doc", "title": "" }, { "docid": "7528e9366ab244d1b83660919f9f40dd", "score": "0.6079921", "text": "def _add_doc(func, doc):\r\n func.__doc__ = doc", "title": "" }, { "docid": "7528e9366ab244d1b83660919f9f40dd", "score": "0.6079921", "text": "def _add_doc(func, doc):\r\n func.__doc__ = doc", "title": "" }, { "docid": "6a259d4d6209d3ab41f5f123bdcb0b72", "score": "0.60756034", "text": "def get_doc(self) -> str:", "title": "" }, { "docid": "c238c00bb081a761a9c456f19925e7ec", "score": "0.6058326", "text": "def bytereverse(self): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "1be6edb50dab0c6e23f77b4e1ff8adfd", "score": "0.6058101", "text": "def x(self):", "title": "" }, { "docid": "62ff5f67dab75804e5e480e030e9991b", "score": "0.60579836", "text": "def __repr__(self):\r\n return self.func.__doc__", "title": "" } ]
2ab950e19f28d65cd80cf50c55a6b220
Implement account-activation logic here.
[ { "docid": "735d4b9dda73bfbef17bfbb7a768fe41", "score": "0.0", "text": "def activate(self, *args, **kwargs):\n raise NotImplementedError", "title": "" } ]
[ { "docid": "c17689b758389e6c1b74af0c63d2373b", "score": "0.7125156", "text": "def test_activate_account(self):\n pass", "title": "" }, { "docid": "c17689b758389e6c1b74af0c63d2373b", "score": "0.7125156", "text": "def test_activate_account(self):\n pass", "title": "" }, { "docid": "124a1abf57a1182af0a2311c0e0f0b85", "score": "0.69174916", "text": "def activate_account(request, activation_key):\n from django.contrib.auth.models import User\n try:\n activate_user = models.RegistrationModel.objects.get(activation_key=activation_key)\n if activate_user:\n usr = User.objects.get(id=activate_user.user_id)\n usr.is_active = 1\n usr.save()\n activate_user.delete()\n return render(request,'registration/registration_complete.html')\n except:\n return HttpResponseRedirect('/')", "title": "" }, { "docid": "1d50db1c6f8f0c3105987b3587bd9005", "score": "0.68715113", "text": "def activate(self):\n user = get_user_model().objects.get(pk=self.activation_key)\n user.is_active = True\n user.save(update_fields=['is_active'])\n client = Client()\n client.captureMessage(\"New user activated\", extra={\n 'email': user.email\n })\n user.backend = 'ratelimitbackend.backends.RateLimitModelBackend'\n auth_login(self.request, user)", "title": "" }, { "docid": "6a68f3682d7d1e72672362e2ba196aee", "score": "0.67915547", "text": "def activate(request, uidb64, token):\n try:\n uid = force_text(urlsafe_base64_decode(uidb64))\n user = User.objects.get(pk=uid)\n except (TypeError, ValueError, OverflowError, User.DoesNotExist):\n user = None\n\n if user is not None and account_activation_token.check_token(user, token):\n user.is_active = True\n user.profile.email_confirmed = True\n user.save()\n login(request, user)\n return redirect('main')\n else:\n return render(request, 'accounts/account_activation_invalid.html')", "title": "" }, { "docid": "d0f690b8284916b69aab0159227b38f4", "score": "0.67746216", "text": "def activateAccount(request, uidb64, token):\n try:\n uid = force_bytes(urlsafe_base64_decode(uidb64))\n user = User.objects.get(pk=uid)\n except(TypeError, ValueError, OverflowError, User.DoesNotExist):\n user = None\n if user is not None and account_activation_token.check_token(user, token):\n user.is_active = True\n user.save()\n login(request, user)\n\n messages.info(request, 'Congrats, your email has been verified. Add your contact address. Please note that you cannot change this information later.')\n return redirect('address')\n else:\n return HttpResponse('Activation link is invalid!')", "title": "" }, { "docid": "a71ca7cbeb1091206de5d8c7c87b2182", "score": "0.67716", "text": "def test_customer_account_management_v1_activate_put(self):\n pass", "title": "" }, { "docid": "3a5b631a7ebaa85d77af51796350f89c", "score": "0.6732537", "text": "def form_valid(self, form):\n profile = self.get_object()\n password = form.cleaned_data['password1']\n self.activated_user = self.backend.activate(\n profile.activation_key, self.request, password=password)\n return super(ActivationView, self).form_valid(form)", "title": "" }, { "docid": "095ce65f660adff591e1d56dee7d4bd5", "score": "0.67312175", "text": "def user_activation(sender, instance, **kwargs):\n if instance.id is None:\n # user is being created\n instance.is_active = True # for now it's set to true, i.e. 
any newly registered user can login after registration.", "title": "" }, { "docid": "8b60dbbf5c1e5650b5617b64c7a2a4de", "score": "0.65827394", "text": "def enable_account(self, *args, **kwargs):\n pass", "title": "" }, { "docid": "3f1cf9a666eac724528dd60b229ecee1", "score": "0.6537399", "text": "def _send_activation_email(self, request, user):\n\t\tpass", "title": "" }, { "docid": "4597730cff7c1e85fe367645933c4344", "score": "0.6522434", "text": "def activate(request, activation_key,\n template_name='registration/activate.html',\n extra_context=None):\n activation_key = activation_key.lower() # Normalize before trying anything with it.\n account = RegistrationProfile.objects.activate_user(activation_key)\n if extra_context is None:\n extra_context = {}\n context = RequestContext(request)\n #====================================================\n # custom democracy code below\n #====================================================\n session_votes = False\n if request.session.has_key(\"vote_history\"):\n session_votes = True \n if account: \n migrate_votes(request, account, request.session[\"vote_history\"])\n del request.session[\"vote_history\"]\n\n extra_context.update( { 'votes_saved' : session_votes } )\n #=========================================================\n # end custom code\n #========================================================\n for key, value in extra_context.items():\n context[key] = callable(value) and value() or value\n return render_to_response(template_name,\n { 'account': account,\n 'expiration_days': settings.ACCOUNT_ACTIVATION_DAYS },\n context_instance=context)", "title": "" }, { "docid": "06b33e19eb78674c190ab6b12dd96045", "score": "0.6513843", "text": "def activate(request, uidb64, token):\n \"\"\"Followed tutorial: https://simpleisbetterthancomplex.com/tutorial/2017/02/18/how-to-create-user-sign-up-view.html\"\"\"\n try:\n uid = force_text(urlsafe_base64_decode(uidb64))\n user = User.objects.get(pk=uid)\n except (TypeError, ValueError, OverflowError, User.DoesNotExist):\n user = None\n\n if user is not None and account_activation_token.check_token(user, token):\n user.is_active = True\n user.registeruser.email_confirmed = True\n user.save()\n\n optional = RegisterUser.objects.get(user=user) #get optional info of user\n #email admin\n admin_subject = 'New User Registered'\n admin_message = render_to_string('app/new_user_email_to_admin.html', {\n 'user': user,\n 'optional': optional,\n })\n\n\n mail_admins(admin_subject, admin_message)\n login(request, user, backend=\"django.contrib.auth.backends.ModelBackend\")\n return redirect('account_activated')\n else:\n return render(request, 'email_confirmation_invalid.html')", "title": "" }, { "docid": "c28c42d60e0dce8f3b813fc82e014025", "score": "0.6470399", "text": "def start_email_activation():\n if g.user.email_status == 'active':\n flash(\"Ihre E-Mail-Adresse wurde bereits aktiviert\")\n return redirect(url_for('.edit_profile'))\n code = _generate_activation_code(g.user)\n activation_url = request.url_root.rstrip('/') + url_for('.activate_email', code=code)\n g.user.email_activation_code = code\n db.session.add(g.user)\n db.session.commit()\n email.send_email(g.user.email, \"Aktivieren Sie Ihre E-Mail-Adresse\",\n 'account/emails/activation', {'user': g.user,\n 'url': activation_url})\n flash(u\"Es wurde eine Aktivierungsemail an {} versandt\".format(g.user.email))\n return redirect(url_for('.edit_profile'))", "title": "" }, { "docid": "e7a0259580519cf304c83127744c3147", "score": "0.6460895", "text": "def 
send_activate_email(self):\n mail_subject = 'Aktywacja konta w IVmonitor.'\n template_name = 'users/email_activate.html'\n\n data = {\n 'user': self.user,\n 'domain': DOMAIN,\n 'uid': urlsafe_base64_encode(force_bytes(self.user.pk)),\n 'token': account_activation_token.make_token(self.user),\n }\n\n self.send_email(mail_subject, template_name, data)", "title": "" }, { "docid": "e62418962f5c6a1fb24605493ad8f494", "score": "0.6322714", "text": "def activate (request, a_key = None ):\n SITE_URL = settings.SITE_URL\n if (a_key == '' or a_key==None):\n\t key_dne = True\n else:\n try:\n\t user_profile = UserProfile.objects.get(activation_key = a_key)\n except ObjectDoesNotExist:\n prof_dne = True\n # try-except-else is actually there! God knows what for... Nested try blocks work just as well...\n else:\n if user_profile.user.is_active == True:\n activated = True\n elif user_profile.key_expires < datetime.datetime.today():\n\t expired = True\n\t user = user_profile.user\n\t user.delete()\n\t user_profile.delete()\n else:\n user = user_profile.user\n user.is_active = True\n user.save()\n request.session[\"registered\"] = True\n activated = True\n return render_to_response('registration/activated.html',locals(), context_instance= global_context(request))", "title": "" }, { "docid": "6810afb28530132d1a87263181ee43e4", "score": "0.62574786", "text": "def activate_account(request, token):\n user = get_object_or_404(User, auth_token=token)\n\n if not user.auth_token_is_used:\n if request.POST:\n form = ActivateAccountForm(request.POST, user=user)\n\n if form.is_valid():\n\n # Activate the user's account\n user.full_name = form.cleaned_data['full_name']\n user.password = make_password(form.cleaned_data['password'])\n user.is_active = True\n\n user.activated_datetime = now()\n user.auth_token_is_used = True\n user.save()\n\n email = user.email\n password = request.POST['password']\n user = authenticate(username=email, password=password)\n login(request, user)\n\n # Redirect to dashboard with welcome message\n request.session['show_welcome'] = True\n return redirect(reverse('dashboard'))\n\n else:\n form = ActivateAccountForm(user=user)\n\n context = {\n 'user': user,\n 'form': form,\n }\n\n else:\n token_is_used = True\n\n context = {\n 'token_is_used': token_is_used,\n }\n\n return render(request, 'accounts/activate_account.html', context)", "title": "" }, { "docid": "6fc1c177e4874a656627cee96fd80f26", "score": "0.62280345", "text": "def test_activation_active(self):\n p = self._profile()\n u = RegisterProfile.objects.activate_profile(p.activation_key)\n ok_(u.is_active)", "title": "" }, { "docid": "bee52878ccd35c77e987df7883ed62c8", "score": "0.62222964", "text": "def activate_subscriber(self, activation_key):\n # Make sure the key we're trying conforms to the pattern of a\n # SHA1 hash; if it doesn't, no point trying to look it up in\n # the database.\n if SHA1_RE.search(activation_key):\n try:\n profile = self.get(activation_key=activation_key)\n except self.model.DoesNotExist:\n return False\n if not profile.activation_key_expired():\n subscriber = profile.subscriber\n subscriber.is_active = True\n subscriber.deactivation_key = profile.activation_key\n subscriber.save()\n profile.activation_key = self.model.ACTIVATED\n profile.save()\n return subscriber\n return False", "title": "" }, { "docid": "84f28e4dfd1da841f3754c3f85f87c7b", "score": "0.6209953", "text": "def activate_user(activation_token):\n # Attempt to decode the token. 
If decode fails, the token is not valid so return None\n try:\n usr_id = jwt.decode(activation_token, current_app.config['SECRET_KEY'], 'HS256')['activate_user']\n except:\n return None \n\n # Retrieve the user\n sqa_sess = sqa_session()\n usr = sqa_sess.query(User).get(usr_id)\n \n # If user has been deleted\n if usr is None:\n return None\n \n # If user is not Pending - i.e. has been activated previously\n if usr.Status_Pending == False:\n return -1\n \n # Activate user and return user object\n usr.Status_Pending = False\n usr.Status_Active = True\n sqa_sess.commit()\n return usr", "title": "" }, { "docid": "0763deb1811776d9b55c7bfb120a6aeb", "score": "0.6199916", "text": "def test_activate_user(self):\n self.client.force_authenticate(user=self.user)\n\n data = {\n 'activation_token': self.activation_token.key,\n }\n\n response = self.client.post(\n reverse('users_activation'),\n data,\n format='json',\n )\n\n # It's the good user\n self.assertEqual(json.loads(response.content)['id'], self.user.id)\n\n # We read a new time the user to be synchronized\n user_sync = User.objects.get(id=self.user.id)\n\n # The user is now active\n self.assertTrue(user_sync.is_active)\n\n # The token has been removed\n tokens = ActionToken.objects.filter(\n user=user_sync,\n type='account_activation',\n )\n self.assertTrue(len(tokens) == 0)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "title": "" }, { "docid": "ae82f3ac182f37a9160f08feeac48e1d", "score": "0.61574686", "text": "def activate(self): \n self.active = True\n self.activated_on = current_tztime()\n self.save()", "title": "" }, { "docid": "98ed5432683c05d834dda9cf14fb3134", "score": "0.6148894", "text": "def on_user_post_save(sender, instance, created, **kwargs):\n # Setup activation\n if created and not kwargs.get('raw', False):\n ApiKey.objects.get_or_create(user=instance)\n\n panda_users = Group.objects.get(name='panda_user')\n instance.groups.add(panda_users)\n\n salt = sha.new(str(random.random())).hexdigest()[:5]\n activation_key = sha.new(salt + instance.username).hexdigest()\n\n user_profile = UserProfile.objects.create(\n user=instance,\n activation_key=activation_key\n )\n\n email_subject = 'Welcome to PANDA, please activate your account!'\n email_body = 'Hello there, the administrator of your organization\\'s PANDA has signed you up for an account.\\n\\nTo activate your account, click this link:\\n\\nhttp://%s/#activate/%s' % (config_value('DOMAIN', 'SITE_DOMAIN'), user_profile.activation_key)\n\n send_mail(email_subject,\n email_body,\n [instance.email])", "title": "" }, { "docid": "49f08be05ea622df70181603ef8a2edd", "score": "0.61150074", "text": "def test_customer_account_management_v1_activate_by_id_put(self):\n pass", "title": "" }, { "docid": "03742a14e9e82af6cc969c520584190a", "score": "0.61149305", "text": "def activate_user_view(request, code=None, *args, **kwargs):\n # if code:\n # qs = Profile.objects.filter(activation_key=code)\n # if qs.exists() and qs.count() == 1:\n # profile = qs.first()\n # if not profile.activated:\n # user_ = profile.user\n # user_.is_active = True\n # user_.save()\n # profile.activated = True\n # profile.activation_key = None\n # return redirect(\"/login\")\n qs = Profile.objects.all()\n profile = qs.first().order_by('-timestamp')\n user_ = profile.user\n user_.is_active = True\n user_.save()\n profile.activated = True\n profile.activation_key = None\n return redirect(\"/login\")", "title": "" }, { "docid": "dc56c251a579d60922cb97f7cf65a1a3", "score": "0.61026835", "text": "def 
_send_activation_email(self, request, user, password=None, is_activation=True):\n current_site = get_current_site(request)\n context = {\n 'site_name': current_site.name,\n 'absolute_base_uri': request.build_absolute_uri('/'),\n 'user': user\n }\n if is_activation:\n activation_key = signing.dumps(\n obj=getattr(user, user.USERNAME_FIELD),\n salt=edw_settings.REGISTRATION_PROCESS['registration_salt']\n )\n activation_link = request.build_absolute_uri(reverse('registration_activate', kwargs={'activation_key': activation_key}))\n context.update({\n 'activation_link': activation_link,\n 'expiration_days': edw_settings.REGISTRATION_PROCESS['account_activation_days'],\n })\n\n if password:\n context.update({\n 'password': password\n })\n if six.PY2:\n context = Context(context)\n\n tmp_is_activation = 'send' if not is_activation else 'activate'\n subject = select_template([\n f'{edw_settings.APP_LABEL}/email/{tmp_is_activation}-account-subject.txt',\n f'edw/email/{tmp_is_activation}-account-subject.txt',\n\n ]).render(context)\n # Email subject *must not* contain newlines\n subject = ''.join(subject.splitlines())\n text_message = select_template([\n f'{edw_settings.APP_LABEL}/email/{tmp_is_activation}-account-body.txt',\n f'edw/email/{tmp_is_activation}-account-body.txt',\n\n ]).render(context)\n html_message = select_template([\n f'{edw_settings.APP_LABEL}/email/{tmp_is_activation}-account-body.html',\n f'edw/email/{tmp_is_activation}-account-body.html',\n\n ]).render(context)\n\n user.email_user(subject, text_message, html_message=html_message)", "title": "" }, { "docid": "238370995f564b08d9c165f6698b76d4", "score": "0.6100323", "text": "def Activation(self):\n from tm.system.user.models import Activation\n return Activation", "title": "" }, { "docid": "14d2e790e09870d6e08cf48f26ef4c01", "score": "0.6098996", "text": "def activate_user(self, user):\n user = User.objects.get(email=user['email'])\n user.is_active = True\n user.save()\n return user", "title": "" }, { "docid": "184683b6749d751f5f5b9fc727c32d78", "score": "0.60709405", "text": "def activate_user(self, activation_key, commit=True, user=None):\n # Make sure the key we're trying conforms to the pattern of a\n # SHA1 hash; if it doesn't, no point trying to look it up in\n # the database.\n if SHA1_RE.search(activation_key):\n try:\n if user:\n assert user.activation_key == activation_key\n else:\n user = self.get(activation_key=activation_key)\n except self.model.DoesNotExist:\n return False\n if not user.activation_key_expired():\n user.is_active = True\n user.invite_accepted_on = now()\n user.activation_key = self.ACTIVATED\n if commit:\n user.save()\n return user\n return False", "title": "" }, { "docid": "91f0319326f421153a03d30969924311", "score": "0.6062654", "text": "def test_deactivate_account(self):\n pass", "title": "" }, { "docid": "91f0319326f421153a03d30969924311", "score": "0.6062654", "text": "def test_deactivate_account(self):\n pass", "title": "" }, { "docid": "8c5ba9dddb3443bae2e07a904001e1fd", "score": "0.6053884", "text": "def post_activate(self, session):\n self.activated()", "title": "" }, { "docid": "faf3d765688eef0593ec7b373be00f8a", "score": "0.6048031", "text": "def __withdraw_from_acc(self):\n # this is a flag to let the system knows what type of activity\n # will be recorded in the history file!\n action_done = None\n user_id = input(\"\\nPlease, incert User ID here:\\n->\")\n amount = float(input(\"\\nHow much our client wants to be collected?\\n->\"))\n # Calling for the layout which displays the client 
info for our\n # staff at the balcony.\n Bank.__acme_layout(self, user_id)\n # The user proceeds with the log-in either way!\n if Bank.__log_in(self) == True:\n # This calls for a method that checks if the user is trying \n # to do a wrong type of operation, for example: trying to \n # collect money from checking account, but only savings \n # exists in the system!\n if Bank.__wrong_operation_acc(self, user_id) != False:\n # Calling for a method that check if the account is\n # currently blocked by too many overdrafts!\n if Bank.__overdraft_blocker_acc(self, user_id) != False:\n # Calling for a method that will check if the user\n # is doing or can do a overdraft!\n if Bank.__overdraft_protection_acc(self, user_id) != False:\n self.__bank_users_data[user_id][3] -= amount\n # Calling the same method again to check if after\n # transaction, the user would cross the protection!\n if Bank.__overdraft_protection_acc(self, user_id) != False:\n Bank.__overdraft_acc_charge(self, user_id)\n Bank.__new_file(self)\n Bank.__update_file(self)\n a = self.__bank_users_data[user_id][3]\n print(\"\\nTransaction succeeded!\")\n print(f\"\\n{amount} was collected from yours Checking Account!\")\n print(f\"\\nYour Checking Account currently holds: {a}\\n\")\n action_done = True\n # Activating the method that stores history!\n if action_done == True:\n action = \"Checking Account withdraw\"\n Bank.__transaction_hist(self, user_id, amount, action)\n Bank.__update_hist_file(self)\n return f\"\\nTransaction succeeded!\\n{amount} was collected from yours Checking Account!\\nYour Checking Account currently holds: {a}\\n\"\n else:\n # If the users cross the overdraft protection\n # line, the money comes back to the account!\n self.__bank_users_data[user_id][3] += amount\n a = self.__bank_users_data[user_id][3]\n print(\"\\nTransaction denied!\")\n print(\"\\nYou cannot have a depbt over -100,00!\")\n print(f\"\\n{amount} was sturned to yours Checking Account!\")\n print(f\"\\nYour Checking Account currently holds: {a}\\n\")\n action_done = False\n if action_done == False:\n action = \"Checking Account withdraw DENIED!\"\n Bank.__transaction_hist(self, user_id, amount, action)\n Bank.__update_hist_file(self)\n return f\"\\nTransaction denied!\\n{amount} was sturned to yours Savings Account!\\nYou cannot have less then 00,00 in your saving account!\\nYour Savings Account currently holds: {a}\\n\"\n else:\n print(\"\\nToo many overdrafts in this account! Operation denied!\\n\")\n else:\n print(\"\\nAccount is blocked by too many overdrafts!\\n\")\n else:\n print(\"\\nThis saving account cannot do this operation, \\\n first you need to create a checking account!\\n\")\n else:\n print(\"\\nWrong credentials, try again!\\n\")", "title": "" }, { "docid": "53193b6c2943fe345e12dd86c165f113", "score": "0.6040077", "text": "def activate(request, activation_key):\n from registration.models import RegistrationProfile\n activation_key = activation_key.lower()\n account = RegistrationProfile.objects.activate_user(activation_key)\n\n if account:\n # ** hack for logging in the user **\n # when the login form is posted, user = authenticate(username=data['username'], password=data['password'])\n # ...but we cannot authenticate without password... 
so we work-around authentication\n account.backend = settings.AUTHENTICATION_BACKENDS[0]\n _login(request, account)\n try:\n contact = Contact.objects.get(user=account)\n request.session[CUSTOMER_ID] = contact.id\n send_welcome_email(contact.email, contact.first_name, contact.last_name)\n signals.satchmo_registration_verified.send(contact, contact=contact)\n except Contact.DoesNotExist:\n # Treated for better compatibility with registation tests without error\n pass\n\n context = RequestContext(request, {\n 'account': account,\n 'expiration_days': config_value('SHOP', 'ACCOUNT_ACTIVATION_DAYS'),\n })\n return render_to_response('registration/activate.html',\n context_instance=context)", "title": "" }, { "docid": "9e30d3ede7b86fcb861cdd1455e04fad", "score": "0.6037292", "text": "def activate_user(self, username):\n cursor = self.get_cursor()\n sql = \"UPDATE accounts_user SET is_active = TRUE WHERE username = %s\"\n safe_execute(cursor, sql, (username,))", "title": "" }, { "docid": "a1c71fe9d710535d18bde8cf51a9f663", "score": "0.60262126", "text": "def post(self, request):\n username = request.POST['username']\n first_name = request.POST['first_name']\n last_name = request.POST['last_name']\n email = request.POST['email']\n password = request.POST['password']\n\n\n context = {\n 'fieldValues': request.POST\n }\n\n if not User.objects.filter(username=username).exists():\n if not User.objects.filter(email=email).exists():\n if len(password) < 6:\n messages.error(request, 'Password too short')\n return render(request, 'userpages/register.html', context)\n\n user = User.objects.create_user(\n username=username,\n first_name=first_name,\n last_name=last_name,\n email=email\n )\n user.set_password(password)\n user.is_active = False\n user.save()\n current_site = get_current_site(request)\n email_body = {\n 'user': user,\n 'domain': current_site.domain,\n 'uid': urlsafe_base64_encode(force_bytes(user.pk)),\n 'token': account_activation_token.make_token(user),\n }\n\n link = reverse('activate', kwargs={\n 'uidb64': email_body['uid'], 'token': email_body['token']})\n\n email_subject = 'Activate your account'\n\n activate_url = 'http://'+current_site.domain+link\n\n email = EmailMessage(\n email_subject,\n 'Hi '+user.username + ', Please the link below to activate your account \\n'+activate_url,\n '[email protected]',\n [email],\n )\n email.send(fail_silently=False)\n messages.success(request, f\"An Account activation link has ben sent to {user.email}\")\n return redirect('login')\n\n return render(request, 'userpages/register.html')", "title": "" }, { "docid": "4aca818c16a4474c6c31ba0879dca6a5", "score": "0.6019487", "text": "def save_account(self, *args, **kwargs):\n pass", "title": "" }, { "docid": "91a10c72356691aa1d73afd47a1c2b14", "score": "0.5971067", "text": "def ActivateView(request, **kwargs):\n # if user is associated with given uuid\n try:\n url_uuid = kwargs.pop('uuid')\n url_email = request.GET.get('q', '')\n user = AccountUser.objects.get(uuid=url_uuid)\n except:\n return AccountError(request, 'activate')\n\n # uuid and email match user to prevent random email reset\n if user.email != url_email:\n return AccountError(request, 'activate')\n\n user.is_activated = True\n user.uuid = uuid.uuid4()\n user.save()\n\n return render(request, 'activate.html', {'email': user.email})", "title": "" }, { "docid": "a27c396cd14de63ab8a04500ca30ba61", "score": "0.5966495", "text": "def disable_account(self):\n pass", "title": "" }, { "docid": "6ae5622548475baab2594f72fb1c4b5c", "score": "0.59624034", 
"text": "def activate_view(request, activation_key=None):\n if activation_key:\n account = Account.objects.activate(activation_key)\n if account:\n # activated\n return HttpResponseRedirect(reverse('activate_done'))\n else:\n # activate failed\n data = {'activate_failed': True}\n return render(request, 'sfaccount/activate.html', data)\n else:\n # ask user for the 'activation_key'\n activate_key_url = reverse('activate_key',\n kwargs={'activation_key': 'XXX'})\n activate_key_url = re.sub(r'XXX/$', '', activate_key_url)\n data = {'activate_key_url': activate_key_url}\n return render(request, 'sfaccount/activate.html', data)", "title": "" }, { "docid": "b1c730cdb590cd5616e4a1605edfc595", "score": "0.59463924", "text": "def activate_account(accountname):\n\n try:\n with session_scope() as session:\n mgr = manager_factory.for_session(session)\n verify_account(accountname, mgr)\n\n result = mgr.activate_account(accountname)\n if result:\n return account_db_to_status_msg(result), 200\n else:\n return make_response_error('Error updating account state'), 500\n except AccountNotFoundError as ex:\n return make_response_error('Account not found', in_httpcode=404), 404\n except Exception as e:\n logger.exception('API Error')\n return make_response_error('Error activating account', in_httpcode=500), 500", "title": "" }, { "docid": "8584d78c52d2d5eec8130da14744be96", "score": "0.5895835", "text": "def send_activation_email(self, temp_password=None):\n ctx_dict = {\n 'activation_key': self.activation_key,\n 'expiration_days': settings.ACCOUNT_ACTIVATION_DAYS,\n 'temp_password': temp_password,\n 'user': self\n }\n #subject = render_to_string('registration/activation_email_subject.txt',\n # ctx_dict)\n ## Email subject *must not* contain newlines\n #subject = ''.join(subject.splitlines())\n subject = 'Holiday - invitation'\n \n message = render_to_string('registration/activation_email.txt',\n ctx_dict)\n \n self.email_user(subject, message, settings.DEFAULT_FROM_EMAIL)\n self.invite_sent_on = now()\n self.save()", "title": "" }, { "docid": "1d8632a6521c41ff027d786496b16716", "score": "0.5895187", "text": "def check_activated(self, auto_resolve=False):\n try:\n self.load(auto_resolve)\n except (Exception, RuntimeError) as e:\n _exit(str(e))\n\n if not self.activation.is_valid:\n _exit('Activation code/email not valid.')", "title": "" }, { "docid": "24e597dd8c09fc022fef50dcd940cf63", "score": "0.58895934", "text": "def admin_account( self ):\n raise NotImplementedError( )", "title": "" }, { "docid": "f757cc94ef525517dbe427e676851857", "score": "0.5883301", "text": "def activate(self, request, pk=None):\n\t\tif request.method != 'PUT':\n\t\t\traise BinderMethodNotAllowed()\n\n\t\tself._require_model_perm('activate', request)\n\n\t\tdecoded = request.body.decode()\n\t\ttry:\n\t\t\tbody = json.loads(decoded)\n\t\texcept ValueError:\n\t\t\traise BinderRequestError(_('Invalid request body: not a JSON document.'))\n\n\t\terrors = {}\n\t\tfor item in ['activation_code']:\n\t\t\tif body.get(item) is None:\n\t\t\t\terrors[item] = ['missing']\n\t\tif len(errors) != 0:\n\t\t\traise BinderValidationError(errors)\n\n\t\ttry:\n\t\t\tuser = self.model._default_manager.get(pk=pk)\n\t\texcept (TypeError, ValueError, OverflowError, self.model.DoesNotExist):\n\t\t\tuser = None\n\t\tif user is None or not self.token_generator.check_token(user, body.get('activation_code')):\n\t\t\traise BinderNotFound()\n\n\t\tlogger.info('login for {}/{} via successful activation'.format(user.id, user))\n\n\t\tuser.is_active = 
True\n\t\tuser.save()\n\t\tself.auth_login(request, user)\n\t\treturn self.respond_with_user(request, user.id)", "title": "" }, { "docid": "f07c484db412830092b7340de71ccfcd", "score": "0.5882116", "text": "def create_email_activation_token(self, user):\n activation = self.Activation()\n activation_token_expiry_seconds = int(self.registry.settings.get(\"tm.registry.activation_token_expiry_seconds\", 24 * 3600))\n activation.expires_at = now() + timedelta(seconds=activation_token_expiry_seconds)\n\n self.dbsession.add(activation)\n self.dbsession.flush()\n user.activation = activation\n return activation.code, activation_token_expiry_seconds", "title": "" }, { "docid": "95d70bab8045755b16a8aed907ec0529", "score": "0.5849475", "text": "def activate_users(self, request, queryset):\n for profile in queryset:\n RegistrationProfile.objects.activate_user(profile.activation_key)", "title": "" }, { "docid": "95d70bab8045755b16a8aed907ec0529", "score": "0.5849475", "text": "def activate_users(self, request, queryset):\n for profile in queryset:\n RegistrationProfile.objects.activate_user(profile.activation_key)", "title": "" }, { "docid": "95d70bab8045755b16a8aed907ec0529", "score": "0.5849475", "text": "def activate_users(self, request, queryset):\n for profile in queryset:\n RegistrationProfile.objects.activate_user(profile.activation_key)", "title": "" }, { "docid": "22557e146ac1bf6fb70d24eedae41cd8", "score": "0.5819725", "text": "def __withdraw_from_acc(self):\n action_done = None\n amount = float(input(\"How much you want to be collected?\\n->\"))\n User.__acme_layout(self)\n if User.__log_in(self) == True:\n if User.__wrong_operation_acc(self, self.__users_id) != False:\n if User.__overdraft_blocker_acc(self, self.__users_id) != False:\n if User.__overdraft_protection_acc(self, self.__users_id) != False:\n self.__bank_users_data[self.__users_id][3] -= amount\n if User.__overdraft_protection_acc(self, self.__users_id) != False:\n User.__overdraft_acc_charge(self, self.__users_id)\n User.__new_file(self)\n User.__update_file(self)\n a = self.__bank_users_data[self.__users_id][3]\n print(\"\\nTransaction succeeded!\")\n print(f\"\\n{amount} was collected from yours Checking Account!\")\n print(f\"\\nYour Checking Account currently holds: {a}\\n\")\n action_done = True\n if action_done == True:\n action = \"Checking Account withdraw\"\n User.__transaction_hist(self, self.__users_id, amount, action)\n User.__update_hist_file(self)\n return f\"\\nTransaction succeeded!\\n{amount} was collected from yours Checking Account!\\nYour Checking Account currently holds: {a}\\n\"\n else:\n self.__bank_users_data[self.__users_id][3] += amount\n a = self.__bank_users_data[self.__users_id][3]\n print(\"\\nTransaction denied!\")\n print(\"\\nYou cannot have a depbt over -100,00!\")\n print(f\"\\n{amount} was sturned to yours Checking Account!\")\n print(f\"\\nYour Checking Account currently holds: {a}\\n\")\n action_done = False\n if action_done == False:\n action = \"Checking Account withdraw DENIED!\"\n User.__transaction_hist(self, self.__users_id, amount, action)\n User.__update_hist_file(self)\n return f\"\\nTransaction denied!\\n{amount} was sturned to yours Savings Account!\\nYou cannot have less then 00,00 in your saving account!\\nYour Savings Account currently holds: {a}\\n\"\n else:\n print(\"\\nToo many overdrafts in this account! 
Operation denied!\\n\")\n else:\n print(\"\\nAccount is blocked by too many overdrafts!\\n\")\n else:\n print(\"\\nThis saving account cannot do this operation, \\\n first you need to create a checking account!\\n\")\n else:\n print(\"\\nWrong credentials, try again!\\n\")", "title": "" }, { "docid": "feb79f6a972c91a2fed8a352daf865ed", "score": "0.5812795", "text": "def test_activation_successful(self):\n response = self.client_stub.post('/register/', self.form_data)\n activation_hash_url = response.context[0]['activation_hash_url']\n activation_hash = activation_hash_url.split('/')[-1]\n response = self.client.get('/activation/%s' % activation_hash)\n self.assertEquals(response.templates[0].name,\n 'authentication/activation_successful.html')", "title": "" }, { "docid": "fd16beae0d5a80d87df2771f52026323", "score": "0.57934165", "text": "def respond_user_inactive(self, request, user):\n try:\n email_address = EmailAddress.objects.get(\n user=user,\n email=user.email)\n self.add_message(\n request,\n messages.INFO,\n 'account/messages/'\n 'email_confirmation_sent.txt',\n {'email': user.email})\n email_address.send_confirmation(request)\n except EmailAddress.DoesNotExist:\n pass\n\n return super(AccountAdapter, self).respond_user_inactive(request, user)", "title": "" }, { "docid": "73e4d0e88015d194aba9a468bf9bc08d", "score": "0.5784319", "text": "def send_activation_mail_view(request,\n template_name='sfaccount/activate_send_mail.html'):\n if request.method == \"POST\":\n form = SFEmailForm(request.POST)\n if form.is_valid():\n email = form.cleaned_data['email']\n accounts = Account.objects.filter(user__email__iexact=email)\n for account in accounts:\n account.send_activation_email(async=False)\n return HttpResponseRedirect(reverse('activate'))\n else:\n form = SFEmailForm()\n context = {\n 'form': form,\n 'title': u\"重发激活邮件\",\n }\n return TemplateResponse(request, template_name, context)", "title": "" }, { "docid": "ace180da0047934e9b407253e3de8b9a", "score": "0.57800466", "text": "def touch_account(self, address: Address) -> None:\n ...", "title": "" }, { "docid": "ace180da0047934e9b407253e3de8b9a", "score": "0.57800466", "text": "def touch_account(self, address: Address) -> None:\n ...", "title": "" }, { "docid": "82c8c87897b5f9eaa442474a022a6e30", "score": "0.57708555", "text": "def send_activation_email(email,host,activation_key):\n from django.core.mail import EmailMultiAlternatives\n\n subject, from_email, to = 'Crowdsourcing Account Activation', settings.EMAIL_SENDER, email\n activation_url = 'https://'+ host + '/account-activation/' +activation_key\n text_content = 'Hello, \\n ' \\\n 'Activate your account by clicking the following link: \\n' + activation_url +\\\n '\\nGreetings, \\nCrowdsourcing Team'\n\n\n html_content = '<h3>Hello,</h3>' \\\n '<p>Activate your account by clicking the following link: <br>' \\\n '<a href=\"'+activation_url+'\">'+activation_url+'</a></p>' \\\n '<br><br> Greetings,<br> <strong>crowdresearch App Team</strong>'\n msg = EmailMultiAlternatives(subject, text_content, from_email, [to])\n msg.attach_alternative(html_content, \"text/html\")\n msg.send()", "title": "" }, { "docid": "a14ddf1847cd11f60e872597e9d9fb57", "score": "0.57546717", "text": "def activate(self, actCode):\r\n\r\n data = '{\"activationCode\": \"' + actCode + '\"}'\r\n\r\n try:\r\n req = urllib2.Request(self._urlAct, data, self.HEADERS_ACT)\r\n response = urllib2.urlopen(req)\r\n except Exception:\r\n raise\r\n else:\r\n result = json.loads(response.read())\r\n if result[\"status\"] == 
\"created\":\r\n self.__initData(result[\"thingToken\"])\r\n return True\r\n else:\r\n print(result[\"message\"])\r\n return False", "title": "" }, { "docid": "25338204642cefb8ca08f5c00695b72c", "score": "0.5736472", "text": "def get(self, request):\n token = request.GET.get('token')\n try:\n payload = jwt.decode(token, settings.SECRET_KEY)\n user = User.objects.get(id=payload['user_id'])\n if not user.is_active:\n\n user.is_active = True\n user.save()\n logging.debug('user activation successful')\n return Response({'email': 'Successfully activated'}, status=status.HTTP_200_OK)\n except jwt.ExpiredSignatureError as identifier:\n logging.exception('Exception due to expired signature')\n return Response({'error': 'Activation Expired'}, status=status.HTTP_400_BAD_REQUEST)\n except jwt.exceptions.DecodeError as identifier:\n logging.exception('Exception due to error in decoding')\n return Response({'error': 'Invalid token'}, status=status.HTTP_400_BAD_REQUEST)", "title": "" }, { "docid": "941e70468269f1a0f7b3c920ce94e001", "score": "0.572", "text": "def test_user_account_settings_deactivate_account(self, driver, session):\n settings_page = user.AccountSettingsPage(driver)\n settings_page.goto()\n assert user.AccountSettingsPage(driver, verify=True)\n\n # Scroll down to the Deactivate account section at the bottom of the page and\n # click the Request deactivation button\n settings_page.scroll_into_view(\n settings_page.request_deactivation_button.element\n )\n settings_page.request_deactivation_button.click()\n\n # First click the Cancel button on the Confirmation modal\n settings_page.confirm_deactivation_modal.cancel_button.click()\n assert settings_page.pending_deactivation_message.absent()\n\n # Click the Request deactivation button again and this time click the Request\n # button on the modal\n settings_page.request_deactivation_button.click()\n settings_page.confirm_deactivation_modal.request_button.click()\n\n # Verify that the account pending deactivation message is now displayed\n assert settings_page.pending_deactivation_message.present()\n assert (\n settings_page.pending_deactivation_message.text\n == 'Your account is currently pending deactivation.'\n )\n\n # Click the Undo deactivation request button\n settings_page.undo_deactivation_request_button.click()\n\n # First click Cancel on the Undo Deactivation Request modal\n settings_page.undo_deactivation_modal.cancel_button.click()\n assert settings_page.pending_deactivation_message.present()\n assert (\n settings_page.pending_deactivation_message.text\n == 'Your account is currently pending deactivation.'\n )\n\n # Click the Undo deactivation request button again and this time click the\n # Undo deactivation request button on the modal\n settings_page.undo_deactivation_request_button.click()\n settings_page.undo_deactivation_modal.undo_request_button.click()\n assert settings_page.pending_deactivation_message.absent()\n assert settings_page.request_deactivation_button.present()", "title": "" }, { "docid": "a14f33e2b7ae8ab53bd09f89ff38bcb0", "score": "0.57106584", "text": "def receive_activation(self, owner_actor):\n return None", "title": "" }, { "docid": "48b9c30fd9218a851235984faf498a51", "score": "0.56970227", "text": "def test_activation_password(self):\n p = self._profile()\n u = RegisterProfile.objects.activate_profile(p.activation_key)\n ok_(u.check_password('asdf1234'))", "title": "" }, { "docid": "23699e6c77287555a02cb5615194a287", "score": "0.5695543", "text": "def activate_user(\n strategy, backend, user=None, 
is_new=False, **kwargs\n): # pylint: disable=unused-argument\n if user.is_active:\n return {}\n\n export_inquiry = compliance_api.get_latest_exports_inquiry(user)\n\n # if the user has an export inquiry that is considered successful, activate them\n if not compliance_api.is_exports_verification_enabled() or (\n export_inquiry is not None and export_inquiry.is_success\n ):\n user.is_active = True\n user.save()\n\n return {}", "title": "" }, { "docid": "67e34fb616b48d4dedf6f1cdeab79c78", "score": "0.569067", "text": "def activate_user(UserId=None):\n pass", "title": "" }, { "docid": "5889f714bc1530471e922dd2c1b1b1ef", "score": "0.56905705", "text": "def send_activation(user):\n token = dump_token(user.email, salt='token-activate')\n secret_url = url_for(\n 'token.activate', token=token, _external=True\n )\n\n msg = Message()\n msg.subject = 'Lerkeveld Underground - Activeer Account'\n msg.add_recipient(user.email)\n msg.body = render_template(\n 'emails/activation.txt', user=user, secret_url=secret_url\n )\n msg.html = render_template(\n 'emails/activation.html', user=user, secret_url=secret_url\n )\n asyncio.run(send_async_email(msg))", "title": "" }, { "docid": "b0eb7c8a415628bc75fe3b7775db7cc9", "score": "0.5690322", "text": "def add_account(self):\n \n print(\"Account number \"+ self.account_number +\" has Kshs.\"+ str(self.account_balance) + \" only as balance\")", "title": "" }, { "docid": "17cbadd634e44b1dcb4379b190c2920e", "score": "0.5683946", "text": "def activate_user(self, verification_key,\n username=None, password=None, full_name=None):\n #pylint:disable=too-many-arguments\n at_time = datetime_or_now()\n try:\n token = self.get_token(verification_key=verification_key)\n if token:\n token_username = (\n token.user.username if token.user else token.slug)\n LOGGER.info('active user %s through code: %s ...',\n token_username, verification_key,\n extra={'event': 'activate', 'username': token_username,\n 'verification_key': verification_key,\n 'email_verification_key': token.email_verification_key,\n 'phone_verification_key': token.phone_verification_key})\n user_model = get_user_model()\n with transaction.atomic():\n if token.email_verification_key == verification_key:\n token.email_verification_key = None\n token.email_verified_at = at_time\n elif token.phone_verification_key == verification_key:\n token.phone_verification_key = None\n token.phone_verified_at = at_time\n if not token.user:\n try:\n token.user = user_model.objects.get(\n email__iexact=token.email)\n except user_model.DoesNotExist:\n token.user = user_model.objects.create_user(\n username, email=token.email,\n password=password)\n token.save()\n previously_inactive = has_invalid_password(token.user)\n needs_save = False\n if full_name:\n token.full_name = full_name\n #pylint:disable=unused-variable\n first_name, mid_name, last_name = \\\n full_name_natural_split(full_name)\n token.user.first_name = first_name\n token.user.last_name = last_name\n LOGGER.info('%s (first_name, last_name) needs '\\\n 'to be saved as (\"%s\", \"%s\")', verification_key,\n token.user.first_name, token.user.last_name)\n needs_save = True\n if username:\n token.user.username = username\n LOGGER.info('%s username needs to be saved as \"%s\"',\n verification_key, token.user.username)\n needs_save = True\n if password:\n token.user.set_password(password)\n LOGGER.info('%s password needs to be saved',\n verification_key)\n needs_save = True\n if not token.user.is_active:\n token.user.is_active = True\n LOGGER.info('%s user needs to be 
activated',\n verification_key)\n needs_save = True\n if needs_save:\n token.user.save()\n return token.user, previously_inactive\n except Contact.DoesNotExist:\n pass # We return None instead here.\n return None, None", "title": "" }, { "docid": "e01574a836fb24387b4fa8bbcdb97a13", "score": "0.5681267", "text": "def send_activation_email(self, request):\n\t\tif request.method != 'PUT':\n\t\t\traise BinderMethodNotAllowed()\n\n\t\t# For lack of a better check\n\t\tself._require_model_perm('reset_password', request)\n\n\t\tdecoded = request.body.decode()\n\t\ttry:\n\t\t\tbody = json.loads(decoded)\n\t\texcept ValueError:\n\t\t\traise BinderRequestError(_('Invalid request body: not a JSON document.'))\n\n\t\tlogger.info('activation email attempt for {}'.format(body.get('email', '')))\n\n\t\tif body.get('email') is None:\n\t\t\traise BinderValidationError({'email': ['missing']})\n\n\t\ttry:\n\t\t\tuser = self.model._default_manager.get(email=body.get('email'))\n\t\texcept self.model.DoesNotExist:\n\t\t\traise BinderNotFound()\n\n\t\tif user.is_active:\n\t\t\tif user.last_login is None:\n\t\t\t\t# TODO: Figure out a way to make this customisable without\n\t\t\t\t# allowing injection of arbitrary URLs (phishing!)\n\t\t\t\tself._send_activation_email(request, user)\n\t\t\t\tresponse = JsonResponse({'code': 'sent'})\n\t\t\t\tresponse.status_code = 201\n\t\t\telse:\n\t\t\t\tresponse = JsonResponse({'code': 'already active'})\n\t\telse:\n\t\t\tresponse = JsonResponse({'code': 'blacklisted'})\n\t\t\tresponse.status_code = 400\n\n\t\treturn response", "title": "" }, { "docid": "4701c1cf9fb51ea1ad6fb94a2624b0f4", "score": "0.5670594", "text": "def activate(self):\n pass", "title": "" }, { "docid": "41cd1553c0e9279d724a86196b6e5665", "score": "0.5656939", "text": "def _activation(self, scan):\n raise NotImplementedError()", "title": "" }, { "docid": "43fd7a2df943bbef4d8b6905435d5cf3", "score": "0.5649862", "text": "def activate(username):\n user = User.get_by_username(username)\n\n if not user:\n click.echo('User does not exist')\n return\n\n if user.is_active:\n click.echo('User is already active')\n return\n\n user.is_active = True\n\n try:\n correct = True\n db.session.commit()\n\n click.echo('User activated')\n\n except Exception as e:\n # Catch anything unknown\n correct = False\n\n click.echo('Error activating user')\n click.echo(e)\n\n finally:\n if not correct:\n # Cleanup\n db.session.rollback()", "title": "" }, { "docid": "d2bd62919b100d60e8c99acf6ae5a7f2", "score": "0.56491864", "text": "def update_account(self, force=False):\n pass", "title": "" }, { "docid": "ff57f35ea7f3b606ac1a20017f8f69e5", "score": "0.5631858", "text": "def load_account(self):\n pass", "title": "" }, { "docid": "80d50d7c3fd6d34513a5dde72e979da9", "score": "0.5609027", "text": "def activate(self) -> None:\n pass", "title": "" }, { "docid": "7b370d1d02d293de817480352fefdd0a", "score": "0.5604695", "text": "def activatePass(*args, **kwargs):\n \n pass", "title": "" }, { "docid": "bebd810ddb2433a74fc960e9de4e9090", "score": "0.56015974", "text": "def confirm_email_verify(self, app_name, activation_key):\n # Make sure the key we're trying conforms to the pattern of a\n # SHA1 hash; if it doesn't, no point trying to look it up in\n # the database.\n if SHA1_RE.search(activation_key):\n try:\n user = self.get(app__name = app_name, activation_key=activation_key)\n except self.model.DoesNotExist:\n return False\n if not user.activation_key_expired():\n user.email_verified = True\n user.activation_key = self.model.ACTIVATED\n 
user.save()\n return user\n return False", "title": "" }, { "docid": "d2c16952a0693b571c073d9f6175d8aa", "score": "0.55846274", "text": "def mark_an_account_as_active(self, account_id):\n data = {\n 'JSONString': ''\n }\n url = base_url + account_id + '/active'\n resp = zoho_http_client.post(url, self.details, self.headers, data)\n return parser.get_message(resp)", "title": "" }, { "docid": "989292f15c0b44c0d5b60c0f044312c0", "score": "0.55774647", "text": "def generate_activation_link(user, request=None, send=True):\n token = default_token_generator.make_token(user)\n uidb64 = urlsafe_base64_encode(\n force_bytes(user.username)).decode()\n domain = 'https://{}'.format(get_current_site(request))\n subject = 'Author\\'s Heaven account email verification'\n route = \"api/activate\"\n url = \"{}/{}/{}/{}/\".format(domain, route, token, uidb64)\n message = 'Please follow the following link to activate your account \\n {}'.format(\n url)\n from_email = settings.EMAIL_HOST_USER\n to_list = [user.email]\n if send:\n send_mail(\n subject=subject,\n from_email=from_email,\n recipient_list=to_list,\n message=message,\n fail_silently=False)\n\n return token, uidb64", "title": "" }, { "docid": "c5a5fb6abf65554a84231c2542f82394", "score": "0.55761594", "text": "def test_account_registration(self):\n User = get_user_model() # pylint: disable=invalid-name\n email = '[email protected]'\n password = 's4f3passw0rd!'\n\n get_response = self.client.get(reverse('registration_register'))\n self.assertEqual(200, get_response.status_code)\n self.assertIsInstance(get_response.context['form'], UserCreationForm)\n self.assertTemplateUsed('registration/registration_form.html')\n self.assertTemplateUsed('base.html')\n\n post_response = self.client.post(\n reverse('registration_register'),\n data={\n 'email': email,\n 'password1': password,\n 'password2': password,\n },\n )\n self.assertRedirects(post_response, reverse('registration_complete'))\n self.assertTrue(\n User.objects.filter(email=email).exists())\n user = User.objects.get(email=email)\n self.assertTrue(user.check_password(password))\n self.assertFalse(user.is_active)\n self.assertFalse(self.client.login(username=email, password=password))\n\n self.assertEqual(len(mail.outbox), 1)\n self.assertEqual(mail.outbox[0].to, [email])\n self.assertEqual(\n mail.outbox[0].subject, 'Account activation on testserver')\n urlmatch = re_search(\n r'https?://[^/]*(/.*activate/\\S*)',\n mail.outbox[0].body)\n self.assertIsNotNone(urlmatch, 'No URL found in sent email')\n url_path = urlmatch.groups()[0]\n self.assertEqual(\n reverse('registration_activate',\n kwargs={'activation_key': url_path.split('/')[3]}),\n url_path)\n activation_get_response = self.client.get(url_path)\n self.assertRedirects(\n activation_get_response,\n reverse('registration_activation_complete'))\n # reload user from DB\n user = User.objects.get(email=email)\n self.assertTrue(user.is_active)\n self.assertTrue(self.client.login(username=email, password=password))", "title": "" }, { "docid": "c647c7b3b2b8332e275f0253a8952753", "score": "0.5572505", "text": "def Activated(self):\r\n return", "title": "" }, { "docid": "f80cb2ccc84cc0b5b4d24dcde6e9be48", "score": "0.5569628", "text": "def activate(self):\n \n pass", "title": "" }, { "docid": "2036e58d279633e0c3e1acccfe7e2b02", "score": "0.5568924", "text": "def activate(self, *args, **kwargs):\n pass", "title": "" }, { "docid": "799bac5db1722451f7ee1288f904afdc", "score": "0.5567429", "text": "def test_active_account(self):", "title": "" }, { "docid": 
"d5385944c2b2fe7a29b0e420abc70b4b", "score": "0.55596393", "text": "def activate(self, sid):\n return self.update(sid, status=Account.ACTIVE)", "title": "" }, { "docid": "e1ff5c82855fe3da2673cfa7afc3f713", "score": "0.554876", "text": "def send_activated_email(user_email, user_email_hash):\r\n send_email('You\\'re all set to access our benchmarks!',\r\n [user_email],\r\n 'activated-email.html',\r\n {'title': 'You\\'re all set to access our benchmarks!',\r\n 'email_hash': user_email_hash})", "title": "" }, { "docid": "4a303b158cf75be115c8a3ed49680d03", "score": "0.5545594", "text": "def activation_key_expired(self):\n expiration_date = datetime.timedelta(days=settings.ACCOUNT_ACTIVATION_DAYS)\n return self.activation_key == RegistrationManager.ACTIVATED or \\\n (self.date_joined + expiration_date <= datetime_now())", "title": "" }, { "docid": "9af898fd15648e4bd6437d195a7e332f", "score": "0.5545312", "text": "def _handle_account_status(self, param, disable=False):\n action_result = self.add_action_result(ActionResult(dict(param)))\n summary = action_result.update_summary({})\n actstr = \"disabled\" if disable else \"enabled\"\n if not self._ldap_bind():\n return RetVal(action_result.set_status(phantom.APP_ERROR))\n\n user = param['user'].lower()\n ar_data = {}\n\n # let the analyst use samaccountname if they wish\n if param[\"use_samaccountname\"]:\n user_info = self._sam_to_dn([user])\n ar_data[\"samaccountname\"] = user\n if user_info[user] is False:\n return RetVal(action_result.set_status(\n phantom.APP_ERROR,\n \"No users found.\"\n ))\n else:\n ar_data[\"user_dn\"] = user_info[user]\n ar_data[\"samaccountname\"] = user\n user = user_info[user]\n else:\n ar_data[\"user_dn\"] = user\n\n try:\n query_params = {\n \"attributes\": \"useraccountcontrol\",\n \"filter\": \"(distinguishedname={})\".format(user)\n }\n resp = json.loads(self._query(query_params))\n if len(resp['entries']) == 0:\n return RetVal(action_result.set_status(\n phantom.APP_ERROR,\n \"No user found.\"\n ))\n\n uac = int(resp['entries'][0]['attributes']['userAccountControl'])\n\n # capture the original status for logging\n init_status = \"disabled\" if (uac & 0x02 != 0) else \"enabled\"\n ar_data[\"starting_status\"] = init_status\n\n if disable:\n mod_uac = uac | 0x02\n else: # enable\n mod_uac = uac & (0xFFFFFFFF ^ 0x02)\n\n res = self._ldap_connection.modify(\n user, {'userAccountControl': [\n (ldap3.MODIFY_REPLACE, [mod_uac])\n ]})\n if not res:\n return RetVal(action_result.set_status(\n phantom.APP_ERROR,\n self._ldap_connection.result,\n ))\n except Exception as e:\n self.debug_print(\"[DEBUG] disable_account error = {}\".format(e))\n return RetVal(action_result.set_status(\n phantom.APP_ERROR,\n \"\",\n exception=e\n ))\n\n summary['account_status'] = actstr\n action_result.add_data(ar_data)\n return RetVal(action_result.set_status(\n phantom.APP_SUCCESS\n ))", "title": "" }, { "docid": "1ff59aa2c74ccd7ac8fe185fd41c85cd", "score": "0.5537645", "text": "def activate(request, klass, id):\n if not request.user.is_authenticated():\n url = reverse(klass.__name__.lower() + '_activate', args=[id])\n return HttpResponseRedirect(reverse('user_signin') + '?next=' + url)\n\n obj = klass.get_object(id)\n if not obj: raise Http404\n if not obj.can_activate(request.user):\n return HttpResponseForbidden()\n\n obj.is_public = True\n obj.save()\n obj.current=klass.set_current(obj)\n obj.save()\n\n return HttpResponseRedirect(obj.get_absolute_slugurl())", "title": "" }, { "docid": "41f843d637333cb2e373cc8d11ea81aa", "score": 
"0.553406", "text": "def confirm(self):\n\n self.confirmed_at = datetime.datetime.now()\n self.confirmed = True\n self.active = True\n self.save()", "title": "" }, { "docid": "473093b1ac96ebc17b59327921145fc2", "score": "0.5516017", "text": "def _create_new_account(self):\n\n self.generated_password = self._generate_password()\n self._send_registration_mail()\n\n self._account_store.create_account(\n self.username_given,\n self.generated_password,\n self.email_given)\n\n self.session.msg(\n \"Your account has been created, and your randomly \"\n \"generated password has been sent to %s. Please \"\n \"check your mail, find the password, and re-connect.\\n\" %\n self.email_given)\n self.session.msg(\"Disconnecting...\")\n self.session.disconnect_client()", "title": "" }, { "docid": "6e0ec652be86e757b7e9dae604d77470", "score": "0.5509", "text": "def register_account_for_deletion(self, beneficiary: Address) -> None:\n ...", "title": "" }, { "docid": "3b01775bf4e8c9e73acef8103e229d74", "score": "0.5494275", "text": "def update_account():\n add_entities(account, async_add_entities, tracked)", "title": "" }, { "docid": "be8abc231811e7d37c3426b307317ad1", "score": "0.5493961", "text": "def get_when_not_logged_in(self):\n code = self.request.matchdict.get(\"code\")\n id_ = self.request.matchdict.get(\"id\")\n\n try:\n id_ = int(id_)\n except ValueError as err:\n raise httpexceptions.HTTPNotFound() from err\n\n activation = models.Activation.get_by_code(self.request.db, code)\n if activation is None:\n self.request.session.flash(\n jinja2.Markup(\n _(\n \"We didn't recognize that activation link. \"\n \"Have you already activated your account? \"\n \"If so, try logging in using the username \"\n \"and password that you provided.\"\n ),\n ),\n \"error\",\n )\n return httpexceptions.HTTPFound(location=self.request.route_url(\"login\"))\n\n user = models.User.get_by_activation(self.request.db, activation)\n if user is None or user.id != id_:\n raise httpexceptions.HTTPNotFound()\n\n user.activate()\n\n self.request.session.flash(\n jinja2.Markup(\n _(\n \"Your account has been activated! 
\"\n \"You can now log in using the password you provided.\"\n ),\n ),\n \"success\",\n )\n\n self.request.registry.notify(ActivationEvent(self.request, user))\n\n return httpexceptions.HTTPFound(\n location=self.request.route_url(\"login\", _query={\"username\": user.username})\n )", "title": "" }, { "docid": "e42e1002ce76c3bf6dceae8d1c2316a4", "score": "0.5486973", "text": "def confirm_registration_view(request, user_id, token):\n user_id = force_text(urlsafe_base64_decode(user_id))\n user = User.objects.get(pk=user_id)\n\n if user and account_activation_token.check_token(user, token):\n user.is_active = True\n user.save()\n return redirect('accounts:login')", "title": "" }, { "docid": "74a4f5259a3e6e41e8230ec08e8eff75", "score": "0.54833215", "text": "def account_moid(self, account_moid):\n\n self._account_moid = account_moid", "title": "" }, { "docid": "74a4f5259a3e6e41e8230ec08e8eff75", "score": "0.54833215", "text": "def account_moid(self, account_moid):\n\n self._account_moid = account_moid", "title": "" }, { "docid": "74a4f5259a3e6e41e8230ec08e8eff75", "score": "0.54833215", "text": "def account_moid(self, account_moid):\n\n self._account_moid = account_moid", "title": "" }, { "docid": "da8a1b96ec79786d09e31ae810510d14", "score": "0.547247", "text": "def Account(self,\n acc_idx: int) -> Bip44Base:\n pass", "title": "" }, { "docid": "4220c9e4e67097400714732ce9a56c83", "score": "0.5470419", "text": "def _deactivate(self):\n self.active = False\n self.date_deactivated = timezone.now()", "title": "" }, { "docid": "d2166e6960ef759804251a3967a0f69b", "score": "0.5462767", "text": "def resend_activation_email(self, request, queryset):\n if Site._meta.installed:\n site = Site.objects.get_current()\n else:\n site = RequestSite(request)\n\n for profile in queryset:\n if not profile.activation_key_expired():\n profile.send_activation_email(site)", "title": "" }, { "docid": "d2166e6960ef759804251a3967a0f69b", "score": "0.5462767", "text": "def resend_activation_email(self, request, queryset):\n if Site._meta.installed:\n site = Site.objects.get_current()\n else:\n site = RequestSite(request)\n\n for profile in queryset:\n if not profile.activation_key_expired():\n profile.send_activation_email(site)", "title": "" }, { "docid": "d2166e6960ef759804251a3967a0f69b", "score": "0.5462767", "text": "def resend_activation_email(self, request, queryset):\n if Site._meta.installed:\n site = Site.objects.get_current()\n else:\n site = RequestSite(request)\n\n for profile in queryset:\n if not profile.activation_key_expired():\n profile.send_activation_email(site)", "title": "" } ]
f711ca37a1fdec93e8a60278f7c73cba
Defines the parameters for the URL file extension condition.
[ { "docid": "f560c1554531ace2643728faa2cfc4c3", "score": "0.0", "text": "def __init__(__self__, *,\n extensions: Sequence[str],\n odata_type: str):\n pulumi.set(__self__, \"extensions\", extensions)\n pulumi.set(__self__, \"odata_type\", odata_type)", "title": "" } ]
[ { "docid": "3c8dedea53fa6892400d0ed462827869", "score": "0.69008166", "text": "def __init__(__self__, *,\n name: str,\n parameters: 'outputs.UrlFileExtensionConditionParametersResponse'):\n pulumi.set(__self__, \"name\", 'UrlFileExtension')\n pulumi.set(__self__, \"parameters\", parameters)", "title": "" }, { "docid": "1ad295e0231db545c4caa449f857787f", "score": "0.6313625", "text": "def parameters(self) -> 'outputs.UrlFileExtensionConditionParametersResponse':\n return pulumi.get(self, \"parameters\")", "title": "" }, { "docid": "8d02b303cf76723a12c99eaaa2151147", "score": "0.54066294", "text": "def file_filter(url, filetypes=None):\n if not filetypes: return True\n return any(filetype in url.split(\".\")[-1] for filetype in filetypes)", "title": "" }, { "docid": "10889ec395668b7ef9149eba76a8da64", "score": "0.5397952", "text": "def right_configuration_extension(self):\r\n file_name=[]\r\n \r\n if self.file_exist():\r\n \r\n #Identify the file type, file_name has the name of the file [0] and [1] the extension\r\n file_name=self.config_file.split('.',1)\r\n \r\n if file_name[1]=='txt' or file_name[1]=='ini':# or file_name[1]=='csv':\r\n return True\r\n\r\n return False", "title": "" }, { "docid": "3aefa8a39ff920e9f0c0dcb3f2024fc1", "score": "0.5387804", "text": "def addFilenameFilter(call,args,kwargs,nodeClass):\n pass", "title": "" }, { "docid": "2318a8179ea40ade465732ba7c1e13a8", "score": "0.5387582", "text": "def supports_extension(self):", "title": "" }, { "docid": "056db42c44acb3d0589187d4c91528b0", "score": "0.53665686", "text": "def is_param_upload_only(self, foo_param):\n if \"racDownload\" in foo_param.interfaces or \"emDownload\" in foo_param.interfaces:\n return False\n else:\n return True", "title": "" }, { "docid": "09e4a0ddb9aa209803267636d7890ca2", "score": "0.53636885", "text": "def is_param_download_only(self, foo_param):\n if \"racUpload\" in foo_param.interfaces or \"emUpload\" in foo_param.interfaces:\n return False\n else:\n return True", "title": "" }, { "docid": "f60c5a13add9f1383e08480d514457a3", "score": "0.53494656", "text": "def uri_param(event_name, param, value, **kwargs):\n cli_argument = param\n qualified_param_name = '.'.join(event_name.split('.')[1:])\n if qualified_param_name in PARAMFILE_DISABLED or \\\n getattr(cli_argument, 'no_paramfile', None):\n return\n else:\n return _check_for_uri_param(cli_argument, value)", "title": "" }, { "docid": "4c77d2448a3a7e0f7365d70a25eda0ce", "score": "0.53431416", "text": "def ParameterFilePath(self) -> str:", "title": "" }, { "docid": "0b2b6bec0b62b7cc9d04068f5621d7a8", "score": "0.5333374", "text": "def addFilenameSearch(self, name, value):\n pass", "title": "" }, { "docid": "06931068448c8f8604ca58e854ed7535", "score": "0.53195965", "text": "def checkURLContainsExtension(extensionConfig, url):\n for type in extensionConfig:\n # Return true if it ends with one of the extensions, and debug log it\n if url.endswith(extensionConfig[type]):\n logger.info(\"url (\"+url+\") ends with \"+extensionConfig[type])\n return True\n # Return true if it exists with a '?' 
after it, and debug log it\n updExt = extensionConfig[type] + \"?\"\n if updExt in url:\n logger.info(\"url (\"+url+\") contains \"+updExt)\n return True\n return False", "title": "" }, { "docid": "c12315c05f82599d71f24ecc5730af87", "score": "0.5290514", "text": "def get_allowed_http_params(self):\n return ['cmd', 'target', 'targets[]', 'current', 'tree',\n 'name', 'content', 'src', 'dst', 'cut', 'init',\n 'type', 'width', 'height', 'upload[]', 'dirs[]']", "title": "" }, { "docid": "a17032a8f4c0526735882ef0e4080f96", "score": "0.5276041", "text": "def __init__(self, file_type):\n self.SCHEME = r'\\b(?:http|ftp)s?'\n self.TLD = r'(?:xn--[a-zA-Z0-9]{4,20}|[a-zA-Z]{2,20})'\n self.DNS_NAME = r'(?:[a-zA-Z0-9\\-\\.]+\\.' + self.TLD + ')'\n self.NUMBER_0_255 = r'(?:25[0-5]|2[0-4][0-9]|1[0-9]{2}|[1-9][0-9]|[0-9])'\n self.IPv4 = r'(?:' + self.NUMBER_0_255 + r'\\.){3}' + self.NUMBER_0_255\n self.SERVER = r'(?:' + self.IPv4 + '|' + self.DNS_NAME + ')'\n self.PORT = r'(?:\\:[0-9]{1,5})?'\n self.SERVER_PORT = self.SERVER + self.PORT\n self.URL_PATH = r'(?:/[a-zA-Z0-9\\-\\._\\?\\,\\'/\\\\\\+&%\\$#\\=~]*)?' # [^\\.\\,\\)\\(\\s\"]\n self.URL_RE = self.SCHEME + r'\\://' + self.SERVER_PORT + self.URL_PATH\n self.re_url = re.compile(self.URL_RE)\n\n self.RE_PATTERNS = {\n 'url_v1': re.compile(self.URL_RE),\n 'url_v2': re.compile(\n r'[a-zA-Z]+://[-a-zA-Z0-9.]+(?:/[-a-zA-Z0-9+&@#/%=~_|!:,.;]*)?(?:\\?[a-zA-Z0-9+&@#/%=~_|!:,.;]*)?'),\n 'ipv4_v1': re.compile(self.IPv4),\n 'ipv4_v2': re.compile(\n r'\\b(?:(?:25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9]?[0-9])\\.){3}(?:25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9]?[0-9])\\b'),\n 'email_v1': re.compile(r'(?i)\\b[A-Z0-9._%+-]+@' + self.SERVER + '\\b'),\n 'email_v2': re.compile(r'[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\\.[a-zA-Z]{2,6}'),\n 'domain': re.compile(r'(?=^.{1,254}$)(^(?:(?!\\d+\\.|-)[a-zA-Z0-9_\\-]{1,63}(?<!-)\\.?)+(?:[a-zA-Z]{2,})$)'),\n \"exe_name\": re.compile(\n r\"(?i)\\b\\w+\\.(EXE|PIF|GADGET|MSI|MSP|MSC|VBS|VBE|VB|JSE|JS|WSF|WSC|WSH|WS|BAT|CMD|DLL|SCR|HTA|CPL|CLASS|JAR|PS1XML|PS1|PS2XML|PS2|PSC1|PSC2|SCF|LNK|INF|REG)\\b\"),\n 'btc': re.compile(r'\\b[13][a-km-zA-HJ-NP-Z1-9]{25,34}\\b'),\n 'file_path': re.compile(r'((?:(?:[A-Za-z]:)|\\/home|(%[A-Za-z]+%\\\\))[^\\.]+\\.[A-Za-z]{2,8})')\n }", "title": "" }, { "docid": "41f10072560ade94adf1b95459a7456b", "score": "0.5264212", "text": "def get_url_params(self, param_map={}):\n param_map.setdefault('dotpath', 'dotpath')\n return [r'(?P<{dotpath}>[\\w\\.]+)'.format(**param_map)]", "title": "" }, { "docid": "401aa1b7a39a6a354f125effab60849c", "score": "0.5230211", "text": "def file_extension(cls):\n raise NotImplementedError", "title": "" }, { "docid": "2ccab0727ad549c5d89bae9f691a51ad", "score": "0.5229411", "text": "def allowed_file(filename): \n return '.' 
in filename and \\\n filename.rsplit('.', 1)[1].lower() in app.config['ALLOWED_EXTENSIONS']", "title": "" }, { "docid": "0756eeca139a53b6318c08ea5522a4cc", "score": "0.5214552", "text": "def test_extension_param(self, mocker):\n self.mock_ds9(mocker)\n imviewer = self.make_imview()\n\n imviewer.disp_parameters['extension'] = 'frame'\n assert imviewer.get_extension_param() == 'all'\n\n imviewer.disp_parameters['extension'] = 'cube'\n assert imviewer.get_extension_param() == 'all'\n\n imviewer.disp_parameters['extension'] = 'first'\n assert imviewer.get_extension_param() == 0\n\n imviewer.disp_parameters['extension'] = '1'\n assert imviewer.get_extension_param() == 1\n\n imviewer.disp_parameters['extension'] = 'SCI'\n assert imviewer.get_extension_param() == 'SCI'\n\n # special case for display, but not for headers -\n # if actually present, will show extension header;\n # if automagic only, will show all headers\n imviewer.disp_parameters['extension'] = 'S/N'\n assert imviewer.get_extension_param() == 'S/N'", "title": "" }, { "docid": "394be22c0c71d21a3f7ff652f65e8766", "score": "0.5207765", "text": "def get_extension():", "title": "" }, { "docid": "0787eaf4445f0c9fb253e7e11560fcd6", "score": "0.5207066", "text": "def file_to_apply(filename, extension_list = None, exclude_file_list = None, \n include_file_list = None, verbose = False):\n if extension_list is None:\n extension_list = []\n if exclude_file_list is None:\n exclude_file_list = []\n if include_file_list is None:\n include_file_list = []\n \n if verbose:\n print \" testing\", filename\n if include_file_list == [] and exclude_file_list == []:\n condition = (os.path.splitext(filename)[1] in extension_list)\n elif include_file_list != [] and exclude_file_list == []:\n condition = (os.path.splitext(filename)[1] in extension_list\n and (os.path.split(filename)[1] in include_file_list \n or os.path.splitext(os.path.split(filename)[1])[0] \n in include_file_list ))\n elif include_file_list == [] and exclude_file_list != []:\n condition = (os.path.splitext(filename)[1] in extension_list\n and (os.path.split(filename)[1] not in exclude_file_list \n or os.path.splitext(os.path.split(filename)[1])[0] \n not in exclude_file_list))\n else:\n raise ValueError(\"Both an exclude list and include list were provided.\"\n \"Please clarify by using either or None. 
Currently, \"\n \"include = %s, exclude = %s\" \n % (exclude_file_list, include_file_list))\n return condition", "title": "" }, { "docid": "21891581836fea6c595f669ce3355d7c", "score": "0.520361", "text": "def filename_allowed(self, filename, extensions=None):\n _, ext = os.path.splitext(filename)\n return self.extension_allowed(ext, extensions)", "title": "" }, { "docid": "a216ab805342551ce967985eb477eadc", "score": "0.5189033", "text": "def ext_matches(fp, *ext):\n return fp.lower().endswith(ext)", "title": "" }, { "docid": "e2909341f727558e8813065d3c4793f1", "score": "0.51712453", "text": "def test_required_filepaths_are_defined():\n for item in ['external','internal']:#,'test','dev']:\n assert item in params.fileDict", "title": "" }, { "docid": "48f56eca7ba079130cc1e030e0ef7a4c", "score": "0.51621515", "text": "def define_parameters(self):\n self.add_argument('--inputfile',\n \tdest\t= 'inputfile',\n \ttype\t= str,\n \toptional\t= False,\n \thelp = 'input file')", "title": "" }, { "docid": "fcfee0eab2aec32524c6eeae0dd28ac2", "score": "0.51621425", "text": "def required_file_extensions(self):\n raise Exception(\"Missing implementation of abstract property.\")", "title": "" }, { "docid": "606a9c3762dd33a022e04cd0c070b5a8", "score": "0.5155268", "text": "def allowed_file(filename, fext):\n return '.' in filename and filename.rsplit('.', 1)[1] in fext", "title": "" }, { "docid": "35997d7f78306ce2d53da1fa10a1c82e", "score": "0.51441044", "text": "def allowed_file(filename):\n return '.' in filename and filename.rsplit('.',1)[1] in app.config['ALLOWED_EXTENSIONS']", "title": "" }, { "docid": "33e4a7df4e54e933c2083b69be4ed468", "score": "0.5143999", "text": "def allowed(filename):\n return '.' in filename and filename.split('.')[1] in app.config['ALLOWED_EXTENSIONS']", "title": "" }, { "docid": "12b8b10f0ed8dc815036dce67053785f", "score": "0.5137862", "text": "def build_filename_filter(filename_filter):\n\n filters = filename_filter.split(',')\n result = '&options[producer_granule_id][pattern]=true'\n for filter in filters:\n result += '&producer_granule_id[]=' + filter_add_wildcards(filter)\n return result", "title": "" }, { "docid": "53096c0d614b144f65400c7f42d7d067", "score": "0.5134202", "text": "def valid_url_extension(url, extension_list=VALID_IMAGE_EXTENSIONS):\n return any([url.endswith(e)\n for e in extension_list])", "title": "" }, { "docid": "87daef0964db56c5808b53c6fdc8a146", "score": "0.5117276", "text": "def set_file_ext(self,val):\n if isinstance(val,str):\n if val[0] != '.':\n value = '.' + val\n else:\n value = val\n self._run_ext = value\n else:\n raise AttributeError('Source file extension can be only a string')", "title": "" }, { "docid": "28e1365dbc5796731acf8faf90bd35c0", "score": "0.51065105", "text": "def extension_allowed(self, ext, extensions=None):\n\n extensions = extensions or self.extensions\n if not extensions:\n return True\n if ext.startswith('.'):\n ext = ext[1:]\n return ext.lower() in extensions", "title": "" }, { "docid": "4ad5287ac6c6c1d5dc5d5e4574a538eb", "score": "0.5094281", "text": "def allowed_file(filename):\n return '.' in filename", "title": "" }, { "docid": "0d64ff57370de0c30eaed75914e5d85d", "score": "0.50939137", "text": "def allowed_file(filename):\n return '.' in filename and \\\n filename.rsplit('.', 1)[1] in app.config['ALLOWED_EXTENSIONS']", "title": "" }, { "docid": "3df402cf3764669335515e862b82aed4", "score": "0.5092217", "text": "def _allowed_file(filename):\n return '.' 
in filename and filename.rsplit('.', 1)[1].lower() in config.ALLOWED_EXTENSIONS", "title": "" }, { "docid": "c74287dbe422adf92ce99ce6d6b154d4", "score": "0.50909483", "text": "def allowed_file(filename):\n    return '.' in filename and \\\n        filename.rsplit('.', 1)[1].lower() in Configuration.ALLOWED_EXTENSIONS", "title": "" }, { "docid": "3f151f1f8807a099e00a3f1753ea338f", "score": "0.508263", "text": "def allowed_file(filename):\n\treturn '.' in filename and \\\n\t\tfilename.rsplit('.', 1)[1] in app.config['ALLOWED_EXTENSIONS']", "title": "" }, { "docid": "beca047973750e09e818eb5b7b897420", "score": "0.50794977", "text": "def pass_custom_regexes(entity_name):\n    # ensure that version is in the filename\n    if \"_v\" not in entity_name and \"_V\" not in entity_name:\n        do_notok(filename, 'Missing version portion of filename (ie. _v1)')\n        return False\n    return True", "title": "" }, { "docid": "b48a841bdb686059bb1b9f37156d6c1a", "score": "0.507702", "text": "def file_allowed(self, fs, extensions=None):\n    return self.filename_allowed(fs.filename, extensions)", "title": "" }, { "docid": "26b16a458ce908c7b63e97f9c1526476", "score": "0.50721395", "text": "def allowed_file(filename):\n    return filename.endswith(ALLOWED_EXTENSIONS)", "title": "" }, { "docid": "47bf20adb53a0529d6c67a6acee7366b", "score": "0.5064349", "text": "def url(self, **kwargs):\n\n        if not hasattr(self, 'file') or not self.file:\n            return \"\"\n\n        url = self.file['url']\n        args = ['{0}={1}'.format(k, v) for k, v in kwargs.items()]\n\n        if args:\n            url += '?{0}'.format('&'.join(args))\n\n        return url", "title": "" }, { "docid": "bfe51db2b7ee80accf04d4b294a7b6c4", "score": "0.50590116", "text": "def file_allowed(self, storage, basename):\n        return self.extension_allowed(extension(basename))", "title": "" }, { "docid": "b8c2c95805fbb4ae5614953522a1318a", "score": "0.50569415", "text": "def valid_parameter_filenames(self) -> List[str]:\n        match = re.search(r'filenames: (.+?)$', self.__str__())\n        return match.group(1).split(',')", "title": "" }, { "docid": "873e4d3d09cb8f79f975b07679d79cc4", "score": "0.5056643", "text": "def allowed_file(file_ext):\n    return file_ext.strip(\".\") in ALLOWED_EXTENSIONS", "title": "" }, { "docid": "d28158cbf3fd4dddaa86b1589dce937e", "score": "0.5052915", "text": "def allowed_file(self, filename):\n        return '.' in filename and filename.rsplit('.', 1)[1].lower() in self.ALLOWED_EXTENSIONS", "title": "" }, { "docid": "53fa2228b709308874551b399d792f75", "score": "0.5040305", "text": "def allowed_file(filename):\n    return \".\" in filename and filename.rsplit(\".\", 1)[1].lower() in ALLOWED_EXTENSIONS", "title": "" }, { "docid": "a341bc59250f521388e3a9ecdd1da6be", "score": "0.50329024", "text": "def allowed_extension(filename):\n\n    val = False\n    if path.splitext(filename)[-1].lower() in ALLOWED_EXTENSIONS:\n        val = True\n\n    return val", "title": "" }, { "docid": "5ef927624b5b40e8a6500b907306e211", "score": "0.5032429", "text": "def allowed_file(filename):\r\n    return '.' in filename and \\\r\n        filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS", "title": "" }, { "docid": "c86969a31ae3ab540e6cf0fb67d500bc", "score": "0.50309706", "text": "def allowed_file(filename):\n    return '.' 
in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS", "title": "" }, { "docid": "7207f313f4012fde1791408f4bcf35de", "score": "0.50301814", "text": "def build_url(self, filename): \r\n        # sanitize url to prevent multiple slashes \r\n        remote_url = urlparse(self.param('url'))\r\n        url = \"%s://%s%s\" % ( remote_url.scheme, remote_url.netloc, filename)\r\n        if 'QUERY_STRING' in request.environ:\r\n            query_string = request.environ['QUERY_STRING']\r\n            rxremove = self.param('rxremove')\r\n            if rxremove is not None:\r\n                query_string = re.sub(rxremove, '', query_string)\r\n            url = url + '?' + query_string\r\n        return url", "title": "" }, { "docid": "d37250ffb7659d3f51c147b41f5ebf60", "score": "0.5021711", "text": "def allowed_file(filename):\n    return '.' in filename and \\\n        filename.rsplit('.', 1)[1].lower()\\\n        in app.config['ALLOWED_EXTENSIONS']", "title": "" }, { "docid": "b69f3a80c5d660c2d6298c3d9cb6cab4", "score": "0.5016731", "text": "def allowed_modfile(filename):\n    return '.' in filename and filename.rsplit('.', 1)[-1].lower() in ALLOWED_EXTENSIONS  # it it's extension is allowed", "title": "" }, { "docid": "41a2298af84b3d4ef880f6583f57f260", "score": "0.5016099", "text": "def guess_extension(content_type):", "title": "" }, { "docid": "4111f6b7237739829cdadbc134d3c88f", "score": "0.4995689", "text": "def _include_file(self, root_parts, f):\n        if len(root_parts) and root_parts[0] == \"lwc\":\n            # only include expected file extensions within lwc components\n            return f.lower().endswith((\".js\", \".js-meta.xml\", \".html\", \".css\", \".svg\"))\n        return True", "title": "" }, { "docid": "7c7806c801a9c5fd17cd661356296f0b", "score": "0.49901226", "text": "def allowed_file(self, filename):\n        return '.' in filename and \\\n            filename.rsplit('.', 1)[1].lower() in self.ALLOWED_EXTENSIONS", "title": "" }, { "docid": "8dca002c6477ecfdc526218e3cd505a2", "score": "0.49814087", "text": "def allowed_file(filename):\n    return '.' in filename and filename.rsplit('.', 1)[-1].lower() in ALLOWED_EXTENSIONS", "title": "" }, { "docid": "5dee91ddb935a9d9fed2814065c9f9e8", "score": "0.49780384", "text": "def _build_extension_filter(self, extensions):\n        extension_filter = Q()\n        for extension in extensions.split(','):\n            extension = extension.strip()\n            if extension:\n                extension_filter |= Q(extension__iexact=extension)\n        return extension_filter", "title": "" }, { "docid": "fcd2ec49f2fe69ab1f5f2a7c340a167d", "score": "0.4977159", "text": "def filename(self, val):\n        pass", "title": "" }, { "docid": "81862e8040f5e3c336bd7f3539bc1ecd", "score": "0.497625", "text": "def validate_url(**kwargs):\n    if not validators.url(kwargs.get('file_url')):\n        raise AttributeError(\"Please enter a valid url\")", "title": "" }, { "docid": "d5f2936129dd4078f5c2d4712324b785", "score": "0.49711043", "text": "def allowed_file(filename):\n    return '.' 
in filename and \\\n filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS", "title": "" }, { "docid": "9d513f289a717fc9cc77130fade1eb15", "score": "0.49691778", "text": "def allowedFile(filename):\n return ('.' in filename and filename.rsplit('.', 1)[1] in\n app.config['ALLOWED_EXTENSIONS'])", "title": "" }, { "docid": "0babd3366595ca50141a121c74a7e39d", "score": "0.49627602", "text": "def allowed_file(filename, allowed_extensions):\n return '.' in filename and os.path.splitext(filename)[1].lower() in allowed_extensions", "title": "" }, { "docid": "364d2ded8f7bb902c327e35a95edd57a", "score": "0.49602988", "text": "def test_allowed_file(self):\n with fa.app.test_request_context():\n fa.app.preprocess_request()\n npt.assert_equal(fa.allowed_file(\"abc.dat\"), True)\n npt.assert_equal(fa.allowed_file(\"abc.csv\"), True)\n npt.assert_equal(fa.allowed_file(\"abc.txt\"), True)\n npt.assert_equal(fa.allowed_file(\"abc.exe\"), False)\n npt.assert_equal(fa.allowed_file(\"abc.sh\"), False)", "title": "" }, { "docid": "5a36600bb12ad3a470c5ed8853684f69", "score": "0.49586415", "text": "def extension_from_parameters(args):\n ext = \"\"\n ext += \".A={}\".format(args.activation)\n ext += \".B={}\".format(args.batch_size)\n ext += \".E={}\".format(args.epochs)\n ext += \".O={}\".format(args.optimizer)\n ext += \".LOSS=CONTAM\"\n ext += \".LR={}\".format(args.learning_rate)\n ext += \".CF={}\".format(\"\".join([x[0] for x in sorted(args.cell_features)]))\n ext += \".DF={}\".format(\"\".join([x[0] for x in sorted(args.drug_features)]))\n if args.feature_subsample > 0:\n ext += \".FS={}\".format(args.feature_subsample)\n if args.dropout > 0:\n ext += \".DR={}\".format(args.dropout)\n if args.warmup_lr:\n ext += \".wu_lr\"\n if args.reduce_lr:\n ext += \".re_lr\"\n if args.residual:\n ext += \".res\"\n if args.use_landmark_genes:\n ext += \".L1000\"\n if args.no_gen:\n ext += \".ng\"\n for i, n in enumerate(args.dense):\n if n > 0:\n ext += \".D{}={}\".format(i + 1, n)\n if args.dense_feature_layers != args.dense:\n for i, n in enumerate(args.dense):\n if n > 0:\n ext += \".FD{}={}\".format(i + 1, n)\n\n return ext", "title": "" }, { "docid": "d276147d49194ebef0ba4f575a1f8db9", "score": "0.4957857", "text": "def allowed_file(cls, fileName):\n return '.' in fileName and fileName.rsplit('.', 1)[1].lower() in cls.ALLOWED_EXTENSIONS", "title": "" }, { "docid": "f1e7c40e9f4e7aa43e4f71c8976c9028", "score": "0.4954694", "text": "def is_valid_extension(self):\n return self.extension in self.file_type_dict", "title": "" }, { "docid": "7241fc73f720b461b818af25b5c91dd0", "score": "0.4953114", "text": "def extension_by_url(self):\r\n ext = os.path.splitext(self.filename or '')[1]\r\n if ext:\r\n return ext[1:]", "title": "" }, { "docid": "1831f929346f94d2956cf7f60ab61257", "score": "0.49526983", "text": "def AddExtension(self, ext):", "title": "" }, { "docid": "0f1be3cb81f99342a8b50566d6ae077e", "score": "0.49513322", "text": "def FILE_EXTENSIONS(self) -> tuple[str, ...]:\n return (\n 'tif',\n 'tiff',\n 'ome.tif',\n 'lsm',\n 'stk',\n 'qpi',\n 'pcoraw',\n 'qptiff',\n 'ptiff',\n 'ptif',\n 'gel',\n 'seq',\n 'svs',\n 'scn',\n 'zif',\n 'ndpi',\n 'bif',\n 'tf8',\n 'tf2',\n 'btf',\n 'eer',\n )", "title": "" }, { "docid": "4693c015a2f86460acdecbf08217851b", "score": "0.4951037", "text": "def allowed_file(filename):\n return '.' 
in filename and \\\n           filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS", "title": "" }, { "docid": "7e528ab5576e505fea8c40a20b4c1105", "score": "0.4947529", "text": "def isExtension(rFile: generic.jar.ResourceFile) -> bool:\n    ...", "title": "" }, { "docid": "c13fa5fb7d8191033d18c854c2a51bd8", "score": "0.4942763", "text": "def allowed_file(filename):\n    return '.' 
in filename and \\\n filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS", "title": "" }, { "docid": "fedbb5c55108c171c455a8057b6a6a64", "score": "0.49379143", "text": "def dt_matches_files_omit_parameters_brief():", "title": "" }, { "docid": "e810811276ee86b9497a54d486097917", "score": "0.4936865", "text": "def has_file_allowed_extension(filename: str, extensions: Tuple[str, ...]) -> bool:\n return filename.lower().endswith(extensions)", "title": "" }, { "docid": "500a44fbf71a9d062d3ffb92f9181985", "score": "0.4923075", "text": "def _get_file_patterns(self):\n\n return ['.py']", "title": "" }, { "docid": "a02b7173a49911f3a5c927e24d7ed5ed", "score": "0.49161044", "text": "def ext(url):\n filename, file_extension = os.path.splitext(url)\n return(file_extension)", "title": "" }, { "docid": "7fc9da138b18f7185087e334730f96ef", "score": "0.49143705", "text": "def SetExtension(self, extension):", "title": "" }, { "docid": "493ea6a10cfe2acaf437535a6c1ef00d", "score": "0.49088347", "text": "def isExtensionFile(self,filePath):\n return fnmatch.fnmatch(filePath,\"*/Extensions-*\")", "title": "" }, { "docid": "b12963b73aed9a069cb60f950e93f39b", "score": "0.4905239", "text": "def add_extra_filename_values(filename_format, rows, urls, dry_run):\n file_fields = list(get_fmt_names(filename_format))\n if any(i.startswith(\"_url\") for i in file_fields):\n for row, url in zip(rows, urls):\n row.update(get_url_parts(url))\n\n if any(i.startswith(\"_url_filename\") for i in file_fields):\n if dry_run: # Don't waste time making requests.\n dummy = get_file_parts(\"BASE.EXT\", \"_url_filename\")\n for idx, row in enumerate(rows):\n row.update(\n {k: v + str(idx) for k, v in dummy.items()})\n else:\n num_urls = len(urls)\n log_progress(lgr.info, \"addurls_requestnames\",\n \"Requesting file names for %d URLs\", num_urls,\n label=\"Requesting names\", total=num_urls,\n unit=\" Files\")\n for row, url in zip(rows, urls):\n # If we run into any issues here, we're just going to raise an\n # exception and then abort inside dlplugin. It'd be good to\n # disentangle this from `extract` so that we could yield an\n # individual error, drop the row, and keep going.\n filename = get_url_filename(url)\n if filename:\n row.update(get_file_parts(filename, \"_url_filename\"))\n else:\n raise ValueError(\n \"{} does not contain a filename\".format(url))\n log_progress(lgr.info, \"addurls_requestnames\",\n \"%s returned for %s\", url, filename,\n update=1, increment=True)\n log_progress(lgr.info, \"addurls_requestnames\",\n \"Finished requesting file names\")", "title": "" }, { "docid": "a3f1f08ac41918bcd457fdd843f8b92b", "score": "0.4896281", "text": "def allowed_file(filename: str) -> str:\n return '.' in filename and \\\n filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS", "title": "" }, { "docid": "eff94fc1398c5d068d1342835b801b1c", "score": "0.48951083", "text": "def extractor_method(self) -> str:\n if not self._api_parameters:\n self._refresh()\n if set(self._api_parameters.keys()) == {'n', 'm', 'x', 'y'}:\n return 'ext1'\n return 'ext2'", "title": "" }, { "docid": "4e1d34d3d0023b29709aea56b08ba391", "score": "0.4886128", "text": "def handle_ext(self, fpath):", "title": "" }, { "docid": "0ffde45b3f456aa8e6d4d2a084eeb9c0", "score": "0.4875069", "text": "def validate_ext(self, ext_str):\n if re.match(\"^\\.[A-Za-z0-9]+$\", ext_str):\n return {'success':'true'}\n else:\n return {'error': ext_str +\\\n ' does not match required file extension format. 
eg: .yaml'\n }", "title": "" }, { "docid": "1b83ae39f1ec2debf1333997fde98a1e", "score": "0.48676646", "text": "def allowed_file(filename):\n if '.' in filename and filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS:\n return True\n return False", "title": "" }, { "docid": "db0dc60b64488a9a1f8b0481823f98cf", "score": "0.48630133", "text": "def add_js_file_conditional(\n self, filename: str, condition: [Callable, None], **kwargs\n) -> None:\n self.add_js_file(filename, **kwargs)\n self.config.conditional_script_files[filename] = condition", "title": "" }, { "docid": "538458e72dcc7e06dfcdd122f2b1e377", "score": "0.4855796", "text": "def getInputSpecification(cls):\n inputSpecification = super(ETImporter, cls).getInputSpecification()\n inputSpecification.addSub(InputData.parameterInputFactory(\"fileFormat\", contentType=InputData.StringType))\n return inputSpecification", "title": "" }, { "docid": "6540ed7e69f06d1cb035c00c0d6ee9bd", "score": "0.48557734", "text": "def extension_allowed(self, ext):\n return ((ext in self.config.allow) or\n (ext in self.extensions and ext not in self.config.deny))", "title": "" } ]
98d408709aadb6dcc58288bcc29b0d8e
Keep and forward the proposal to other proposers if it is received for the first time.
[ { "docid": "96f8cee9f5053856dd73219d9ded8c49", "score": "0.55881333", "text": "def handle_value_propose(self, message):\n\n message = tuple(message.split(\",\"))\n logging.info(\"{} {} received propose:{},{}\".format(self.role, self.id, *message))\n \n if not message in self.message_proposed:\n self.message_proposed.add(message)\n raw_message = \"propose:{},{}\".format(*message)\n logging.info(\"{} {} is sending {} to proposers\".format(self.role, self.id, raw_message))\n raw_message = bytes(raw_message, \"utf-8\")\n self.send_socket.sendto(raw_message, self.config[\"proposers\"])", "title": "" } ]
[ { "docid": "bb5c4aff698b5f7723a6e3de829420b3", "score": "0.6077841", "text": "def msg_propose_accept(self, conn, msg):\r\n if msg.commandnumber in self.outstandingproposes:\r\n prc = self.outstandingproposes[msg.commandnumber]\r\n if msg.inresponseto == prc.ballotnumber:\r\n prc.receivedcount += 1\r\n prc.receivedfrom.add(conn.peerid)\r\n if self.debug: self.logger.write(\"Paxos State\",\r\n \"got an accept for proposal ballotno %s \"\\\r\n \"commandno %s proposal %s making %d/%d accepts\" % \\\r\n (prc.ballotnumber, prc.commandnumber,\r\n prc.proposal, prc.receivedcount, prc.ntotal))\r\n if prc.receivedcount >= prc.nquorum:\r\n if self.debug: self.logger.write(\"Paxos State\", \"Agreed on %s\" % str(prc.proposal))\r\n # take this response collector out of the outstanding propose set\r\n self.add_to_proposals(prc.commandnumber, prc.proposal)\r\n # delete outstanding messages that caller does not need to check for anymore\r\n del self.outstandingproposes[msg.commandnumber]\r\n\r\n # now we can perform this action on the replicas\r\n performmessage = create_message(MSG_PERFORM, self.me,\r\n {FLD_COMMANDNUMBER: prc.commandnumber,\r\n FLD_PROPOSAL: prc.proposal,\r\n FLD_SERVERBATCH: isinstance(prc.proposal,\r\n ProposalServerBatch),\r\n FLD_CLIENTBATCH: isinstance(prc.proposal,\r\n ProposalClientBatch),\r\n FLD_DECISIONBALLOTNUMBER: prc.ballotnumber})\r\n\r\n if self.debug: self.logger.write(\"Paxos State\", \"Sending PERFORM!\")\r\n if len(self.replicas) > 0:\r\n self.send(performmessage, group=self.replicas)\r\n self.perform(parse_message(performmessage), designated=True)\r\n if self.debug: self.logger.write(\"State\", \"returning from msg_propose_accept\")", "title": "" }, { "docid": "6e5993774a98c07bb229218ea1f7e8a1", "score": "0.60003537", "text": "def process_propose(self, msg: ProposeMessage):\n dataset = msg.dataset\n\n if self.verify_revealed_secret(dataset.revealed_secret):\n self.revealed_secrets[self.round] = dataset.revealed_secret\n self.compute_beacon(msg.dataset.revealed_secret)\n else:\n self.flag_adversary(msg.sender)\n return False\n\n if not self.verify_proposal(msg):\n self.flag_adversary(msg.sender)\n return False\n\n if dataset.prev_round_idx > 0:\n stored_prev_cc = self.confirmation_certificates[dataset.prev_round_idx]\n if stored_prev_cc:\n assert dataset.confirmation_certificate\n assert stored_prev_cc.dataset_header_digest == dataset.confirmation_certificate.dataset_header_digest\n else:\n self.confirmation_certificates[dataset.prev_round_idx] = dataset.confirmation_certificate\n\n self.propose_messages[self.round] = msg\n self.datasets[self.round] = dataset\n if not self.beacon:\n self.compute_beacon(dataset.revealed_secret)\n self.acknowledge()", "title": "" }, { "docid": "f0baff58e8d4112218d17b5c737ef94b", "score": "0.59429514", "text": "def deliver_message(self, m: Message):\n if m.type == \"PROPOSE\":\n # The proposer gets a propose message\n self.value = m.value\n self.proposed_value = self.value\n n = self.get_next_n()\n for accept_dest in self.acs:\n self.network.queue_message(Message(self, accept_dest, \"PREPARE\", n=n))\n\n elif m.type == \"PROMISE\":\n # The proposer gets a promise message\n if m.prior:\n self.value = m.prior[1]\n self.network.queue_message(Message(self, m.src, \"ACCEPT\", m.n, value=self.value))\n elif m.type == \"ACCEPTED\" and not self.has_consensus:\n # Adds 1 to accepted_count\n self.accepted_count += 1\n # Checks if Proposer has reached consensus\n self.has_consensus = self.accepted_count > Proposer.acceptor_count // 2\n if 
self.has_consensus:\n # Send a SUCCES message to all learners\n for l in Computer.lears:\n self.network.queue_message(Message(self, l, \"SUCCES\", m.n, m.value))\n self.accepted_count = 0\n self.has_consensus = False\n\n elif m.type == \"REJECTED\":\n # Adds 1 to rejected_count\n self.rejected_count += 1\n # Checks if Proposer has been rejected\n if self.rejected_count > Proposer.acceptor_count // 2:\n # If Proposer has been rejected, queue new PREPARE messages\n n = self.get_next_n()\n for accept_dest in self.acs:\n self.network.queue_message(Message(self, accept_dest, \"PREPARE\", n=n))\n self.accepted_count = 0\n self.rejected_count = 0", "title": "" }, { "docid": "60fdb73d5db2a83e079eb21b89bdf37a", "score": "0.5906117", "text": "def test_single_proposal_prepare(self):\n print \"\\n\\n[Info] ##########[SINGLE PROPOSAL TEST]##########\\n\"\n\n # craft the message, proposal = 0, instance = 1\n self.send_prepare(0, 1, MESSAGE_TYPE.PREPARE_ACK, 0, 1)", "title": "" }, { "docid": "3ea1dbc97c6ce1e68fd4d3ec1837f7f1", "score": "0.57818073", "text": "def on_propose(self, msg_id: int, dialogue_id: int, origin: str, target: int, proposals: PROPOSE_TYPES):\n #print(\"[{0}]: Received propose from agent {1}\".format(self.public_key, origin))\n for i, p in enumerate(proposals):\n self.chargerList.append({\n 'agent': origin,\n 'price_kilowatt_hour': p.values['price_kilowatt_hour'],\n 'charger_bonus': p.values['charger_bonus'],\n 'charger_location': {\n 'lat': p.values['charger_location'].latitude,\n 'lng': p.values['charger_location'].longitude\n }})\n\n self.send_decline(msg_id, dialogue_id, origin, msg_id + 1)\n self.current = self.current + 1\n if self.current == self.agentCount:\n print(json.dumps(self.chargerList))\n self.stop()\n\n\n #print(\"[{0}]: Proposal {1}: {2}\".format(self.public_key, i, p.values))\n #self.chargerList.append(p)\n\n # TODO: save proposals to global var\n #print(\"[{0}]: Decline Propose.\".format(self.public_key))", "title": "" }, { "docid": "128323c24d1c47906d9d8668f79c6ed5", "score": "0.57275814", "text": "def forward_to_learner(self):\n\n while True:\n messages_not_forward = self.message_received - self.message_forward\n if bool(messages_not_forward):\n for decide_sequence, value in list(messages_not_forward):\n if int(decide_sequence) == self.forward_count:\n message = (decide_sequence, value)\n message = \"forward:{},{}\".format(*message)\n logging.info(\"{} {} is sending {} to learners\".format(self.role, self.id, message))\n message = bytes(message, \"utf-8\")\n self.send_socket.sendto(message, self.config[\"learners\"])\n self.forward_count += 1", "title": "" }, { "docid": "b20a4a55cc120c133fe891c25cdc5e68", "score": "0.5722373", "text": "def initialize_proposer(self):\n # the msg type need to handle dup\n proposer_msg_types = [\n MESSAGE_TYPE.PREPARE_ACK,\n MESSAGE_TYPE.PREPARE_NACK,\n MESSAGE_TYPE.ACCEPT_ACK,\n ]\n msg_history = set()\n\n def if_dup(msg, msg_history):\n # handle duplication\n if msg.msg_type in proposer_msg_types:\n msg_signature = (\n msg.msg_type,\n msg.value,\n msg.proposal,\n msg.r_proposal,\n msg.client_id,\n msg.instance,\n msg.origin_id)\n if msg_signature in msg_history:\n # dup, pass\n # print \"dup msg received by proposer!\"\n return True\n else:\n msg_history.add(msg_signature)\n return False\n\n # counter for proposer number\n proposer_num = self.server_id\n\n # log file\n write_lock = Lock()\n write_lock.acquire()\n logfile = open(\"server\" + str(self.server_id) + \".txt\", \"w+\")\n write_lock.release()\n\n def 
send_to_acceptors(msg, server_connections):\n assert isinstance(msg, message.message)\n # send the proposal to acceptors\n for s_socket in server_connections:\n try:\n s_socket.sendall(pickle.dumps(msg))\n except Exception, e:\n server_connections.remove(s_socket)\n print \"{}: ERROR - {}\".format(self.DEBUG_TAG, e)\n pass\n # TODO: remove the dead connections\n\n def print_instance_resolutions():\n for i in self.instance_resolutions:\n tcmd = self.instance_resolutions[i][0]\n assert isinstance(tcmd, command.command)\n print \"cid: {}: {}\".format(\n self.instance_resolutions[i][1], tcmd)\n\n def execute_command(exe_command):\n # execute the command on the list\n if exe_command.command_type == COMMAND_TYPE.LOCK:\n assert exe_command.resource_id not in self.lock_set\n self.lock_set.append(exe_command.resource_id)\n assert exe_command.resource_id in self.lock_set\n elif exe_command.command_type == COMMAND_TYPE.UNLOCK:\n assert exe_command.resource_id in self.lock_set\n self.lock_set.remove(exe_command.resource_id)\n assert exe_command.resource_id not in self.lock_set\n else:\n assert(False)\n\n # Initialize server connections unless it's to yourself\n server_connections = []\n\n for (serv_id, serv) in enumerate(self.server_list):\n if serv_id == self.server_id:\n continue\n target_host = serv[\"host\"]\n target_port = serv[\"internal_port\"]\n try:\n connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n connection.setsockopt(\n socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n connection.connect((target_host, target_port))\n server_connections.append(connection)\n except Exception, e:\n print \"{} Failed to connect to {}:{}\".format(\n self.DEBUG_TAG, target_host, target_port)\n\n # Open a client port and listen on port for connections\n try:\n self.client_socket = socket.socket(\n socket.AF_INET, socket.SOCK_STREAM)\n self.client_socket.setsockopt(\n socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.client_socket.bind((self.host, self.client_port))\n self.client_socket.listen(30)\n # set the timeout to check for exit conditions\n self.client_socket.settimeout(.1)\n except Exception, e:\n raise Exception(\n self.DEBUG_TAG + \": cannot open client port.\" + str(e))\n\n # Enter the main loop of the proposer\n done = 0\n\n # Initialize proposer state\n PROPOSING = 10 # proposing a proposal to the Paxos group\n ACCEPTING = 11 # waiting for accept messages to come back\n IDLE = 12 # no proposals in flight\n READY = 13 # state in which a proposal is ready for submission\n ACCEPT = 14 # state issuing accept requests\n\n state = IDLE\n\n # Initialize proposer data structures\n instance = 0 # the instance this node is proposing for\n\n ######################################################################\n # Open connection to the client\n # - accepts a client connection\n # - processes the client requests until client is done\n ######################################################################\n\n # Begin processing messages from the message queue\n while (done == 0):\n\n # accept an incoming client connection\n try:\n (client_connection, address) = self.client_socket.accept()\n client_connection.settimeout(.5)\n except Exception, e:\n # check for an exit message\n try:\n m = self.proposer_queue.get(block=True, timeout=1)\n if (m.msg_type == MESSAGE_TYPE.EXIT):\n done = 1\n client_done = 1\n break\n else:\n self.proposer_queue.put(m)\n except Exception, e:\n pass\n continue\n\n client_done = 0\n\n ###################################################################\n # While the client 
still has messages it wants to issue\n # - check the request queue for commands to propose first\n # - get the messages from the client connection\n # - check the lock set - if the lock you want is held, put the\n # request on the message request queue and spin in a WAIT\n # state until you get a lock release\n ###################################################################\n\n # client processing loop:\n # service as many message from the client as\n # needed until socket closed\n while (client_done == 0):\n # receive the client command to propose:\n # - if you get an EOF exit gracefully\n try:\n c_msgs = client_connection.recv(1000)\n except Exception, e:\n client_connection.close()\n break\n\n # unpack the message and get the command to propose from client\n try:\n c_msg = pickle.loads(c_msgs)\n except EOFError, e:\n print c_msgs + \" - \" + str(e)\n client_done = 1\n break\n\n # validate that you got a valid message with command payload\n assert isinstance(c_msg, message.message)\n assert isinstance(c_msg.value, command.command)\n assert c_msg.msg_type == message.MESSAGE_TYPE.CLIENT\n assert c_msg.client_id is not None\n\n # the command sent by client\n client_command = c_msg.value\n orig_client_id = c_msg.client_id\n\n # the command learnt by proposer (if any)\n learnt_command = c_msg.value\n learnt_client = c_msg.client_id\n\n state = READY\n # Paxos proposal phase if not IDLE\n while state != IDLE:\n\n ###########################################################\n # READY - node is ready to propose\n ###########################################################\n if state == READY:\n\n # craft the proposal packet and send to acceptors\n msg = message.message(\n MESSAGE_TYPE.PREPARE,\n proposer_num, instance, None,\n self.server_id, c_msg.client_id)\n\n assert msg.client_id is not None\n send_to_acceptors(msg, server_connections)\n # update the state\n state = PROPOSING\n\n ###########################################################\n # PROPOSING - wait for the prepare proposals to come back\n ###########################################################\n elif (state == PROPOSING):\n\n # PREPARE_NACKs received\n pre_nacks = []\n # response count\n response_cnt = 0\n\n while response_cnt <= self.group_size() / 2:\n try:\n # listen to responses on the server msg queue\n msg = self.proposer_queue.get(\n block=True, timeout=1)\n assert msg.client_id is not None\n\n # if an exception occurs and we're not done,\n # consider the proposal failed\n except Exception as e:\n # attempt another proposal round\n state = READY\n break\n\n assert isinstance(msg, message.message)\n\n if if_dup(msg, msg_history):\n continue\n\n # if the message ia a prepare ack and matches your\n # proposal/instance, increment ack count\n if msg.instance != instance:\n # ignore these messages since they're leftover\n pass\n else:\n if msg.msg_type == MESSAGE_TYPE.PREPARE_ACK:\n # good, +1 ack\n assert msg.instance == instance\n response_cnt += 1\n elif msg.msg_type == MESSAGE_TYPE.PREPARE_NACK:\n # store it\n nack_msg = (msg.proposal, msg)\n pre_nacks.append(nack_msg)\n response_cnt += 1\n elif msg.msg_type == MESSAGE_TYPE.EXIT:\n # exit\n print self.DEBUG_TAG + \" Proposer exit...\"\n done = 1\n client_done = 1\n break\n elif msg.msg_type == MESSAGE_TYPE.ACCEPT_ACK:\n pass\n else:\n print self.DEBUG_TAG + str(msg)\n raise ValueError(\n \"Wrong message got by prop {}\".format(\n msg))\n\n # if timeout try another round (higher prop number)\n if response_cnt <= self.group_size() / 2:\n # update state\n state = READY\n 
# update proposal num\n proposer_num += self.group_size()\n continue\n\n # learn the value of highest prop from responses\n if pre_nacks:\n highest_p, p_msg = max(pre_nacks)\n learnt_command = p_msg.value\n learnt_client = p_msg.client_id\n\n # if you won but are blocked, feint a failure\n if (learnt_client == orig_client_id and\n learnt_command == client_command and\n learnt_command.command_type == COMMAND_TYPE.LOCK and\n client_command.resource_id in self.lock_set):\n time.sleep(1)\n print \"---------------------------------------\"\n print \"{} won but blocked\".format(self.DEBUG_TAG)\n print \"cmd_r_id: {}\".format(\n client_command.resource_id)\n print \"lockset: {}\".format(self.lock_set)\n print \"leart: {}\".format(learnt_command)\n print \"---------------------------------------\"\n # update the state\n state = READY\n # update proposal num\n proposer_num += self.group_size()\n else:\n state = ACCEPT\n\n ###########################################################\n # ACCEPT - send the accept messages\n ###########################################################\n elif (state == ACCEPT):\n\n # craft the accept packet\n accept_msg = message.message(\n MESSAGE_TYPE.ACCEPT,\n proposer_num, instance, learnt_command,\n self.server_id, learnt_client)\n\n # send the accept requests\n assert accept_msg.client_id is not None\n send_to_acceptors(accept_msg, server_connections)\n\n # advance state\n state = ACCEPTING\n\n ###########################################################\n # ACCEPTING - wait for the accepting messages to come back\n ###########################################################\n elif (state == ACCEPTING):\n\n response_cnt = 0\n\n while response_cnt <= self.group_size() / 2:\n try:\n msg = self.proposer_queue.get(\n block=True, timeout=1)\n except Exception, e:\n print \"{} Accepting timed out - {}\".format(\n self.DEBUG_TAG, e)\n break\n\n assert isinstance(msg, message.message)\n\n if if_dup(msg, msg_history):\n continue\n\n # check messages on the queue for acks\n if msg.instance != instance:\n # ignore left over messages from lower instance\n assert msg.instance < instance\n pass\n else:\n if msg.msg_type == MESSAGE_TYPE.ACCEPT_ACK:\n # only care response for this accept req\n if msg.proposal == proposer_num:\n response_cnt += 1\n elif msg.msg_type == MESSAGE_TYPE.PREPARE_ACK:\n # ignore leftover prepare ack messages\n pass\n elif msg.msg_type == MESSAGE_TYPE.PREPARE_NACK:\n pass\n elif msg.msg_type == message.MESSAGE_TYPE.EXIT:\n client_done = 1\n done = 1\n else:\n raise ValueError(\"Should not reach here.\")\n\n # update proposer number, in any case\n proposer_num += self.group_size()\n\n # proposal was accepted\n if response_cnt > self.group_size() / 2:\n # yeah! 
accepted\n if (learnt_command == client_command and\n learnt_client == orig_client_id):\n state = IDLE\n # send a response message\n assert msg.client_id is not None\n client_ack_msg = message.message(\n MESSAGE_TYPE.CLIENT_ACK, None, instance,\n client_command, self.server_id,\n msg.client_id)\n client_connection.send(\n pickle.dumps(client_ack_msg))\n else:\n state = READY\n\n self.instance_resolutions[instance] = (\n learnt_command, msg.client_id)\n\n write_lock.acquire()\n logfile.write(\"{} -> cid:{} - {}\\n\".format(\n instance, learnt_client, learnt_command))\n write_lock.release()\n\n # execute command\n execute_command(learnt_command)\n\n # print \"{} execute {} from cid: {}\".format(\n # self.DEBUG_TAG, learnt_command, learnt_client)\n\n # move to the next instance\n instance += 1\n # reset learnt command\n learnt_command = client_command\n learnt_client = orig_client_id\n\n else:\n # break by timeout:\n # propose again\n state = READY\n # update proposal num\n proposer_num += self.group_size()\n\n ###########################################################\n # Failure state\n ###########################################################\n else:\n assert(False)\n\n # close command processing loop\n # close while loop\n # close connection processing loop\n write_lock.acquire()\n logfile.close()\n write_lock.release()", "title": "" }, { "docid": "848f2e3224d994ff1f95b46340501d04", "score": "0.5711765", "text": "def msg_propose(self, conn, msg):\r\n if msg.ballotnumber >= self.quorumballotnumber:\r\n if self.debug: self.logger.write(\"Paxos State\",\r\n \"propose received with acceptable ballotnumber %s\"\r\n % str(msg.ballotnumber))\r\n self.quorumballotnumber = msg.ballotnumber\r\n newpvalue = PValue(msg.ballotnumber,msg.commandnumber,msg.proposal)\r\n self.quorumaccepted.add(newpvalue)\r\n replymsg = create_message(MSG_PROPOSE_ACCEPT, self.me,\r\n {FLD_BALLOTNUMBER: self.quorumballotnumber,\r\n FLD_INRESPONSETO: msg.ballotnumber,\r\n FLD_COMMANDNUMBER: msg.commandnumber})\r\n conn.send(replymsg)\r\n if self.durable:\r\n self.file.write(str(newpvalue))\r\n os.fsync(self.file)\r\n else:\r\n if self.debug: self.logger.write(\"Paxos State\",\r\n \"propose received with non-acceptable ballotnumber %s\"\r\n % str(msg.ballotnumber))\r\n replymsg = create_message(MSG_PROPOSE_REJECT, self.me,\r\n {FLD_BALLOTNUMBER: self.quorumballotnumber,\r\n FLD_INRESPONSETO: msg.ballotnumber,\r\n FLD_COMMANDNUMBER: msg.commandnumber})\r\n conn.send(replymsg)", "title": "" }, { "docid": "b78c196b20ed3e75334981cc1ce2fad0", "score": "0.5709178", "text": "def decide_a_value(self):\n\n while True:\n if len(self.living_proposers_id) > 0:\n break\n while True:\n leader_id = min(self.living_proposers_id)\n \n if self.id != leader_id:\n continue\n \n messages_not_decided = self.message_proposed - self.message_decided\n if bool(messages_not_decided) :\n message_to_decide = messages_not_decided.pop()\n\n _, value = message_to_decide\n decide_message_to_acceptor = (self.decide_count, value)\n\n raw_message = \"decide:{},{}\".format(*decide_message_to_acceptor)\n logging.info(\"{} is sending {} to acceptors\".format(self.role, raw_message))\n raw_message = bytes(raw_message, \"utf-8\")\n self.send_socket.sendto(raw_message, self.config[\"acceptors\"])\n\n raw_message = \"Pdecide:{},{}\".format(*message_to_decide)\n logging.info(\"{} is sending {} to proposers\".format(self.role, raw_message))\n raw_message = bytes(raw_message, \"utf-8\")\n self.send_socket.sendto(raw_message, self.config[\"proposers\"])\n \n 
self.decide_count += 1\n self.message_decided.add(message_to_decide)", "title": "" }, { "docid": "63da407945e069f621a03c7bab2795f5", "score": "0.57018787", "text": "def connectionMade(self):\n self.peer.setPeer(self)\n self.peer.transport.resumeProducing()", "title": "" }, { "docid": "ac9cdd194ec0ecd38ec9aac5e3892edb", "score": "0.56799763", "text": "def propose(self, state):\n raise NotImplementedError", "title": "" }, { "docid": "2b3c98a1d91b1e96611d430729fbecc5", "score": "0.5661672", "text": "def test_multiple_proposal_prepare(self):\n print \"\\n\\n[Info] ##########[MULTIPLE PROPOSAL TEST]##########\\n\"\n\n # send and receive a valid proposal, proposal = 1, instance = 0\n self.send_prepare(1, 0, MESSAGE_TYPE.PREPARE_ACK, 1, 0)\n print \"[Info] First prepare request successful...\"\n\n # send and receive another valid proposal, proposal = 3, instance = 0\n self.send_prepare(3, 0, MESSAGE_TYPE.PREPARE_ACK, 3, 0)\n print \"[Info] Second prepare request successful...\"\n\n # send an not receive a lower numbered proposal\n proposal, instance = 2, 0\n msg = message.message(MESSAGE_TYPE.PREPARE,\n proposal, instance, None, self.dummy_server_id)\n self.message_socket.send(pickle.dumps(msg))\n\n try:\n self.acceptor_connection.settimeout(1.0)\n self.acceptor_connection.recv(1000)\n assert False # time out should happen\n except Exception, e:\n print e\n pass\n\n print \"[Info] Fourth prepare request test successful...\"\n\n # send a higher number proposal just to make sure\n # the proposer didn't die, proposal = 11, instance = 0\n self.send_prepare(11, 0, MESSAGE_TYPE.PREPARE_ACK, 11, 0)\n print \"[Info] Fifth prepare request successful...\"", "title": "" }, { "docid": "26b796f562f29a0b5ee1fb4745193027", "score": "0.56332576", "text": "def initialize_acceptor(self):\n\n def response_proposer(msg, prop_id):\n assert server_connections[prop_id]\n response_conn = server_connections[prop_id]\n try:\n response_conn.sendall(pickle.dumps(rmsg))\n except Exception, e:\n server_connections.remove(response_conn)\n print self.DEBUG_TAG + \"WARN - fail to response \" + e\n\n # the msg types that will be duped\n acceptor_msg_types = [\n MESSAGE_TYPE.PREPARE,\n MESSAGE_TYPE.ACCEPT,\n ]\n\n # msg_history to handle dups\n msg_history = set()\n\n # open socket connections to each server: server_id -> connection\n server_connections = dict()\n for server_id, p_server in enumerate(self.server_list):\n target_host = p_server[\"host\"]\n target_port = p_server[\"internal_port\"]\n\n try:\n connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n connection.setsockopt(\n socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n connection.connect((target_host, target_port))\n server_connections[server_id] = connection\n except Exception:\n #print \"{} Acceptor failed to connect to {}:{}, {}\".format(\n # self.DEBUG_TAG, target_host, target_port)\n continue\n\n accept_history = dict() # instance -> prep_p, acc_p, acc_v, cid\n\n # the msg types that will be duped\n acceptor_msg_types = [\n MESSAGE_TYPE.PREPARE,\n MESSAGE_TYPE.ACCEPT,\n ]\n\n # msg_history to handle dups\n msg_history = set()\n\n # Enter the proposal processing loop\n # - dequeue message for this proposer and process them\n done = 0\n\n while done == 0:\n\n # get a message of the queue\n msg = self.acceptor_queue.get()\n\n # handle duplication\n if msg.msg_type in acceptor_msg_types:\n msg_signature = (\n msg.msg_type,\n msg.value,\n msg.proposal,\n msg.r_proposal,\n msg.client_id,\n msg.instance,\n msg.origin_id)\n if msg_signature in msg_history:\n # 
dup, pass\n # print \"dup msg to acceptor!\"\n continue\n else:\n msg_history.add(msg_signature)\n\n ###################################################################\n # handle PREPARE request\n ###################################################################\n if msg.msg_type == MESSAGE_TYPE.PREPARE:\n\n # extract the data fields\n p_instance = msg.instance\n p_proposal = msg.proposal\n\n # that is not the first message with this instance\n if p_instance in accept_history:\n # unpack history info\n h_prep, h_accp, h_accv, cid = accept_history[p_instance]\n\n # only response if p_proposal higher than current\n if p_proposal > h_prep:\n\n # decide message type\n if h_accp != -1:\n msg_type = MESSAGE_TYPE.PREPARE_NACK\n else:\n msg_type = MESSAGE_TYPE.PREPARE_ACK\n # send the nack back\n rmsg = message.message(\n msg_type, p_proposal, p_instance,\n h_accv, self.server_id, client_id=cid,\n r_proposal=h_accp)\n response_proposer(rmsg, msg.origin_id)\n\n # update accept_history\n accept_history[p_instance] = (\n p_proposal, h_accp, h_accv, cid)\n else:\n # send ack back\n rmsg = message.message(\n MESSAGE_TYPE.PREPARE_ACK, p_proposal, p_instance,\n msg.client_id, self.server_id)\n response_proposer(rmsg, msg.origin_id)\n\n # update accept_history\n accept_history[p_instance] = (p_proposal, -1, None, None)\n\n ###################################################################\n # handle ACCEPT request\n ###################################################################\n elif msg.msg_type == MESSAGE_TYPE.ACCEPT:\n # extract the data fields\n p_instance = msg.instance\n p_value = msg.value\n p_proposal = msg.proposal\n p_client_id = msg.client_id\n\n # check to see if the proposal number for\n # this instance is high enough\n if p_instance in accept_history:\n h_prep, h_accp, h_accv, cid = accept_history[p_instance]\n\n if p_proposal >= h_prep:\n # send accept_ack message\n rmsg = message.message(\n MESSAGE_TYPE.ACCEPT_ACK, p_proposal, p_instance,\n p_value, self.server_id, client_id=p_client_id)\n response_proposer(rmsg, msg.origin_id)\n # update accept_history\n accept_history[p_instance] = (\n p_proposal, p_proposal, p_value, p_client_id)\n else:\n # send accept_ack message\n rmsg = message.message(\n MESSAGE_TYPE.ACCEPT_ACK, p_proposal, p_instance,\n p_value, self.server_id, client_id=p_client_id)\n response_proposer(rmsg, msg.origin_id)\n # update accept_history\n accept_history[p_instance] = (\n p_proposal, p_proposal, p_value, p_client_id)\n ###################################################################\n # handle EXIT message\n ###################################################################\n elif (msg.msg_type == message.MESSAGE_TYPE.EXIT):\n done = 1\n # should never get this far\n else:\n raise ValueError(\"Should not be here.\")\n # shut down inter-server communication channels\n try:\n assert(len(server_connections.keys()) > 0)\n for skey in server_connections.keys():\n server_connections[skey].close()\n except Exception, e:\n print \"{} ERROR - failed to close server conn... 
{}\".format(\n self.DEBUG_TAG, e)", "title": "" }, { "docid": "2730dbdb6048ef6b8ef999066b89c0b0", "score": "0.5531211", "text": "def forward(self):\n return", "title": "" }, { "docid": "68b871168b1113d13dfcacfac52a070a", "score": "0.552201", "text": "def send_propose_item(self, engineer, item):\n self.__proposed_item.append(item)\n self.__was_proposed_to_me.append(False)\n self.send_message(engineer, MessagePerformative.PROPOSE, item)", "title": "" }, { "docid": "4931176074f38c1dc1ae57e42c38dac2", "score": "0.54925126", "text": "def transfer_to_broker(self):\n while True:\n data = self.recv_agent.recv()\n info_cmd = get_msg_info(data, \"cmd\")\n\n new_cmd = info_cmd + self.learner_postfix\n set_msg_info(data, broker_id=self.broker_id,\n explorer_id=self.explorer_id, cmd=new_cmd)\n\n self.send_broker.send(data)", "title": "" }, { "docid": "5800d37e84e70c834a2be99f1c7bc466", "score": "0.5472852", "text": "def start_election(self):\n self.current_id = (self.current_id[0] + 1, self.node_id())\n self.promises = 1\n self.network_node.broadcast({\n 'do': 'paxos_prepare',\n 'proposal_id': self.current_id,\n })\n self.highest_promised = self.current_id", "title": "" }, { "docid": "352c39c81f3a3a8820daaaa8922c9444", "score": "0.54572904", "text": "def forward(self):\n return None", "title": "" }, { "docid": "352c39c81f3a3a8820daaaa8922c9444", "score": "0.54572904", "text": "def forward(self):\n return None", "title": "" }, { "docid": "c205a780d50e749fde6e6c9398dcc404", "score": "0.5444387", "text": "def forward(self):\n pass", "title": "" }, { "docid": "c205a780d50e749fde6e6c9398dcc404", "score": "0.5444387", "text": "def forward(self):\n pass", "title": "" }, { "docid": "c205a780d50e749fde6e6c9398dcc404", "score": "0.5444387", "text": "def forward(self):\n pass", "title": "" }, { "docid": "c205a780d50e749fde6e6c9398dcc404", "score": "0.5444387", "text": "def forward(self):\n pass", "title": "" }, { "docid": "eeacc3ac341b998a1619a183b536f1c1", "score": "0.5404664", "text": "def forward(self):\n encounters = self.encounters()\n if not encounters:\n return\n\n # forward only when N-th% of neighbors has changed\n neighbors = self.neighbors()\n change_ratio = len(encounters) / len(neighbors)\n if change_ratio < self.n_th / 100:\n return\n\n for agent in encounters:\n for msg in self.received:\n # change forwarding probability based on the number of duplicates\n ndups = max(self.received[msg] - 1, 0)\n p = max(1 / self.c**ndups, self.min_p)\n if random.uniform(0, 1) > p:\n continue\n self.sendmsg(agent, msg)", "title": "" }, { "docid": "5ff03879fe1a61135d4a8b811616413f", "score": "0.5394365", "text": "def msg_prepare_adopted(self, conn, msg):\r\n if msg.inresponseto in self.outstandingprepares:\r\n prc = self.outstandingprepares[msg.inresponseto]\r\n prc.receivedcount += 1\r\n prc.receivedfrom.add(conn.peerid)\r\n if self.debug: self.logger.write(\"Paxos State\",\r\n \"got an accept for ballotno %s \"\\\r\n \"commandno %s proposal %s with %d/%d\"\\\r\n % (prc.ballotnumber, prc.commandnumber, prc.proposal,\r\n prc.receivedcount, prc.ntotal))\r\n assert msg.ballotnumber == prc.ballotnumber, \"[%s] MSG_PREPARE_ADOPTED cannot have non-matching ballotnumber\" % self\r\n # add all the p-values from the response to the possiblepvalueset\r\n if msg.pvalueset is not None:\r\n prc.possiblepvalueset.union(msg.pvalueset)\r\n\r\n if prc.receivedcount >= prc.nquorum:\r\n if self.debug: self.logger.write(\"Paxos State\", \"suffiently many accepts on prepare!\")\r\n # take this response collector out of the outstanding 
prepare set\r\n del self.outstandingprepares[msg.inresponseto]\r\n # choose pvalues with distinctive commandnumbers and highest ballotnumbers\r\n pmaxset = prc.possiblepvalueset.pmax()\r\n for commandnumber,proposal in pmaxset.iteritems():\r\n self.add_to_proposals(commandnumber, proposal)\r\n # If the commandnumber we were planning to use is overwritten\r\n # we should try proposing with a new commandnumber\r\n if self.proposals[prc.commandnumber] != prc.proposal:\r\n self.pick_commandnumber_add_to_pending(prc.proposal)\r\n self.issue_pending_commands()\r\n for chosencommandnumber,chosenproposal in self.proposals.iteritems():\r\n # send proposals for every outstanding proposal that is collected\r\n if self.debug: self.logger.write(\"Paxos State\", \"Sending PROPOSE for %d, %s\"\r\n % (chosencommandnumber, chosenproposal))\r\n newprc = ResponseCollector(prc.quorum, prc.ballotnumber,\r\n chosencommandnumber, chosenproposal)\r\n self.outstandingproposes[chosencommandnumber] = newprc\r\n propose = create_message(MSG_PROPOSE, self.me,\r\n {FLD_BALLOTNUMBER: prc.ballotnumber,\r\n FLD_COMMANDNUMBER: chosencommandnumber,\r\n FLD_PROPOSAL: chosenproposal,\r\n FLD_SERVERBATCH: isinstance(chosenproposal,\r\n ProposalServerBatch)})\r\n self.send(propose, group=newprc.quorum)\r\n # As leader collected all proposals its state is up-to-date\r\n # and it is done initializing\r\n self.leader_initializing = False\r\n self.stateuptodate = True\r\n # become active\r\n self.active = True", "title": "" }, { "docid": "faa2b8c3eb0fa0ce2c43c9591c403471", "score": "0.5392574", "text": "def incrementer_proceed(last_received=[None]):\n while True:\n number = yield from receive()\n yield from send(number + 1)\n yield from proceed() # this basically does nothing\n last_received[0] = number", "title": "" }, { "docid": "ff43a51e6329fe1531615ff6264456cf", "score": "0.5391507", "text": "def forward(self):\r\n pass", "title": "" }, { "docid": "c96290af2f2eec97591f83af8360e133", "score": "0.5389977", "text": "def tick(self):\n \n if self.state == PropControllerState.LISTENING:\n # If we have all of one pose...\n if np.all(np.isfinite(self.measured_pose)):\n # Record the current pose as element 0 of self.prev_poses\n # shuffling the other poses to the right. 
Then, truncate the\n # list to be a maximum of 3 poses long.\n self.prev_poses = [np.copy(self.measured_pose)] + self.prev_poses\n self.prev_poses = self.prev_poses[:3]\n elif self.state == PropControllerState.CONTROLLING:\n # Calculate new commanded pose\n p0, p1, p2 = self.prev_poses[:3]\n p0 = (1+alpha-k)*p0 - alpha*p1 + k*self.commanded_pose\n #p0 = 0.8*p1 + 0.2*self.commanded_pose\n \n # Shuffle prev poses array\n self.prev_poses = [p0] + self.prev_poses\n self.prev_poses = self.prev_poses[:3]\n\n delta = self.commanded_pose - p0\n delta_mag = np.sqrt(np.sum(delta * delta))\n epsilon = 0.05\n\n rospy.logerr('p0: '+ str(p0))\n rospy.logerr('p1: '+ str(p1))\n rospy.logerr('p2: '+ str(p2))\n rospy.logerr('Cmded pose: ' + str(self.commanded_pose))\n rospy.logerr('delta: ' +str(delta))\n #rospy.logerr('Controlling with prev poses: ' + str(self.prev_poses))\n\n # If we're sufficiently close, or have gone mad, transition to listening\n # state and reset the commanded pose.\n if delta_mag <= epsilon or delta_mag >= 3:\n rospy.logerr('Finished with delta mag: ' + str(delta_mag))\n self.commanded_pose = np.ones(5) * np.nan\n self.state = PropControllerState.LISTENING\n elif self.pubs is not None:\n # Actually publish *IF* we have some publishers\n for i in range(len(self.pubs)):\n self.pubs[i].publish(p0[i])", "title": "" }, { "docid": "5196a55c32214e593334e2725883cf46", "score": "0.5383526", "text": "def truth_processor(self):\n while True:\n msg = yield self.comm_in.next()\n if msg.origin_id in self.current_message.keys():\n self.prev_message[msg.origin_id] = self.current_message[msg.origin_id]\n else:\n self.prev_message[msg.origin_id] = None\n\n self.current_message[msg.origin_id] = msg\n self.position_update.succeed(value=msg.origin_id)\n self.position_update = self.env.event()", "title": "" }, { "docid": "b8d852032d4d40a81f989debb7bdf5bc", "score": "0.5380999", "text": "def _forward(self, messages):\n assert isinstance(messages, (tuple, list))\n assert len(messages) > 0\n assert all(isinstance(message, Message.Implementation) for message in messages)\n assert all(message.community == messages[0].community for message in messages)\n assert all(message.meta == messages[0].meta for message in messages)\n\n result = True\n meta = messages[0].meta\n if isinstance(meta.destination, (CommunityDestination, CandidateDestination)):\n for message in messages:\n # CandidateDestination.candidates may be empty\n candidates = set(message.destination.candidates)\n # CommunityDestination.node_count is allowed to be zero\n if isinstance(meta.destination, CommunityDestination) and meta.destination.node_count > 0:\n max_candidates = meta.destination.node_count + len(candidates)\n for candidate in meta.community.dispersy_yield_verified_candidates():\n if len(candidates) < max_candidates:\n candidates.add(candidate)\n else:\n break\n result = result and self._send(tuple(candidates), [message])\n else:\n raise NotImplementedError(meta.destination)\n\n return result", "title": "" }, { "docid": "f7a15c43f64fed5e2feffc10608ea091", "score": "0.5371277", "text": "def propose_action(self):\n\n # Check if termination criterion has been reached\n self.check_termination()\n\n # Decide the next action to take\n if self.terminate: # Check if termination has been triggered\n if self.pending_evals == 0: # Only terminate if nothing is pending, otherwise take no action\n return Proposal(\"terminate\")\n elif self.converged:\n if self.use_restarts: # Start a new run\n if self.asynchronous or self.pending_evals == 0: # We can 
restart immidiately, else wait\n self.sample_restart() # Trigger the restart\n return self.init_proposal() # We are now in phase 1, so make an initial proposal\n else:\n return\n elif self.batch_queue: # Propose point from the batch_queue\n if self.phase == 1:\n return self.init_proposal()\n else:\n return self.adapt_proposal()\n else: # Make new proposal in the adaptive phase\n self.phase = 2\n if self.asynchronous: # Always make proposal with asynchrony\n self.generate_evals(num_pts=1)\n elif self.pending_evals == 0: # Make sure the entire batch is done\n self.generate_evals(num_pts=self.batch_size)\n\n # We allow generate_evals to trigger a restart, so check if status has changed\n if self.converged:\n if self.use_restarts: # Start a new run\n if self.asynchronous or self.pending_evals == 0: # We can restart immidiately, else wait\n self.sample_restart() # Trigger the restart\n return self.init_proposal() # We are now in phase 1, so make an initial proposal\n else:\n return\n\n # Launch the new evaluations (the others will be triggered later)\n return self.adapt_proposal()", "title": "" }, { "docid": "269b569b1113d33356dc91e6fabe61c4", "score": "0.5363086", "text": "def connectionMade(self):\n self._socks5_addr = self.factory._socks5_addr\n self._cvpn_addr = self.factory._cvpn_addr\n self._con_mon = self.factory._con_mon\n self._ct_dps = self.factory._ct_dps\n p = self.transport.getPeer()\n self._src_addr = (p.host, p.port)\n\n # Tell the CT that we're the producer for this tunnel\n # This way we can slow recvs from the proxy when the\n # link to the client can't handle it\n if self._ct_dps:\n # We don't know which CT_DP this actually belongs to..\n # http or tls or ? so we tell them all!\n for ct_dp in self._ct_dps.itervalues():\n ct_dp.setProducer(self.transport.getPeer(), self)", "title": "" }, { "docid": "4c0bfad5c8663fd06ca6159996c47451", "score": "0.53324974", "text": "def propose(self, tag):\n if self.enable_reset():\n self.prp[self.uid] = Prp(1, tag)\n self.all[self.uid] = False", "title": "" }, { "docid": "995b3f8e3ae63eadae1512fce546d5b2", "score": "0.5310901", "text": "def Produced(self):\n\n\t\tself._must_reset = True", "title": "" }, { "docid": "fbefe51c8e49c97619633310913bc2ab", "score": "0.5278334", "text": "def send_prepare(self, prop, ins, rmsg_type, rmsg_prop, rmsg_ins):\n msg = message.message(\n MESSAGE_TYPE.PREPARE, prop, ins, None, self.dummy_server_id)\n self.message_socket.send(pickle.dumps(msg))\n\n print \"[Info] Sent a proposal to acceptor...\"\n rmsgs = self.acceptor_connection.recv(1000)\n rmsg = pickle.loads(rmsgs)\n assert isinstance(rmsg, message.message)\n\n print \"[Info] Received a response from server...\"\n\n assert rmsg.msg_type == rmsg_type\n if rmsg.proposal != rmsg_prop:\n print (rmsg.proposal, rmsg_prop)\n\n assert rmsg.proposal == rmsg_prop\n assert rmsg.instance == rmsg_ins\n\n return rmsg", "title": "" }, { "docid": "7a5071759459f8d440ebf64ac633f3dd", "score": "0.5271091", "text": "def _do_proposal(self):\n yield [self._do_proposal_for(client) for client in self.clients]\n self.lobby.proposals.remove(self)", "title": "" }, { "docid": "c16c895531da41916b67a0657df89949", "score": "0.5267571", "text": "def success(self) -> None:\n for learner in self.l:\n learner.receive_message(Message(self, learner, 'SUCCESS', self.accepted, None, None))\n\n for acceptor in self.a:\n acceptor.prior_promised_value = None\n acceptor.prior_promised_id = 0", "title": "" }, { "docid": "da8cb65edf7ef86dcc4605b5ccee4a63", "score": "0.519307", "text": "def 
handle_p1b(self, msg):\n res = None\n state = self.proposerState.get(msg.inst)\n if state is None:\n state = self.ProposerState(msg.crnd)\n\n if state.crnd == msg.crnd:\n if msg.nid not in state.nids:\n state.nids.add(msg.nid)\n if state.hvrnd <= msg.vrnd:\n state.hval = msg.val\n\n state.saved = True\n res = PaxosMessage(10, msg.inst, state.crnd, state.hvrnd, state.hval)\n self.proposerState[msg.inst] = state\n\n return res", "title": "" }, { "docid": "a44ba2f28e470f72dc920f12806d5b19", "score": "0.51810205", "text": "def deliver():", "title": "" }, { "docid": "0fb20772204dd52bed3a15ee66f1b9b9", "score": "0.5176319", "text": "def forward_interest(self):\n pass", "title": "" }, { "docid": "92cccf35ae92399750300017a35005cf", "score": "0.5158068", "text": "def run(self):\n\n super().run()\n logging.info(\"{} {} starts running...\".format(self.role, self.id))\n for value in sys.stdin:\n value = value.strip()\n message = \"propose:{},{}\".format(self.id, value)\n message = bytes(message, \"utf-8\")\n logging.info(\"{} {} is sending {} to proposers\".format(self.role, self.id, value))\n self.send_socket.sendto(message, self.config[\"proposers\"])\n print(\"{} {} is done.\".format(self.role, self.id))", "title": "" }, { "docid": "6a474920304ff86a1329c103b2d9fb38", "score": "0.5149242", "text": "def process_as_present(self):\n self.order.amount = 0\n self.order.status = Order.SHIPPED\n self.order.save(update_fields=['amount', 'status'])\n\n preferences = self.customer.preferences\n preferences.present_next = False\n preferences.save(update_fields=['present_next'])\n\n self.result['processed_by'] = 'present'", "title": "" }, { "docid": "d88940bb1797d8e04b08dadfa207aacd", "score": "0.512604", "text": "def start_negotiation(self, engineer):\n item = self.__preferences.most_preferred(self.get_non_proposed_items())\n self.send_propose_item(engineer, item)", "title": "" }, { "docid": "56d746706e79af48e29a2d83b2045118", "score": "0.5125055", "text": "def handle_propose(self, src, proposal_id, index, value):\n if proposal_id < self.highest_promised:\n log.info('Rejecting Proposal: proposal_id < highest_promised')\n return\n while len(self.log) <= index:\n self.log.append(None)\n self.log[index] = value\n self.network_node.send(src, {\n 'do': 'paxos_accept',\n 'proposal_id': proposal_id,\n 'index': index,\n })", "title": "" }, { "docid": "066ecc43052677c4dd5a6fdc9c0e3615", "score": "0.51167923", "text": "def prerun(self, payload):\r\n if self._once:\r\n self._destroy = True\r\n if self._instream:\r\n self.run(payload, True)", "title": "" }, { "docid": "9bda3b748dbd49fbeb37d12046cfca57", "score": "0.5114413", "text": "def resumeProducing(self):\n time_now = int(time.time())\n log(\"Resumed TCP redirect. 
\"\n \"Dropped %s messages during %s seconds \",\n self.dropped, time_now - self.last_paused)\n self.paused = False\n self.dropped = 0\n self.last_paused = None", "title": "" }, { "docid": "e2a612a133e1807193177e1a110da94c", "score": "0.5097063", "text": "def forward(self):\n if self.isFirst:\n self.warp_fake_A = self.prev_fake_A\n self.mask = torch.ones(1, 1, self.opt.crop_size[0], self.opt.crop_size[1]).to(self.device)\n else:\n self.warp_fake_A = F.grid_sample(self.prev_fake_A, self.flow, align_corners=True) * self.mask\n fake_A_code = self.netG_B_encoder(self.real_B)\n warp_A_code, mask = self.netG_Warp_encoder(self.warp_fake_A, self.mask)\n mask = mask.repeat(1, warp_A_code.shape[1], 1, 1)\n self.fake_A = self.netG_B_decoder(torch.cat((fake_A_code, warp_A_code), dim=1),\n torch.cat((torch.ones_like(mask), mask), dim=1))\n if self.isTrain:\n self.rec_B = self.netG_A(self.fake_A)", "title": "" }, { "docid": "fe0020b19ffcd119faa9f30513ec8b15", "score": "0.5082356", "text": "def propose_move(self) -> tuple:\n pass", "title": "" }, { "docid": "d9789f511239e7d4f1d10838e55d1f03", "score": "0.50749", "text": "def _handle_preapproval(self, features):\r\n log.debug(\"Server supports subscription pre-approvals.\")\r\n self.xmpp.features.add('preapproval')", "title": "" }, { "docid": "e782651bcc7a4b6623609a6734b3c072", "score": "0.50712883", "text": "def redirect_to_leader(self, msg, msg_type):\n if self.current_leader is None:\n self.transition_state(State.CANDIDATE)\n else:\n msg['type'] = msg_type\n msg['destination'] = self.current_leader\n self.send_to_broker(msg)", "title": "" }, { "docid": "3cca1203692cd9039fbb043e38f2dcc9", "score": "0.5069647", "text": "def one_round_simultaneous_proposals(self):\n proposals_in_current_round = []\n next_round_proposer_uuid = set()\n\n for want_to_propose_uuid in self.unmatched_proposer_uuid:\n want_to_propose = self.proposer_uuid_dict[want_to_propose_uuid]\n next_propose_to = want_to_propose.propose_next()\n if next_propose_to != Proposer.NO_NEXT_PROPOSAL:\n self.proposal_count += 1\n rejection_uuid = self.responder_uuid_dict[next_propose_to].respond_to_proposal(want_to_propose_uuid)\n if rejection_uuid is None:\n want_to_propose.register_response(is_acceptance=True)\n elif rejection_uuid == want_to_propose_uuid:\n next_round_proposer_uuid.add(want_to_propose_uuid)\n else:\n want_to_propose.register_response(is_acceptance=True)\n rejected_proposer = self.proposer_uuid_dict[rejection_uuid]\n rejected_proposer.register_response(is_acceptance=False)\n next_round_proposer_uuid.add(rejection_uuid)\n\n proposals_in_current_round.append(Proposal(self.proposal_count, want_to_propose_uuid,\n next_propose_to, rejection_uuid))\n\n self.unmatched_proposer_uuid = next_round_proposer_uuid\n return proposals_in_current_round", "title": "" }, { "docid": "20ef3f8bc589fb0b19860b4aac81c6f4", "score": "0.50608104", "text": "def _do_proposal_for(self, client):\n with (yield self.lobby.proposal_locks[client].acquire()):\n if self.is_declined:\n return\n self.invited.add(client)\n try:\n result = yield client.ui.ask_yes_no(\n self._get_invitation_prompt(client),\n leave_question=True\n )\n if result:\n yield self.accept(client)\n else:\n self.decline(client)\n except ClientDeclinedFlag:\n return", "title": "" }, { "docid": "5073a4a972e6e56a0d2c81e9975ab732", "score": "0.5036377", "text": "def resume_previous(self):\n\n # restore saved data\n current_state = self.current_state\n next_state = self.next_state\n action = self.action\n traj = self.trajectory\n\n # finnish updating 
using saved data and other agents answers\n self.finnish_update(current_state, next_state, action, traj.trajectory_key)\n\n if self.is_goal(next_state):\n\n # next_state is the goal state - add traj to it's agent_trajectories here\n # since this trajectory will be removed and the state will not get updated with update function\n next_state.add_agent_trajectory(self.id, traj.trajectory_key)\n trajectory = self._forward_trajectories.pop(traj.trajectory_key)\n self._backward_trajectories[traj.trajectory_key] = trajectory\n\n self.planner.goal_reached(self.id, traj)\n\n if len(self.planner.agents) == 1:\n self.create_new_trajectory(self.initial_state)\n\n # break from main loop\n return True\n\n # if we have more than 1 agent, no matter what the next state is,\n # request Q values to choose the best agent to act\n # we do this instead of requesting on public changes to solve the issue of agents that can\n # perform private actions and never reach a public state change\n if len(self.planner.agents) > 1:\n # request best Q Value of next state from other agents\n self.planner.add_message(Message(QVALUE_REQ_MSG, next_state, self.id, traj.trajectory_key,\n prev_state=current_state, prev_action=action))\n\n # break from main loop - took out of public change for the case where an agent never gets to\n # perform a public action but can always perform private actions\n return True\n\n return False", "title": "" }, { "docid": "1a96170bcb2c1c230ea1f7bac2896d96", "score": "0.5032995", "text": "def follow_up(self, **kwargs):\n if self.end_convo:\n q_obj = kwargs.get(\"q_obj\", None)\n try:\n q_obj.put(self.messages_heard[-1].decode())\n except AttributeError as e:\n print(f\"in spokenmessages.py, follow_up {e}\\n\"\n f\"tx_reason is {self.messages_to_be_spoken_raw[1].decode()}\")", "title": "" }, { "docid": "64608a9108b2f724f6751b9326c49929", "score": "0.5028869", "text": "def subscribe(self):\n contracts = self.get_messages('contract')\n if contracts:\n premia = {contract.content.premium: contract for contract in contracts}\n cheapest = premia[min(premia)]\n self.send(cheapest.sender, 'addcontract', cheapest.content)\n self.contracts.add(cheapest.content)", "title": "" }, { "docid": "1930f91b510186e090c1d00a87684644", "score": "0.50286686", "text": "def handle_leader_consensus(self, message):\n\n id = int(message)\n # logging.info(\"{} {} received living proposer {}\".format(self.role, self.id, id))\n time_now = time.time()\n if id not in self.living_proposers_id:\n self.living_proposers_id.append(id)\n self.living_proposers_timer.append(time_now)\n else:\n self.living_proposers_timer[self.living_proposers_id.index(id)] = time_now\n\n index_of_dead_proposers = []\n for index, time_last_seen in enumerate(self.living_proposers_timer):\n if time_now - time_last_seen > 3:\n index_of_dead_proposers.append(index)\n\n self.living_proposers_id = [id for index, id in enumerate(self.living_proposers_id) if not index in index_of_dead_proposers]\n self.living_proposers_timer = [time_last_seen for index, time_last_seen in enumerate(self.living_proposers_timer) if not index in index_of_dead_proposers]", "title": "" }, { "docid": "ee90ae088d42f53c0cf6b89d6e2ffb97", "score": "0.5027954", "text": "def forward(self):\n raise NotImplementedError", "title": "" }, { "docid": "ee90ae088d42f53c0cf6b89d6e2ffb97", "score": "0.5027954", "text": "def forward(self):\n raise NotImplementedError", "title": "" }, { "docid": "8703c84de40196c5544ed7bf563b25ca", "score": "0.50227064", "text": "def aPropos(self):\r\n Apropos()\r\n return 
True", "title": "" }, { "docid": "81eef64f54eab4c49b52e6a8aea8a1f7", "score": "0.50038487", "text": "def replay(self):\n pass", "title": "" }, { "docid": "fe5a21bd0ac6e03893649d419335945f", "score": "0.5002152", "text": "def on_initial_proposal(self, proposal):\n if proposal.accepted:\n self.on_initial_accepted(proposal)\n else:\n self.on_initial_rejected(proposal)", "title": "" }, { "docid": "cdd3aedbfa3e1ce35400e5da5bcd6e95", "score": "0.49995396", "text": "def proceed(self):\n with self._cv:\n self._proceed_flag = True\n self._cv.notify_all()", "title": "" }, { "docid": "2d08f66b7b6c17e1f6425a16ca701648", "score": "0.49926367", "text": "def pre_send(self, *args, **kwargs):", "title": "" }, { "docid": "c3f122d3c8f862e6724402b856154419", "score": "0.49872327", "text": "def share(self, send, receive):\n pass", "title": "" }, { "docid": "e3c39d2fd0ac9e7ac55686750a0e4518", "score": "0.49871108", "text": "def pose_cb(self, msg):\n self.car_pos[0] = msg.pose.position.x\n self.car_pos[1] = msg.pose.position.y\n self.car_pos[2] = msg.pose.position.z\n\n # only calc final waypoints if base waypoints have been published\n if self.base_waypoints:\n self._calculate_next_waypoints()\n self._publish_next_waypoints()", "title": "" }, { "docid": "e50a69deec5699015295899c01cad1a0", "score": "0.4987031", "text": "async def proposal_feed(sio, recv):\n conn = await create_connection()\n subscription = await proposals_query.subscribe_to_proposals(conn)\n while await subscription.fetch_next():\n proposal = await subscription.next()\n proposal_resource = await compile_proposal_resource(\n conn, proposal.get(\"new_val\")\n )\n\n conn.close()\n\n next_id = escape_user_input(recv.get(\"next_id\"))\n if (\n proposal_resource[\"status\"] == \"OPEN\"\n and next_id in proposal_resource[\"approvers\"]\n ):\n await sio.emit(\"feed\", json.dumps({\"open_proposal\": proposal_resource}))\n if next_id == proposal_resource[\"opener\"]:\n await sio.emit(\"feed\", json.dumps({\"user_proposal\": proposal_resource}))", "title": "" }, { "docid": "9261d8dc0a25bb2380191a2b6669fa46", "score": "0.49867836", "text": "def handle_prepare(self, src, proposal_id):\n if proposal_id < self.highest_promised:\n log.info('Rejecting Prepare: proposal_id < highest_promised')\n return\n self.highest_promised = proposal_id\n majority = self.majority(self.get_last_applied_value_index())\n self.network_node.send(src, {\n 'do': 'paxos_promise',\n 'proposal_id': proposal_id,\n 'acc_id': self.current_id,\n 'accepted': self.log,\n 'majority': majority,\n })\n self.current_leader = proposal_id[1]", "title": "" }, { "docid": "468841e51a03d590b56846e2a9f696a2", "score": "0.49809834", "text": "def deliver_message(self, m: Message):\n if m.type == \"PREPARE\":\n # The acceptor gets a PREPARE message\n # If the n saved in prior is smaller than that of the message, send a PROMISE\n if self.prior[0] < m.n:\n if self.prior[1]:\n self.network.queue_message(Message(src=self, dst=m.src, type_=\"PROMISE\", n=m.n, prior=self.prior))\n else:\n self.network.queue_message(Message(self, m.src, \"PROMISE\", m.n))\n \n elif m.type == \"ACCEPT\":\n # The acceptor gets an ACCEPT message\n # If the n saved in prior is smaller than that of the message, send a PROMISE\n if self.prior[0] < m.n:\n # Send an ACCEPTED message to proposer\n self.network.queue_message(Message(self, m.src, \"ACCEPTED\", m.n, value=m.value))\n # update prior value\n self.prior = (m.n, m.value)\n else:\n self.network.queue_message(Message(self, m.src, \"REJECTED\", m.n))", "title": "" }, { "docid": 
"611df7a45bd9dc079c73faf875cffab1", "score": "0.49748728", "text": "def subscriberToPreparer():\n\tconnection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))\n\tchannel = connection.channel()\n\t\n\tchannel.queue_declare(queue='queueToFront')\n\t\n\tdef callback(ch, method, properties, response):\n\t\tif(response != ''):\n\t\t\tchannel.cancel()\n\t\t\tconnection.close()\n\t\t\tglobal product \n\t\t\tproduct = response\t\n\n\tchannel.basic_consume(callback, queue='queueToFront', no_ack=True)\n\t\n\tchannel.start_consuming()\n\t\n\treturn product", "title": "" }, { "docid": "e76d9bfa09df6ebc0c968e0cbbbaf197", "score": "0.49665374", "text": "def connection_process(self, socket):\n\n def push_to_acceptor_queue(q_msg):\n \"\"\"\n push to acceptor\n \"\"\"\n self.acceptor_queue_lock.acquire()\n self.acceptor_queue.put(q_msg)\n self.acceptor_queue_lock.release()\n\n def push_to_proposer_queue(q_msg):\n \"\"\"\n push to proposer\n \"\"\"\n self.proposer_queue_lock.acquire()\n self.proposer_queue.put(q_msg)\n self.proposer_queue_lock.release()\n\n done = 0\n try:\n while (done == 0):\n\n # receive the message\n smsg = socket.recv(1000)\n\n # unpack the message data\n msg = pickle.loads(smsg)\n\n assert isinstance(msg, message.message)\n\n # switch on the message type\n msg_type = msg.msg_type\n\n # route the message to the appropriate process based its type\n proposer_msg_types = [\n MESSAGE_TYPE.PREPARE_ACK,\n MESSAGE_TYPE.PREPARE_NACK,\n MESSAGE_TYPE.ACCEPT_ACK,\n ]\n acceptor_msg_types = [\n MESSAGE_TYPE.PREPARE,\n MESSAGE_TYPE.ACCEPT,\n ]\n # draw a dice here\n dice = random.random()\n\n if msg_type in proposer_msg_types: # internal prop msgs\n # drop message\n if dice < self.drop_rate:\n # print \"drop a messge to proposer\"\n continue\n # dup message\n if dice < self.dup_rate:\n # print \"duplicate a messge to proposer\"\n push_to_proposer_queue(msg)\n # actually send message\n push_to_proposer_queue(msg)\n elif msg_type == MESSAGE_TYPE.CLIENT: # client msgs\n push_to_proposer_queue(msg)\n elif msg_type in acceptor_msg_types: # internal acc msgs\n # drop message\n if dice < self.drop_rate:\n # print \"drop a messge to acceptor\"\n continue\n # dup message\n if dice < self.dup_rate:\n # print \"duplicate a messge to acceptor\"\n push_to_acceptor_queue(msg)\n # actually send message\n push_to_acceptor_queue(msg)\n elif msg_type == message.MESSAGE_TYPE.CLIENT_ACK:\n raise ValueError(\"ERROR: Got a client ACK message.\")\n elif msg_type == message.MESSAGE_TYPE.EXIT:\n self.proposer_queue.put(msg)\n self.acceptor_queue.put(msg)\n done = 1\n else:\n raise ValueError(\n \" ERROR - Got a message which makes no sense.\")\n\n # check if the socket is alive\n socket.send(\" \")\n\n # if the socket closes, handle the disconnect exception and terminate\n except Exception, e:\n pass\n\n # close the server socket\n socket.close()", "title": "" }, { "docid": "5712e36f65fc3c64ce0a2a210d962e96", "score": "0.49647197", "text": "def test_counter_proposing(self):\n cfp_msg, buyer_dialogue = self.buyer_dialogues.create(\n counterparty=self.seller_addr,\n performative=FipaMessage.Performative.CFP,\n query=Query([Constraint(\"something\", ConstraintType(\">\", 1))]),\n )\n\n assert len(buyer_dialogue._outgoing_messages) == 1, \"No outgoing message.\"\n assert len(buyer_dialogue._incoming_messages) == 0, \"Some incoming messages.\"\n assert (\n buyer_dialogue.last_outgoing_message == cfp_msg\n ), \"wrong outgoing message in buyer dialogue after sending cfp.\"\n assert (\n 
buyer_dialogue.dialogue_label.dialogue_reference[0] != \"\"\n ), \"Dialogue reference incorrect.\"\n assert (\n buyer_dialogue.dialogue_label.dialogue_reference[1] == \"\"\n ), \"Dialogue reference incorrect.\"\n dialogue_reference_left_part = buyer_dialogue.dialogue_label.dialogue_reference[\n 0\n ]\n\n # cfp arrives at seller\n\n seller_dialogue = self.seller_dialogues.update(cfp_msg)\n\n assert len(seller_dialogue._outgoing_messages) == 0, \"Some outgoing message.\"\n assert len(seller_dialogue._incoming_messages) == 1, \"No incoming messages.\"\n assert (\n seller_dialogue.last_incoming_message == cfp_msg\n ), \"wrong incoming message in seller dialogue after receiving cfp.\"\n assert (\n seller_dialogue.dialogue_label.dialogue_reference[0] != \"\"\n ), \"Dialogue reference incorrect.\"\n assert (\n seller_dialogue.dialogue_label.dialogue_reference[1] != \"\"\n ), \"Dialogue reference incorrect.\"\n\n # seller creates proposal\n proposal_msg = seller_dialogue.reply(\n target_message=cfp_msg,\n performative=FipaMessage.Performative.PROPOSE,\n proposal=Description({\"foo1\": 1, \"bar1\": 2}),\n )\n\n assert len(seller_dialogue._outgoing_messages) == 1, \"No outgoing messages.\"\n assert len(seller_dialogue._incoming_messages) == 1, \"No incoming messages.\"\n assert (\n seller_dialogue.last_outgoing_message == proposal_msg\n ), \"wrong outgoing message in seller dialogue after sending proposal.\"\n\n # proposal arrives at buyer\n\n buyer_dialogue = self.buyer_dialogues.update(proposal_msg)\n\n assert len(buyer_dialogue._outgoing_messages) == 1, \"No outgoing messages.\"\n assert len(buyer_dialogue._incoming_messages) == 1, \"No incoming messages.\"\n assert (\n buyer_dialogue.last_incoming_message == proposal_msg\n ), \"wrong incoming message in buyer dialogue after receiving proposal.\"\n assert (\n buyer_dialogue.last_incoming_message == proposal_msg\n ), \"Wrong incoming message.\"\n assert (\n buyer_dialogue.dialogue_label.dialogue_reference[0] != \"\"\n ), \"Dialogue reference incorrect.\"\n assert (\n buyer_dialogue.dialogue_label.dialogue_reference[1] != \"\"\n ), \"Dialogue reference incorrect.\"\n assert (\n dialogue_reference_left_part\n == buyer_dialogue.dialogue_label.dialogue_reference[0]\n ), \"Dialogue refernce changed unexpectedly.\"\n\n # buyer creates counter proposal 1\n counter_proposal_msg_1 = buyer_dialogue.reply(\n target_message=proposal_msg,\n performative=FipaMessage.Performative.PROPOSE,\n proposal=Description({\"foo1\": 3, \"bar1\": 3}),\n )\n\n assert (\n len(buyer_dialogue._outgoing_messages) == 2\n ), \"incorrect number of outgoing_messages in buyer dialogue after sending counter-proposal 1.\"\n assert (\n len(buyer_dialogue._incoming_messages) == 1\n ), \"incorrect number of incoming_messages in buyer dialogue after sending counter-proposal 1.\"\n assert (\n buyer_dialogue.last_outgoing_message == counter_proposal_msg_1\n ), \"wrong outgoing message in buyer dialogue after sending counter-proposal 1.\"\n\n # counter-proposal 1 arrives at seller\n\n seller_dialogue = self.seller_dialogues.update(counter_proposal_msg_1)\n\n assert (\n len(seller_dialogue._outgoing_messages) == 1\n ), \"incorrect number of outgoing_messages in seller dialogue after receiving counter-proposal 1.\"\n assert (\n len(seller_dialogue._incoming_messages) == 2\n ), \"incorrect number of incoming_messages in seller dialogue after receiving counter-proposal 1.\"\n assert (\n seller_dialogue.last_incoming_message == counter_proposal_msg_1\n ), \"wrong incoming message in seller 
dialogue after receiving counter-proposal 1.\"\n\n # seller creates counter-proposal 2\n counter_proposal_msg_2 = seller_dialogue.reply(\n target_message=counter_proposal_msg_1,\n performative=FipaMessage.Performative.PROPOSE,\n proposal=Description({\"foo1\": 2, \"bar1\": 2}),\n )\n\n assert (\n len(seller_dialogue._outgoing_messages) == 2\n ), \"incorrect number of outgoing_messages in seller dialogue after sending counter-proposal 2.\"\n assert (\n len(seller_dialogue._incoming_messages) == 2\n ), \"incorrect number of incoming_messages in seller dialogue after sending counter-proposal 2.\"\n assert (\n seller_dialogue.last_outgoing_message == counter_proposal_msg_2\n ), \"wrong outgoing message in seller dialogue after sending counter-proposal 2.\"\n\n # counter-proposal 2 arrives at buyer\n\n buyer_dialogue = self.buyer_dialogues.update(counter_proposal_msg_2)\n\n assert (\n len(buyer_dialogue._outgoing_messages) == 2\n ), \"incorrect number of outgoing_messages in buyer dialogue after receiving counter-proposal 2.\"\n assert (\n len(buyer_dialogue._incoming_messages) == 2\n ), \"incorrect number of incoming_messages in buyer dialogue after receiving counter-proposal 2.\"\n assert (\n buyer_dialogue.last_incoming_message == counter_proposal_msg_2\n ), \"wrong incoming message in buyer dialogue after receiving counter-proposal 2.\"", "title": "" }, { "docid": "4379faac8a596813e701030770f5c5a8", "score": "0.49592474", "text": "def msg_prepare_preempted(self, conn, msg):\r\n if msg.inresponseto in self.outstandingprepares:\r\n prc = self.outstandingprepares[msg.inresponseto]\r\n if self.debug: self.logger.write(\"Paxos State\",\r\n \"got a reject for ballotno %s proposal %s with %d/%d\"\r\n % (prc.ballotnumber, prc.proposal,\r\n prc.receivedcount, prc.ntotal))\r\n # take this response collector out of the outstanding prepare set\r\n del self.outstandingprepares[msg.inresponseto]\r\n # become inactive\r\n self.active = False\r\n # handle reject\r\n self._handlereject(msg, prc)", "title": "" }, { "docid": "9c07d6b9c77c52a40ae0bc6cc8ae8621", "score": "0.49552506", "text": "def forward(self):\n\t\traise NotImplementedError", "title": "" }, { "docid": "f48030c7aa62c9b0d16556daf8eb185a", "score": "0.49471027", "text": "def reset_converse(self):\n self.should_converse = False\n self.intent_context = None", "title": "" }, { "docid": "b2029f5216a9c6346c1abbe3b4340151", "score": "0.49458358", "text": "def match_proposal(self,propID):\n infos=Proposal.objects.get(id=propID)\n requests=Request.objects.filter(nb_requested_seats__lte=infos.number_of_seats, status='P')\n for request in requests:\n found = False\n for offer in Offer.objects.filter(request=request):\n if Ride.objects.filter(offer=offer):\n found=True\n break\n if not found:\n route_points = RoutePoints.objects.filter(proposal=infos).order_by('order')\n valid_pair = list()\n for i in xrange(len(route_points)-1):\n if get_distance((request.departure_point_lat,request.departure_point_long),(route_points[i].latitude,route_points[i].longitude))<request.departure_range:\n for j in range(i+1,len(route_points)):\n if get_distance((request.arrival_point_lat,request.arrival_point_long),(route_points[j].latitude,route_points[j].longitude))<request.arrival_range:\n valid_pair.append((i,j))\n for (i,j) in valid_pair:\n #delete all not in time arrival\n if total_seconds(abs(get_time_at_point([(r.latitude,r.longitude) for r in route_points],j,infos.departure_time,infos.arrival_time)-request.arrival_time)) < request.max_delay:\n 
self.send_to(self.offermanager_port, ('buildoffer',\n request.id,\n infos.id, \n (\n route_points[i].latitude,\n route_points[i].longitude,\n get_time_at_point([(r.latitude,r.longitude) for r in route_points], \n i,\n infos.departure_time,\n infos.arrival_time),\n route_points[i].id\n ),\n (\n route_points[j].latitude,\n route_points[j].longitude,\n get_time_at_point([(r.latitude,r.longitude) for r in route_points],\n j,\n infos.departure_time,infos.arrival_time),\n route_points[j].id\n )\n ))", "title": "" }, { "docid": "f3b12a4a3fa9d9c2e2eb0e9ddc18523f", "score": "0.4941873", "text": "def proceed(self):\n\t\tpass", "title": "" }, { "docid": "0a23398f9573fa30e6ef1b314c76bfa7", "score": "0.4938777", "text": "def forward(self):\n\n assert self.payload\n assert self.payload_peer_address\n if self.payload_peer_address in self.connections:\n conn = self.connections[self.payload_peer_address]\n default = False\n else:\n conn = self.connections[None] # propagate exception if not created\n default = True\n _logger.debug(\"Forwarding datagram from peer: %s, default: %s\",\n self.payload_peer_address, default)\n self._forwarding_socket.sendto(self.payload, conn.getsockname())\n self.payload = \"\"\n self.payload_peer_address = None", "title": "" }, { "docid": "24da5a0065ca8713873e40e657360f39", "score": "0.4937395", "text": "def keep_resending(self):\n\n # Keep doing this forever\n while not rospy.is_shutdown():\n # Get the last item, wait if there are none\n try:\n msg = self.queue.get(timeout=0.3)\n except Queue.Empty:\n continue\n\n # Compute the remaining delay time\n delta = msg[0] + self.delay - rospy.get_time()\n if delta > self.accuracy:\n # Wait until the delay has passed\n rospy.sleep(delta)\n\n # Send the next message\n self.publisher.publish(msg[1])", "title": "" }, { "docid": "eeefa86f48f4678c2dda4b46c0a2adbc", "score": "0.49289337", "text": "def resumeProducing(self):\r\n if self._currentProducer:\r\n self._currentProducer.resumeProducing()\r\n else:\r\n self._task.resume()", "title": "" }, { "docid": "1622599b67e1b8bb18c1c843d1a7ed61", "score": "0.49062502", "text": "def pose_cb(self, msg):\n self.car_pos_x = msg.pose.position.x\n self.car_pos_y = msg.pose.position.y\n self.car_pos_z = msg.pose.position.z\n self.car_ori_x = msg.pose.orientation.x\n self.car_ori_x = msg.pose.orientation.y\n self.car_ori_z = msg.pose.orientation.z\n self.car_ori_w = msg.pose.orientation.w\n\n # only calc final waypoints if base waypoints have been published\n if self.base_waypoints and not self.stop_at_tl:\n self.car_wp_idx = self._find_wp_in_front_of_car()\n\n if not self.next_n_waypoints:\n self.next_n_waypoints, \\\n self.next_n_waypoint_glob_idxs = self._calc_next_waypoints(car_wp_idx=self.car_wp_idx)\n else:\n self._update_next_waypoints(next_n_waypoints=self.next_n_waypoints,\n next_n_waypoint_glob_idxs=self.next_n_waypoint_glob_idxs,\n car_wp_idx=self.car_wp_idx)\n\n self.final_waypoints_pub.publish(Lane(waypoints=self.next_n_waypoints))", "title": "" }, { "docid": "8fc1c560a10311c3dc72794b44ac5e4d", "score": "0.49006054", "text": "def process_message(self):\n while True:\n if not self.next_message:\n return False\n\n # check if next message is in the past, and drop it\n if (self.next_message.round, self.next_message.phase) < (self.round, self.phase):\n (self.logger.debug if self.is_leader else self.logger.warning)(\n \"dropping past message from round %d / phase %s\",\n self.next_message.round, self.next_message.phase.name\n )\n self.drop_message()\n else:\n break\n\n # check if next message is 
in the future, and process it at a later point in time\n if (self.next_message.round, self.next_message.phase) > (self.round, self.phase):\n return False\n\n msg_item = self.dequeue_message()\n msg_type = get_message_type(msg_item.content)\n msg_sender = get_message_sender(msg_item.content)\n\n if msg_sender == self.leader and msg_type != MessageType.Propose:\n self.logger.warning(f\"FLAGGING NODE {msg_sender} AS ADVERSARY, LEADER SENT DIFFERENT MESSAGE\")\n self.flag_adversary(msg_sender)\n self.recover()\n return True\n\n signed_msg: SignedMessage = SignedMessage.deserialize(\n msg_item.content) # the deserialize method here comes from its parent class Serializable\n msg = signed_msg.message # signature was already verified prior to insertion into the message buffer\n assert msg.round_idx == self.round\n assert msg.type.to_phase() == self.phase\n\n # TODO: add try/except for deserialization, and flag leader as adversial upon failure\n\n self.logger.debug(\"processing %s message\", msg_type.name)\n if msg_type == MessageType.Propose:\n self.process_propose(msg)\n elif msg_type == MessageType.Acknowledge:\n self.process_acknowledge(msg)\n elif msg_type == MessageType.Confirm:\n self.process_confirm(signed_msg)\n elif msg_type == MessageType.Recover:\n self.process_recover(msg)\n else:\n assert False, \"message type not considered\"\n\n return True", "title": "" }, { "docid": "d966ebeb7f31c270e64b001f153733ff", "score": "0.4894814", "text": "def connectionMade(self):\n self.transport.registerProducer(self, True)", "title": "" }, { "docid": "1ca2a0e50512af839d0ddb3b32bdb9df", "score": "0.4885375", "text": "async def gossip_arrival(self, k, pre, fin, FIN, prp, msg_all, echo, inc_nbrs):\n \n\n # Update counter\n if inc_nbrs:\n IncNbrHelper.merge(self.inc_nbrs, inc_nbrs)\n\n #Check for max tag\n max_tag = self.S.max_phase(['pre', 'fin', 'FIN'])\n if (max_tag[0] >= self.t_top and self.stabilized()):\n self.propose(max_tag)\n elif (self.and_every(self.all_same)):\n # We need to store the received tag values, for the implicit FIN to work\n (self.pre[k], self.fin[k], self.FIN[k]) = (pre, fin, FIN)\n i = self.uid\n # pre\n self.pre[i] = max( pre, fin, FIN, self.S.max_phase(['pre', 'fin', 'FIN']) )\n await self.S.update_phase(self.pre[i], None, 'pre')\n # fin\n self.fin[i] = max( fin, FIN, self.S.max_phase(['fin', 'FIN']) )\n await self.S.update_phase(self.fin[i], None, 'fin')\n #FIN\n implicitFinalized = []\n fin_tags = [t for t in self.fin.values() if t == fin]\n if len(fin_tags) >= self.quorum:\n implicitFinalized = [fin]\n self.FIN[i] = max( FIN, self.S.max_phase(['FIN']), *implicitFinalized )\n await self.S.update_phase(self.FIN[i], None, 'FIN')\n # Remove not relevant records\n self.S.relevant()\n\n # Run wrap around algorithm\n self.run_global_reset(k, prp, msg_all, echo)\n\n #Limit gossip frequency\n await asyncio.sleep(self.gsp_freq)", "title": "" }, { "docid": "947571be7719bb0917cb875da38d55c7", "score": "0.48837334", "text": "async def multiplex(self, msg):\n subject = msg.subject\n pers = person.Person(**json.loads(msg.data.decode()))\n\n await self.inc_metric(\"message-recv\")\n for _filter in self.get_filters():\n if not pers.applies_to(json.loads(_filter)):\n await self.inc_metric(\"message-droped\")\n continue\n\n subjects = self._redis.sscan(_filter)[1]\n await self.inc_metric(\"message-forwarded\")\n await self.inc_metric(\"message-multiplex-to-subjects\", len(subjects))\n for subject in subjects:\n await self.publish(subject.decode(), msg.data)", "title": "" }, { "docid": 
"c6fa968ca8e6c9a6bdd98681dcdff879", "score": "0.4874194", "text": "def propose_update(self, new_value):\n if self.paxos.proposed_value is None:\n self.paxos.propose_value( new_value )", "title": "" }, { "docid": "351307bcee79fcbaeafd7ae87400ccc0", "score": "0.48730168", "text": "def pose_cb(self, msg):\n num_all_ref_waypoints = len(self.all_ref_waypoints)\n if self.locator is None or num_all_ref_waypoints == 0:\n return\n\n # Detect distance.\n detect_distance = self.detect_second * self.current_linear_velocity\n if detect_distance < self.minimum_detect_distance:\n detect_distance = self.minimum_detect_distance \n detect_distance_squre = detect_distance**2\n\n # Find the reference waypoint ahead first\n _, _, idx_ahead = self.locator.locate_waypoints_around(msg.pose)\n i = 0\n while i < self.max_num_final_waypoints:\n idx = idx_ahead + i\n if idx >= num_all_ref_waypoints:\n idx -= num_all_ref_waypoints\n ref_waypoint = self.all_ref_waypoints[idx]\n wp_position = ref_waypoint.pose.pose.position\n distance_squre = self.pose_distance_squre(msg.pose.position, wp_position)\n if distance_squre > detect_distance_squre:\n if i == 0: return\n break\n\n waypoint = self.final_waypoints[i]\n waypoint.pose.pose = ref_waypoint.pose.pose\n waypoint.twist.twist.linear.x = ref_waypoint.twist.twist.linear.x\n # waypoint.twist.twist.linear.x = 50*1.609344 / 3.6\n waypoint.twist.twist.angular = ref_waypoint.twist.twist.angular\n i += 1\n \n waypoints = self.final_waypoints[:i]\n waypoints = self.process_traffic_lights(waypoints, idx_ahead, idx_ahead + i-1, msg.pose.position)\n waypoints = self.smooth_start(waypoints, msg.pose.position)\n\n final_waypoints = Lane()\n final_waypoints.header.frame_id = \"/world\"\n final_waypoints.waypoints = waypoints\n\n self.final_waypoints_pub.publish(final_waypoints)", "title": "" }, { "docid": "5ee4f31c1850d2ba7b9c91c2da783527", "score": "0.48726985", "text": "def start_paxos_round(self, value):\n if self.current_leader == self.node_id():\n # self.current_id = (self.current_id[0] + 1, self.current_id[1])\n self.network_node.broadcast({\n 'do': 'paxos_propose',\n 'proposal_id': self.current_id,\n 'index': len(self.log),\n 'value': value\n })\n self.log.append(value)\n while len(self.log) > len(self.acceptances):\n self.acceptances.append(1)\n else:\n self.network_node.send(self.current_leader, {\n 'do': 'paxos_relay',\n 'value': value,\n })", "title": "" }, { "docid": "a95a4a44a216fd4287f67a29ab0154fd", "score": "0.48701113", "text": "def transfer_to_agent(self):\n while True:\n data = self.recv_broker.get()\n cmd = get_msg_info(data, \"cmd\")\n if cmd == \"close\":\n logging.debug(\"enter explore close\")\n self.close()\n continue\n\n data = get_msg_data(data)\n self.send_agent.send(data)", "title": "" }, { "docid": "ba927ead6250fe24c56d4bf1d1093536", "score": "0.4864801", "text": "def pose_1_cb(self, msg):\n self.pose_1 = msg", "title": "" }, { "docid": "a3fe1b67ab00d7103941c8a2cc750284", "score": "0.4864565", "text": "def _reply_link_ready(self):\n LOG.info(\"Messaging is active (%(hostname)s:%(port)s%(vhost)s)\",\n {'hostname': self.hosts.current.hostname,\n 'port': self.hosts.current.port,\n 'vhost': (\"/\" + self.hosts.virtual_host\n if self.hosts.virtual_host else \"\")})\n\n for sender in self._all_senders.values():\n sender.attach(self._socket_connection.pyngus_conn,\n self.reply_link, self.addresser)", "title": "" }, { "docid": "42670e54f79d0dd65d4f7ff562efc2bf", "score": "0.4864284", "text": "def applyPropagation(self):\r\n change = True\r\n while change:\r\n 
change = self.propogateFeatures()", "title": "" }, { "docid": "4e6eaade0ebd1fae20f3213c6ba9ef40", "score": "0.48626477", "text": "def test_switch_back(self):\n # X -> foo\n p = Pipe('foo')\n foo_d = p.alive['foo'] \n proto1 = object()\n p.addConnection('foo', proto1)\n \n # X -> bar\n p.switch('bar')\n bar_d = p.alive['bar']\n proto2 = object()\n p.addConnection('bar', proto2)\n \n # X -> foo\n p.switch('foo')\n p.removeConnection('foo', proto1)\n \n self.assertFalse(foo_d.called, \"Should not have called\"\n \" since that is where the current forwarding is\")\n self.assertEqual(p._connections['foo'], 0)\n self.assertFalse(bar_d.called)\n \n p.removeConnection('bar', proto2)\n self.assertTrue(bar_d.called)", "title": "" }, { "docid": "f305ac325ffe9d64a23b426ceb77ef0b", "score": "0.48587662", "text": "def _handleArrive(self, curTime):\n prob = float(random.random())\n if prob <= self._arriveProb:\n self._numPassengers += 1\n print(self._numPassengers)\n self._passengerQ.enqueue(Passenger(self._numPassengers, curTime))", "title": "" }, { "docid": "7d7c8e4180eba68951e2948337ddd0b3", "score": "0.48585853", "text": "def proceed(self):\r\n\r\n pass", "title": "" }, { "docid": "70dbbb3dcae14aeec030296238b84249", "score": "0.48385015", "text": "def accept_exchange(self):\n\n # The check is not made if there are buildings on the property.\n if self.required_properties is not None:\n for prop in self.required_properties:\n prop.owner = self.player_from\n self.player_from.properties.append(prop)\n\n self.player_to.properties = [prop for prop in self.player_to.properties if\n prop not in self.required_properties]\n\n if self.given_properties is not None:\n for prop in self.given_properties:\n prop.owner = self.player_to\n self.player_to.properties.append(prop)\n\n self.player_from.properties = [prop for prop in self.player_from.properties if\n prop not in self.given_properties]\n\n if self.price_offered != 0:\n if not self.player_from.update_budget(-self.price_offered):\n # TODO: The player fails to give the money\n pass\n self.player_to.update_budget(self.price_offered)\n\n if self.asking_price != 0:\n if not self.player_to.update_budget(-self.asking_price):\n # # TODO: The player fails to give the money\n pass\n self.player_from.update_budget(self.asking_price)\n\n self.player_to.exchanges.remove(self)", "title": "" }, { "docid": "9cf4bd167e44e913c2ccee7d1a0139c9", "score": "0.48364452", "text": "def leader_running(self):\n\n if not self.isCurrentLeader:\n print(\"Not the leader - ping send\")\n gotPingResp = self.ping_leader() # blocks for the timeout time\n time.sleep(self.timeout)\n\n if not gotPingResp:\n print(\"No response detected from leader, starting election.\")\n if not self.electionInProgress and not gotPingResp:\n self.election_new()\n else:\n self.data_handler_new()\n for m in self.okMessages:\n self.send_ok(m)\n self.okMessages = []\n\n\n\n\n self.data_handler_new() # handles all messages\n\n for m in self.electionMessages:\n if int(m.pid) > self.pid: #They are the leader\n self.clIP = m.sourceIP\n self.isCurrentLeader = False\n else: # m.pid < self.pid:\n #self.election_new()\n self.send_ok(m)\n self.election_new()\n\n self.electionMessages = []\n\n time.sleep(self.timeout)", "title": "" }, { "docid": "e3544255a8d70fecdfdc87ead714a26e", "score": "0.48330167", "text": "def on_initial_accepted(self, proposal):\n self.accepted_count += 1\n proposal.record.add_callback(self.on_initial_update)\n proposal.record.ev_id = self.get_ev()", "title": "" }, { "docid": 
"50c0375e7feb40c8ea44078d9313480e", "score": "0.48300254", "text": "def forward(self, msg: str):\n self.lock.acquire(blocking=True)\n is_forwarded = self.forwarded_msgs.__contains__(msg)\n self.lock.release()\n if not is_forwarded:\n print(f'Received -> {msg} [FORWARD]')\n for conn in self.connections:\n self.send_msg(msg, conn)\n self.lock.acquire(blocking=True)\n self.forwarded_msgs[msg] = 1\n self.lock.release()\n else:\n print(f'Received -> {msg} [DON\\'T FORWARD]')\n self.lock.acquire(blocking=True)\n self.forwarded_msgs[msg] += 1\n self.lock.release()", "title": "" }, { "docid": "2ee587739dc6239e15114c98ce88fe86", "score": "0.4818604", "text": "def _do_pre_connection(self, auto_accept):\n\n while True:\n data = self.radv.sock.recv(1024)\n if data.find(NC_KA_PRE_CON) != -1:\n log.info(\"Adv sent a preconn ka.\")\n self.radv.sock.send(NC_KA_PRE_CON)\n log.debug(\"Dsc sent a preconn ka.\")\n elif data.find(NC_ACCEPT_CON) != -1:\n log.info(\"Adv accepted the connection.\")\n if auto_accept:\n self.radv.sock.send(NC_ACCEPT_CON)\n log.debug(\"Dsc accepted the connection.\")\n else:\n log.debug(\"Dsc still has to accept the connection.\")\n break\n elif data.find(NC_REJECT_CON) != -1:\n log.info(\"Adv rejected the connection.\")\n self.radv.sock.send(NC_REJECT_CON)\n log.debug(\"Dsc rejected the connection.\")\n exit(\"Dsc rejected the connection.\")\n else:\n log.warning(\"Adv sent {}.\".format(data))\n log.warning(\"Dsc don't know how to handle packet\")", "title": "" }, { "docid": "4d9918c0b12acafb391e471da2065922", "score": "0.4814745", "text": "def testRecreateConsumer(self):\n self.sf.Produce([self.e1, self.e2, self.e3])\n self.sf.AddConsumer('a')\n stream1 = self.sf.Consume('a')\n self.assertEqual(self.e1, stream1.Next())\n stream1.Commit()\n self.sf.RemoveConsumer('a')\n self.sf.AddConsumer('a')\n stream2 = self.sf.Consume('a')\n self.assertEqual(self.e2, stream2.Next())\n stream2.Commit()", "title": "" } ]
a9f28799bdc2e3a75c189e9bf6332bde
Finds the square root of the number passed in
[ { "docid": "0e4325d6e117d0376dd29e1ef40d39ca", "score": "0.8401177", "text": "def sqroot(x):\r\n return math.sqrt(x)", "title": "" } ]
[ { "docid": "1d04741b2844d4809581ed89a4c5afeb", "score": "0.84071213", "text": "def square_root(number):\n return math.sqrt(number)", "title": "" }, { "docid": "52578eaa2bf5a63e0685f70d6af221dd", "score": "0.80676985", "text": "def mySqrt(self, x):\n a = x\n b = (a + 1) // 2\n while b < a:\n a = b\n b = (a + x // a) // 2\n return a", "title": "" }, { "docid": "11a234b4ce0e4ea8f9bc93fa0541f82b", "score": "0.7980878", "text": "def square_root(my_number = 10):\n my_number = mt.sqrt(my_number)\n return my_number", "title": "" }, { "docid": "ad5b2e711e5eadc1979ce994f1b9c0b5", "score": "0.79683685", "text": "def sqrt(number):\n return pow(number, 0.5)", "title": "" }, { "docid": "e2d0ce42f7f133f5ecac36a197a9fc91", "score": "0.78975475", "text": "def sqrt(number):\n start_number = 1\n end_number = number // 2\n\n if number == 0 or number == 1:\n return number\n\n while start_number <= end_number:\n mid_number = (start_number + end_number) // 2\n if mid_number * mid_number == number:\n return mid_number\n elif mid_number * mid_number < number:\n start_number = mid_number + 1\n result = mid_number\n else:\n end_number = mid_number - 1\n\n return result", "title": "" }, { "docid": "794a4b06a8df1219a7369cda162d312c", "score": "0.7869849", "text": "def square_root(a):\n x = a - 1\n while True:\n y = (x + a/x) / 2\n if y == x:\n break\n x = y\n return x", "title": "" }, { "docid": "81384cb44369c6e5b24699ba6cdcb6bc", "score": "0.7865833", "text": "def sqrt_(n: float) -> float:\n counter = 1\n while 1:\n x = 0\n while x * x <= n:\n if abs((x * x) - n) < 0.00001:\n return x\n x += counter\n counter /= 10", "title": "" }, { "docid": "d132ee3a1e78513d66ead42c6d01475d", "score": "0.7780282", "text": "def sqrt(number : int) -> int:\n if type(number) is not int:\n # number must be an int\n raise TypeError('Input provided is not an integer')\n elif number < 0:\n # number must be positive\n raise ValueError('Square root function cannot handle negative inputs')\n elif number < 2:\n # Square root of 0 and 1 are itself\n return number\n \n\n upper_lim = number//2\n lower_lim = 2\n\n diff = upper_lim - lower_lim\n\n # Iterates until it reaches the convergence criteria. 
\n while diff > 1:\n\n mid_val = (lower_lim + upper_lim) // 2\n\n if mid_val*mid_val > number:\n upper_lim = mid_val\n else:\n lower_lim = mid_val\n\n diff = upper_lim - lower_lim\n\n # Check if mid_val^2 is bigger than the number and return lower_lim if it is\n if mid_val*mid_val > number:\n mid_val = lower_lim\n\n return mid_val", "title": "" }, { "docid": "8f06815e57e880ede023fa71812ef63d", "score": "0.77793044", "text": "def compute_square_root(num):\n \n \n # Use assert statement to guarantee that no matter what\n # num is a non-negative number \n \n assert 0 <= num \n \n \n x = num \n # initialize guess to x/2\n guess = x / 2 \n \n \n while abs((guess * guess) - x) > 1e-12:\n \n print(f'Current guess is: {guess}')\n \n # Update guess\n guess = 0.5 * (guess + (x / guess))\n \n print(f'The approximate square root of {num} is: {guess}')\n print(f'Using the built-in math module, it is: {math.sqrt(num)}')", "title": "" }, { "docid": "792be1b37d9a16043d0b6fc6765ae48c", "score": "0.7738865", "text": "def sqrt(x):\n\n if x < 0:\n raise ValueError(\"Cannot compute square root \"\n \"of negative number {}\".format(x))\n\n guess = x\n i = 0\n while guess * guess != x and i < 20:\n guess = (guess + x / guess) / 2.0\n i += 1\n return guess", "title": "" }, { "docid": "d4ce4b23221fbb25c73d883180f1aec7", "score": "0.77362645", "text": "def square_root(num1: float) -> float:\n # logic\n num2 = num1 ** 0.05\n return num2", "title": "" }, { "docid": "a705d50af663f79e13834902a2977595", "score": "0.77343583", "text": "def _sqrt(number):\n if not number:\n number = 0\n\n result = None\n try:\n result = math.sqrt(float(number))\n except ValueError:\n # Handling of square root for negative numbers isn't consistent (e.g.\n # in GAE production, an exception is thrown whereas locally nan is\n # returned). Here we cover the case on GAE.\n # Why does this ever even happen? One case is the 'anonymous\n # conversion' stuff (which is used at least by ebay),\n # in which the # conversions may be greater than the # visitors. 
In\n # that case, computing variance (our variance function is currently\n # written with limiting assumptions) involves the square root of a\n # negative number.\n if number < 0:\n return float(\"nan\")\n except:\n logging.warning(\"Can't take square root of %s\" % number)\n pass\n\n return result", "title": "" }, { "docid": "7f4e111516a0f371d24e2ea64fedb91b", "score": "0.7733261", "text": "def sqrt(x, e=0.000000000001):\n n = x\n y = 1\n while n - y > e and n:\n n = (n + y) / 2\n y = x / n\n return n", "title": "" }, { "docid": "6873588a1ae2dc211128e2fe033e688a", "score": "0.7713392", "text": "def square_root(num1: float) -> float:\n # logic here\n num2 = num1 ** 0.5\n return num2", "title": "" }, { "docid": "2b52f44b38051ac7d045001989dc4d9b", "score": "0.7674549", "text": "def square_root(a):\n x = 1\n while x * x != a:\n x = square_root_update(x, a)\n return x", "title": "" }, { "docid": "ca015fc0ce5440393c2aa58241bae41c", "score": "0.7656691", "text": "def sqrt(number):\n if number == '' or number is None:\n return 'Not a valid number'\n\n if number == 0 or number == 1:\n \treturn number\n square_root = 0\n array = list(range(number+1))\n square_root = binary_search(array, number, 0, len(array)-1, square_root)\n \n return square_root", "title": "" }, { "docid": "f2b446b8cb5f424cdb7a3866b85bc2c8", "score": "0.764414", "text": "def sqrt(x):\n return SqrtOp(x)", "title": "" }, { "docid": "a7be2ba2d2d7fd69c0dd9753e6ee1e2b", "score": "0.7603734", "text": "def square_root(num1):\n g = num1 ** 0.5\n return g", "title": "" }, { "docid": "ef58638621da464cd8c56b348d2b340d", "score": "0.759546", "text": "def sqrt(number):\n if number < 0:\n return None\n elif number == 0:\n return 0\n elif 1 < number <= 3:\n return 1\n input_map = [i for i in range(1, (number//2)+2)]\n return sqrt_helper(input_map, 0, len(input_map)-1, number)", "title": "" }, { "docid": "9dd3f2ef913bd713dd32d79c8405e914", "score": "0.7584232", "text": "def sqrt(x):\n x=float(x)\n if (x < 0):\n return None\n else:\n guess = 1\n tolerance = 0.000000001\n Iterations = 0\n MaxIterations = 100\n while ((((guess*guess)-x)**2) > (tolerance**2)) and (Iterations < MaxIterations) :\n guess = ((guess + (x/guess))/2)\n Iterations = Iterations + 1\n## print 'Iterations = ',Iterations\n return guess", "title": "" }, { "docid": "f7c9e0ebdb1d017babeffcba5751fe70", "score": "0.75835156", "text": "def sqrt(number):\n if number < 0:\n return None\n if number == 0 or number == 1:\n return number\n return sqrt_root(number, 0, number // 2)", "title": "" }, { "docid": "1f1751fb39040bc155506e7838505865", "score": "0.755756", "text": "def intSqrt(self, number):\n return int(sqrt(number))", "title": "" }, { "docid": "d7dce4192fa10f3a6ebad63021472006", "score": "0.7553209", "text": "def _sqrt(x):\n x = np.clip(x, a_min=0, a_max=None)\n\n try:\n return np.sqrt(x)\n except (AttributeError, TypeError):\n exponent = 0.5\n\n try:\n exponent = np.take(x, 0).from_float(exponent)\n except AttributeError:\n pass\n\n return x ** exponent", "title": "" }, { "docid": "466248182dfc1a58c3a2b1feb1147b07", "score": "0.7546998", "text": "def square_rooted(self, x):\r\n return round(sqrt(sum([a*a for a in x])), 3)", "title": "" }, { "docid": "2b2f75af3b5235017b087f570ff06211", "score": "0.749695", "text": "def floor_sqrt(x):\n # Base cases\n if (x == 0 or x == 1):\n return x\n\n # Staring from 1, try all numbers until\n # i*i is greater than or equal to x.\n i = result = 1\n while result <= x:\n\n i += 1\n result = i * i\n\n return i - 1", "title": "" }, { "docid": 
"47fb3e11383a1805763803e520f8d30f", "score": "0.74968046", "text": "def sqrt(number):\n \n # handle Edge case\n if (number < 0):\n return None\n \n return binary_search(0, number, number)", "title": "" }, { "docid": "e862c58ccf2b7b5e1b9915956c8574d6", "score": "0.74959785", "text": "def mySqrt(self, x):\n left, right = 0, x\n while left <= right:\n mid = left + (right - left) / 2\n if mid * mid == x:\n return mid\n elif mid * mid > x:\n if (mid - 1) * (mid - 1) < x:\n return mid - 1\n right = mid - 1\n else:\n if (mid + 1) * (mid + 1) > x:\n return mid\n left = mid + 1", "title": "" }, { "docid": "45dda7fb849f33d2758c1cff3aa02b78", "score": "0.74894536", "text": "def sqrt(x):\n return x ** 0.5", "title": "" }, { "docid": "625f54f2f012e4d9c42ea141f673bb1e", "score": "0.7430754", "text": "def iSqrt(n):\n x = 1\n xOld = 1\n while True:\n aux = ( x + ( n / x ) ) / 2\n if aux == x:\n return x\n if aux == xOld:\n return min(x, xOld)\n xOld = x\n x = aux", "title": "" }, { "docid": "625f54f2f012e4d9c42ea141f673bb1e", "score": "0.7430754", "text": "def iSqrt(n):\n x = 1\n xOld = 1\n while True:\n aux = ( x + ( n / x ) ) / 2\n if aux == x:\n return x\n if aux == xOld:\n return min(x, xOld)\n xOld = x\n x = aux", "title": "" }, { "docid": "f43edef634f20e52585f8b18d76a6845", "score": "0.740675", "text": "def sqrt(n):\n approx = n/2.0\n while True:\n better = (approx + n/approx)/2.0\n if(abs(approx - better)) < 0.001:\n return better\n approx = better", "title": "" }, { "docid": "4c630f4ccc1e607cf62c5fbdcd04a92a", "score": "0.7361383", "text": "def sqrt(number):\n if type(number) is not int:\n #Enforce the input number is an integer\n return None\n return helper(number,1,number)", "title": "" }, { "docid": "43a9b99274fe8dd8e537450a0fe3c31e", "score": "0.7320515", "text": "def square_root(a):\n\tx = random.randint(int(math.sqrt(a)-2),int(math.sqrt(a)+2))\n\tnewton_sqrt(a, x)", "title": "" }, { "docid": "e3eba01738a196b92f91c74c7c82fe15", "score": "0.7318102", "text": "def square_root(a, iters=4):\n\t\ty = 0\n\t\tx = a/3\n\t\tfor i in range(iters):\n\t\t\t\ty = (x + a/x) / 2\n\t\t\t\tx = y\n\t\t\t\t#print (\" guess is {0}\".format(y))\n\t\t\n\t\treturn y", "title": "" }, { "docid": "bc78272e5eb70b2d1298fb8b9055b00e", "score": "0.73175925", "text": "def exec_num(x):\n return np.sqrt(x)", "title": "" }, { "docid": "2062c0a8bcc9b2dd9bc5ab492a59ba89", "score": "0.7310749", "text": "def math_sqrt(x):\n try:\n return math.sqrt(x)\n except:\n pass", "title": "" }, { "docid": "b6043694c443f173da4b371cdbee9cde", "score": "0.7292411", "text": "def square_root(x):\n F = x.parent()\n p = F.order()\n\n if p % 16 == 1:\n return tonelli_shanks_ct(x)\n\n if p % 4 == 3:\n if sqrt_cache.get(p) is None:\n sqrt_cache[p] = (F(1),)\n z = x ** ((p + 1) // 4)\n\n if p % 8 == 5:\n if sqrt_cache.get(p) is None:\n sqrt_cache[p] = (F(1), F(-1).sqrt())\n z = x ** ((p + 3) // 8)\n\n elif p % 16 == 9:\n if sqrt_cache.get(p) is None:\n sqrt_m1 = F(-1).sqrt()\n sqrt_sqrt_m1 = sqrt_m1.sqrt()\n sqrt_cache[p] = (F(1), sqrt_m1, sqrt_sqrt_m1, sqrt_sqrt_m1 * sqrt_m1)\n z = x ** ((p + 7) // 16)\n\n for mul in sqrt_cache[p]:\n sqrt_cand = z * mul\n if sqrt_cand ** 2 == x:\n return sqrt_cand\n\n return None", "title": "" }, { "docid": "a0226d6d73df60da3bedb2d76ae38f6a", "score": "0.72641283", "text": "def mySqrt(self, x: int) -> int:\n if x == 0:\n return 0\n # elif x < 4:\n # return 1\n \n head = 1\n end = int(x/2)\n\n while head != end:\n mid = int((end+head)/2)\n if mid*mid <= x:\n if mid == head:\n return mid\n head = mid\n else:\n end = 
mid\n return head", "title": "" }, { "docid": "b729eed4e21aac142cf5812b12f21b58", "score": "0.7255169", "text": "def mySqrt(self, x):\n l = 0\n h = x\n while l <= h:\n mid = (l + h) / 2\n if mid ** 2 <= x < (mid + 1) ** 2:\n return mid\n elif mid ** 2 > x:\n h = mid\n else:\n l = mid + 1", "title": "" }, { "docid": "5e3201fe75ea6ca67aef9668e3e75032", "score": "0.71270096", "text": "def root( num ):\n def find_root( min, max ):\n guess = ( min + max ) / 2.0\n guess2 = guess * guess\n if ( abs( guess2 - num ) < delta ):\n return guess\n elif ( guess2 < num ):\n return find_root( guess, max )\n else:\n return find_root( min, guess )\n\n delta = 0.001*num\n return find_root( 0, max(1,num) )", "title": "" }, { "docid": "7fd02410ee94be67185d9a8c5a506846", "score": "0.7102347", "text": "def sqrt():\n return Sqrt()", "title": "" }, { "docid": "3b1d214c9859d3141d4d2b3a1163c6f9", "score": "0.71003395", "text": "def integer_sqrt(i):\n if not i: return 0\n if i < 0: raise ValueError(\"cannot calculate square root of negative\")\n def n(xn):\n return (xn + i/xn)/2\n xn, xnp, xnpp = i, n(i), n(n(i))\n while xn != xnpp:\n xn, xnp, xnpp = xnp, xnpp, n(xnpp)\n return min(xnp, xnp)", "title": "" }, { "docid": "5427051b85a109876ebd4ba170b75439", "score": "0.7055412", "text": "def sqrt_exhaustive(x):\n\n #guess=0\n\n # Searching starting at 0 for proper square root of x\n for guess in range(x):\n if guess**2 == x:\n return guess\n \n return None\n #while guess**2 < x:\n # guess = guess + 1\n \n #if guess**2 == x:\n # return guess\n #else:\n # return None", "title": "" }, { "docid": "6b592e3894bf8421e5a167b4a09eba9a", "score": "0.7049284", "text": "def _isqrt_fast_python(x):\n # Use direct division-based iteration if sqrt(x) < 2^400\n # Assume floating-point square root accurate to within 1 ulp, then:\n # 0 Newton iterations good to 52 bits\n # 1 Newton iterations good to 104 bits\n # 2 Newton iterations good to 208 bits\n # 3 Newton iterations good to 416 bits\n if x < _1_800:\n y = int(x**0.5)\n if x >= _1_100:\n y = (y + x//y) >> 1\n if x >= _1_200:\n y = (y + x//y) >> 1\n if x >= _1_400:\n y = (y + x//y) >> 1\n return y\n bc = _bitcount(x)\n guard_bits = 10\n x <<= 2*guard_bits\n bc += 2*guard_bits\n bc += (bc&1)\n hbc = bc//2\n startprec = min(50, hbc)\n # Newton iteration for 1/sqrt(x), with floating-point starting value\n r = int(2.0**(2*startprec) * (x >> (bc-2*startprec)) ** -0.5)\n pp = startprec\n for p in giant_steps(startprec, hbc):\n # r**2, scaled from real size 2**(-bc) to 2**p\n r2 = (r*r) >> (2*pp - p)\n # x*r**2, scaled from real size ~1.0 to 2**p\n xr2 = ((x >> (bc-p)) * r2) >> p\n # New value of r, scaled from real size 2**(-bc/2) to 2**p\n r = (r * ((3<<p) - xr2)) >> (pp+1)\n pp = p\n # (1/sqrt(x))*x = sqrt(x)\n return (r*(x>>hbc)) >> (p+guard_bits)", "title": "" }, { "docid": "c9b8077eddff8361bf1b31ffd54bbd03", "score": "0.7045497", "text": "def newtonroot( num ):\n def find_root( whim ):\n guess = ( whim + num / whim ) / 2.0\n guess2 = guess**2\n if ( abs( guess2 - num ) < delta ):\n return guess\n else:\n return find_root( guess )\n\n delta = 0.001*num\n return find_root( 1 )", "title": "" }, { "docid": "3b4f547b1e0a65a51b6dec91e22bcc71", "score": "0.70446", "text": "def safe_sqrt(x):\r\n if x < 0:\r\n return dec('NaN')\r\n else:\r\n return sqrt(x)\r\n # return x.sqrt()\r", "title": "" }, { "docid": "b5f2adbd9e4d2581b284e96add549f52", "score": "0.7038148", "text": "def nth_root(x,n): # credit http://stackoverflow.com/questions/356090/, M. 
Jarderot\n high = 1\n while high ** n < x:\n high *= 2\n low = high/2\n while low < high:\n mid = (low + high) / 2\n if low < mid and mid**n < x:\n low = mid\n elif high > mid and mid**n > x:\n high = mid\n else:\n return mid\n return mid + 1", "title": "" }, { "docid": "99d8ce10bafefbb1a9e864bf1d554d74", "score": "0.7015905", "text": "def which_square_num(s):\n test_n = sqrt(s)\n if test_n.is_integer():\n return int(test_n)", "title": "" }, { "docid": "afb91b1ec1891f7b379e1e47a4160bf3", "score": "0.6990966", "text": "def nth_root_of_a(n, a):\n return find_zero(lambda x: pow(x, n) - a, lambda x: n * pow(x, n-1))", "title": "" }, { "docid": "b82642a91762e1e301f3619d46b53176", "score": "0.69773275", "text": "def sqrt(x, precision):\n old_guess = 0.5 * (x + 1) # Step 1.\n guess = 0.5 * (old_guess + x / old_guess) # Step 2.\n while( not (-precision <= old_guess - guess <= precision)):\n guess, old_guess = 0.5 * (guess + x / guess), guess # Step i.\n return guess", "title": "" }, { "docid": "a937e5368dc7a2a4548d94d887bd5093", "score": "0.6964668", "text": "def simple_Sqrt(self, x):\n x = self.simple(x)\n if x in (Expr(0), Expr(1), Infinity, UnsignedInfinity, Undefined):\n return x\n if x == Expr(-1):\n return ConstI\n if x == -Infinity:\n return ConstI * Infinity\n if x.is_integer():\n # todo: call an actual square root function\n v = int(x)\n real = v >= 0\n v = abs(v)\n if v < 1e100:\n r = int(round(v ** 0.5))\n if r * r == v:\n return Expr(r) if real else Expr(r)*ConstI\n # todo: wanted?\n if self.is_negative(x):\n return self.simple_Sqrt(-x) * ConstI\n # todo: generalize\n if x.head() == Pow:\n base, exp = x.args()\n if exp.is_integer():\n exp = int(exp)\n if exp % 2 == 0 and exp > 0 and self.is_real(base) and self.is_nonnegative(base):\n if exp == 2:\n return base\n else:\n return Pow(base, exp // 2)\n return Sqrt(x)", "title": "" }, { "docid": "3d88fdb734c0c027364916e2fd6b5420", "score": "0.69263", "text": "def sqrt(self, a):\n return gmpy_sqrt(a)", "title": "" }, { "docid": "3d90cbc83d4039c3b066e4e2787929a1", "score": "0.69044113", "text": "def sqrt_bs(n: float) -> float:\n if n < 1:\n return sqrt_(n)\n lower, upper = 1, float(\"inf\")\n while lower * lower < n:\n lower *= 2\n lower /= 2\n upper = lower * 2\n while abs(lower - upper) > 0.001:\n midpoint = (upper + lower) / 2\n if midpoint * midpoint > n:\n upper = midpoint\n elif midpoint * midpoint < n:\n lower = midpoint\n else:\n return midpoint\n return midpoint", "title": "" }, { "docid": "d6611c1128896efceecb244c5b808da5", "score": "0.68926007", "text": "def nth_root(x, n): # make sure x != 0\n root = 1\n while root**n < x:\n root *= 2\n root /= 2\n for i in xrange(len(bits(root)) - 4, -1, -1):\n root += (2**i)\n if root**n > x:\n root -= (2**i)\n return root", "title": "" }, { "docid": "3b56e943fdaf54f64846811bcc5383a7", "score": "0.6872886", "text": "def nextSquare(x):\r\n y = isqrt(x)\r\n while y*y < x:\r\n y += 1\r\n return y*y", "title": "" }, { "docid": "c53bcb82e7ec228a0b957b499de7a8d3", "score": "0.6861061", "text": "def ex0():\n i = int(input(\"Enter a non-negative integer: \"))\n if i < 0:\n print(\"Negative numbers do not have real square roots\")\n else:\n root = sqrt(i)\n print(\"The square root is\", round(root, 2))", "title": "" }, { "docid": "185754e644adada100a3466462102ebb", "score": "0.68607056", "text": "def sqrt(number):\r\n # if number is a negative number.\r\n if number < 0:\r\n return -1\r\n\r\n # create dictionary that store the square of the numbers from 0- number\r\n temp = {}\r\n for i in range(0, 
number+1):\r\n temp.update({i*i : i})\r\n # case 1 if number is square.\r\n if number in temp.keys():\r\n return temp[number]\r\n else:\r\n # case 2 if number is square.\r\n for num, index in temp.items():\r\n if number < num:\r\n # here I am try to find the two numbers between number.\r\n big_num = num\r\n small_num = (index-1)*(index-1)\r\n # difference between big_num and small_num.\r\n diff = big_num - small_num - 1\r\n # the half difference between big_num and small_num.\r\n h_diff = diff//2\r\n # part_1 is a part of numbers it is near to small_num.\r\n part_1 = small_num + h_diff\r\n # part_2 is a part of numbers it is near to big_num.\r\n part_2 = big_num - h_diff\r\n \"\"\"\r\n If number is smaller than part_1 or equal to part_1 then the root of number will be the same of \r\n the small_num.\r\n \"\"\"\r\n if number <= part_1:\r\n return temp[small_num]\r\n # else if number is bigger than part_2 then the root of number will be the same of the big_num.\r\n elif number > part_2:\r\n return temp[big_num]", "title": "" }, { "docid": "2f9f102c10004bfe3bbda7691cf2a60b", "score": "0.68536144", "text": "def square(num):\n return round((num ** 2), 2)", "title": "" }, { "docid": "7189a744a2fa4107c76d4807dfa94c2e", "score": "0.6783841", "text": "def _isqrt_small_python(x):\n if not x:\n return x\n if x < _1_800:\n # Exact with IEEE double precision arithmetic\n if x < _1_50:\n return int(x**0.5)\n # Initial estimate can be any integer >= the true root; round up\n r = int(x**0.5 * 1.00000000000001) + 1\n else:\n bc = bitcount(x)\n n = bc//2\n r = int((x>>(2*n-100))**0.5+2)<<(n-50) # +2 is to round up\n # The following iteration now precisely computes floor(sqrt(x))\n # See e.g. Crandall & Pomerance, \"Prime Numbers: A Computational\n # Perspective\"\n while 1:\n y = (r+x//r)>>1\n if y >= r:\n return r\n r = y", "title": "" }, { "docid": "cd3993ff279fc1f8c5f9919ba146e7e3", "score": "0.67793334", "text": "def square_of_sum(num):\n \"\"\"Uses theorem for finite series to calculate base\"\"\"\n return (num * (num + 1) / 2) ** 2", "title": "" }, { "docid": "5a48aac9c083900e492d637d2fc41dfd", "score": "0.677912", "text": "def square_num(n):\n assert n > 0 and int(n) == n, \"Must provide a positive integer.\"\n return n**2", "title": "" }, { "docid": "06855e1cf5a7314b30e53eca1ce95657", "score": "0.6758811", "text": "def square(number):\n sqr_num = number ** 2\n return sqr_num", "title": "" }, { "docid": "7bbd1764b9498050841b610286ce8354", "score": "0.6724075", "text": "def sqrt(a):\n return _arith_unary_func(a, backend.get().af_sqrt)", "title": "" }, { "docid": "5212c540edb0b2962e0b7096961b3ca0", "score": "0.6702846", "text": "def test_function_sqrt(self, parser, ctx, n, expected, num):\r\n func = blackbirdParser.FunctionContext(parser, ctx)\r\n func.SQRT = lambda: True\r\n expression = num(n)\r\n assert np.isclose(_func(func, expression), np.sqrt(expected))", "title": "" }, { "docid": "de0faf53009ef328f39bddecdd623710", "score": "0.6690055", "text": "def prevSquare(x):\r\n y = isqrt(x)\r\n return y*y", "title": "" }, { "docid": "813458c03369e3a310aee93e4b254e2d", "score": "0.6683506", "text": "async def root(self, ctx, number):\n answer = math.sqrt(float(number))\n if await check_answer(ctx, answer):\n await ctx.send(f\"The square root of {number} is {answer}\")", "title": "" }, { "docid": "01a17cf7676a2b50444f580006d8c150", "score": "0.6680857", "text": "def sqrt(x):\n try:\n return x._sqrt()\n except AttributeError:\n if isinstance(x,numbers.Real):\n return math.sqrt(x)\n elif 
isinstance(x,numbers.Complex):\n return cmath.sqrt(x)\n else:\n raise TypeError(\n \"illegal argument: {!r}\".format(x)\n )", "title": "" }, { "docid": "27cdb0a6cd1688986248a94842f33423", "score": "0.66558844", "text": "def _protected_sqrt(x):\n return np.sqrt(np.abs(x))", "title": "" }, { "docid": "4fa55c02fa93cdfce98ba82b0d5e7205", "score": "0.6629477", "text": "def fp_sqrt(self, x):\n x = self.lift(x)\n return self.dispatch(\"fp_sqrt\", x, container=aby3_fp)", "title": "" }, { "docid": "1a69af99318d79bc863716b1b89f12ea", "score": "0.6608822", "text": "def sum_of_squares(num):\n \"\"\"Uses theorem for finite series to calculate\"\"\"\n return (num * (num + 1) * (2 * num + 1)) / 6", "title": "" }, { "docid": "5d1b9821304079b6ae87a96b14d51143", "score": "0.6592354", "text": "def square_num(num):\r\n num = int(num)\r\n square = num ** 2\r\n\r\n return square", "title": "" }, { "docid": "322700ef16cdf90eed9af1df491e18cd", "score": "0.6586051", "text": "def root(x, n):\n return newton(lambda y: x - y ** n, 1)", "title": "" }, { "docid": "2afb269a53f57a2317e808cd8840db53", "score": "0.65765274", "text": "def sqr(x):\n return x**2", "title": "" }, { "docid": "2c5cde57c459b8f2167933164d41ba6e", "score": "0.65753204", "text": "def sqrt_mod(n: int, p: int) -> int:\n return min(sympy.sqrt_mod(n, p, all_roots=True))", "title": "" }, { "docid": "12ba4ac5888c94bc370c7a9ec43c3539", "score": "0.65620434", "text": "def squaring():\r\n\tx = int(input())\r\n\treturn x**2", "title": "" }, { "docid": "cba463328de50f7ba120c474fa004668", "score": "0.6560665", "text": "def next_perfect_square(n):\n pass", "title": "" }, { "docid": "851efde4dd33d4ddd118c40e4fbe3176", "score": "0.6549559", "text": "def square(number):\n return int(number ** 2)", "title": "" }, { "docid": "b877fd916cc7d3bc8233dfa6ae37c362", "score": "0.6546326", "text": "def check_perfect_square(x):\n global squares\n if x == int(sqrt(x)) ** 2: #a better condition is x == int(sqrt(x) + 0.5) ** 2\n squares += 1", "title": "" }, { "docid": "e49a885fde9e973eb548a8c7d450b07a", "score": "0.6537544", "text": "def sqrt(x):\n #from math import sqrt\n #m,n = x.shape\n #z = np.zeros((m,n))\n #for i in range(m):\n # for j in range(n):\n # z[i,j] = sqrt(x[i,j])\n return np.sqrt(x)", "title": "" }, { "docid": "96bae575c3f8f6afdc3f841f42f8d9a4", "score": "0.6529088", "text": "def isqrt(n):\r\n x = n\r\n y = (x + 1) // 2\r\n while y < x:\r\n x = y\r\n y = (x + n // x) // 2\r\n return x", "title": "" }, { "docid": "b69ec940f0e62d1224772d9c35af4767", "score": "0.651306", "text": "def _sqrtrem_python(x):\n # to check cutoff:\n # plot(lambda x: timing(isqrt, 2**int(x)), [0,2000])\n if x < _1_600:\n y = _isqrt_small_python(x)\n return y, x - y*y\n y = _isqrt_fast_python(x) + 1\n rem = x - y*y\n # Correct remainder\n while rem < 0:\n y -= 1\n rem += (1+2*y)\n else:\n if rem:\n while rem > 2*(1+y):\n y += 1\n rem -= (1+2*y)\n return y, rem", "title": "" }, { "docid": "0e665883391372236faf42953b2033bb", "score": "0.65027696", "text": "def rms(x):\n return np.sqrt(x + 0.0001)", "title": "" }, { "docid": "a0494dbf62c11d2951b51b682d10e6e8", "score": "0.6480791", "text": "def root_sum_of_squares(x, dim=0):\n assert x.size(-1) == 2\n return torch.sqrt((x ** 2).sum(dim=-1).sum(dim))", "title": "" }, { "docid": "df2d6b811cd15f1bbd5dadd497f48a43", "score": "0.6479134", "text": "def square(num1: float) -> float:\n # logic\n num2 = num1 ** 2\n return num2", "title": "" }, { "docid": "d8ab206755f9790179b26bbe11d8dfb4", "score": "0.64783746", "text": "def modular_sqrt(a, p):\r\n # 
Simple cases\r\n #\r\n if legendre_symbol(a, p) != 1:\r\n return 0\r\n elif a == 0:\r\n return 0\r\n elif p == 2:\r\n return p\r\n elif p % 4 == 3:\r\n return pow(a, (p + 1) // 4, p)\r\n\r\n # Partition p-1 to s * 2^e for an odd s (i.e.\r\n # reduce all the powers of 2 from p-1)\r\n #\r\n s = p - 1\r\n e = 0\r\n while s % 2 == 0:\r\n s /= 2\r\n e += 1\r\n\r\n # Find some 'n' with a legendre symbol n|p = -1.\r\n # Shouldn't take long.\r\n #\r\n n = 2\r\n while legendre_symbol(n, p) != -1:\r\n n += 1\r\n\r\n # Here be dragons!\r\n # Read the paper \"Square roots from 1; 24, 51,\r\n # 10 to Dan Shanks\" by Ezra Brown for more\r\n # information\r\n #\r\n\r\n # x is a guess of the square root that gets better\r\n # with each iteration.\r\n # b is the \"fudge factor\" - by how much we're off\r\n # with the guess. The invariant x^2 = ab (mod p)\r\n # is maintained throughout the loop.\r\n # g is used for successive powers of n to update\r\n # both a and b\r\n # r is the exponent - decreases with each update\r\n #\r\n x = pow(a, (s + 1) // 2, p)\r\n b = pow(a, s, p)\r\n g = pow(n, s, p)\r\n r = e\r\n\r\n while True:\r\n t = b\r\n m = 0\r\n for m in xrange(r):\r\n if t == 1:\r\n break\r\n t = pow(t, 2, p)\r\n\r\n if m == 0:\r\n return x\r\n\r\n gs = pow(g, 2 ** (r - m - 1), p)\r\n g = (gs * gs) % p\r\n x = (x * gs) % p\r\n b = (b * g) % p\r\n r = m", "title": "" }, { "docid": "ad6f2ffb0a1749d3d71594e6c84752eb", "score": "0.6452888", "text": "def simplify_sqrt(n):\n perfect_square = None\n float_to_int = lambda x: int(x) if is_integer(x) else x\n for factor in sorted(factors(n), reverse=True)[:-1]:\n if is_integer(sqrt(factor)):\n perfect_square = factor\n break\n\n if perfect_square == n:\n return (int(sqrt(perfect_square)), 0)\n\n elif perfect_square:\n factor1 = sqrt(perfect_square)\n factor2 = n / perfect_square\n return (float_to_int(factor1), float_to_int(factor2))\n\n else:\n return (0, n)", "title": "" }, { "docid": "975893bfb0dccaecd666d27ff7ce42b5", "score": "0.6452267", "text": "def squareit(num):\n\n return num * num", "title": "" }, { "docid": "e287cfea1baef00bd5a8dc7235c707b2", "score": "0.64487493", "text": "def squareRootExhaustive(x, epsilon):\n step = epsilon**2\n ans = 0.0\n while abs(ans**2 - x) >= epsilon and ans*ans <= x:\n ans += step\n if ans*ans > x:\n raise ValueError\n return ans", "title": "" }, { "docid": "a297a096305acd6d155b823688662073", "score": "0.64387804", "text": "def cube_root(n):\n lo = 0\n hi = n\n\n while lo < hi:\n mid = (lo + hi) // 2\n if mid**3 < n:\n lo = mid + 1\n else:\n hi = mid\n\n return lo", "title": "" }, { "docid": "3c86b2208623326ee809816fe8ca1e02", "score": "0.6431863", "text": "def modular_sqrt(a, p):\n # Simple cases\n #\n if legendre_symbol(a, p) != 1:\n return 0\n elif a == 0:\n return 0\n elif p == 2:\n return p\n elif p % 4 == 3:\n return pow(a, (p + 1) / 4, p)\n\n # Partition p-1 to s * 2^e for an odd s (i.e.\n # reduce all the powers of 2 from p-1)\n #\n s = p - 1\n e = 0\n while s % 2 == 0:\n s /= 2\n e += 1\n\n # Find some 'n' with a legendre symbol n|p = -1.\n # Shouldn't take long.\n #\n n = 2\n while legendre_symbol(n, p) != -1:\n n += 1\n\n # Here be dragons!\n # Read the paper \"Square roots from 1; 24, 51,\n # 10 to Dan Shanks\" by Ezra Brown for more\n # information\n #\n\n # x is a guess of the square root that gets better\n # with each iteration.\n # b is the \"fudge factor\" - by how much we're off\n # with the guess. 
The invariant x^2 = ab (mod p)\n # is maintained throughout the loop.\n # g is used for successive powers of n to update\n # both a and b\n # r is the exponent - decreases with each update\n #\n x = pow(a, (s + 1) / 2, p)\n b = pow(a, s, p)\n g = pow(n, s, p)\n r = e\n\n while True:\n t = b\n m = 0\n for m in xrange(r):\n if t == 1:\n break\n t = pow(t, 2, p)\n\n if m == 0:\n return x\n gs = pow(g, 2 ** (r - m - 1), p)\n g = (gs * gs) % p\n x = (x * gs) % p\n b = (b * g) % p\n r = m", "title": "" }, { "docid": "4d2188bc18bfa253bb890d4ea918a340", "score": "0.64273983", "text": "def modular_sqrt(a, p):\r\n # Simple cases\r\n #\r\n if legendre_symbol(a, p) != 1:\r\n return 0\r\n elif a == 0:\r\n return 0\r\n elif p == 2:\r\n return p\r\n elif p % 4 == 3:\r\n return pow(a, (p + 1) / 4, p)\r\n \r\n # Partition p-1 to s * 2^e for an odd s (i.e.\r\n # reduce all the powers of 2 from p-1)\r\n #\r\n s = p - 1\r\n e = 0\r\n while s % 2 == 0:\r\n s /= 2\r\n e += 1\r\n \r\n # Find some 'n' with a legendre symbol n|p = -1.\r\n # Shouldn't take long.\r\n #\r\n n = 2\r\n while legendre_symbol(n, p) != -1:\r\n n += 1\r\n \r\n # Here be dragons!\r\n # Read the paper \"Square roots from 1; 24, 51,\r\n # 10 to Dan Shanks\" by Ezra Brown for more\r\n # information\r\n #\r\n \r\n # x is a guess of the square root that gets better\r\n # with each iteration.\r\n # b is the \"fudge factor\" - by how much we're off\r\n # with the guess. The invariant x^2 = ab (mod p)\r\n # is maintained throughout the loop.\r\n # g is used for successive powers of n to update\r\n # both a and b\r\n # r is the exponent - decreases with each update\r\n #\r\n x = pow(a, (s + 1) / 2, p)\r\n b = pow(a, s, p)\r\n g = pow(n, s, p)\r\n r = e\r\n \r\n while True:\r\n t = b\r\n m = 0\r\n for m in xrange(r):\r\n if t == 1:\r\n break\r\n t = pow(t, 2, p)\r\n \r\n if m == 0:\r\n return x\r\n \r\n gs = pow(g, 2 ** (r - m - 1), p)\r\n g = (gs * gs) % p\r\n x = (x * gs) % p\r\n b = (b * g) % p\r\n r = m", "title": "" }, { "docid": "27fda45513a4df71f3bbfd5839c17ec2", "score": "0.64257854", "text": "def square_root():\n\n number_1 = randint(0,49)\n \n print('What is the square root of '+ str(number_1) + '?')\n print('Please enter ONLY a whole number or decimal.')\n\n user_input = input(' ')\n \n #if statement conditional states that a user input that is equal to the answer of the square root of the random integer, the answer is correct.\n if int(float(user_input)) == number_1**(1/2):\n print(random.choice(correct_response))\n play_game()\n else:\n print(random.choice(wrong_response))\n print('The answer is ' + str(number_1**(1/2)) + '.')\n play_game()", "title": "" }, { "docid": "9a5149175c631a26769b634729b5f505", "score": "0.64085346", "text": "def square(num):\r\n return num**2", "title": "" }, { "docid": "bab8c0bb7cd07f940752c10a7392c06b", "score": "0.63832253", "text": "def my_square(x):\n\treturn(x ** 2)", "title": "" }, { "docid": "d81211cb912fb7bd0745ed2cd2b64368", "score": "0.6368992", "text": "def square(number):\n return number ** 2", "title": "" }, { "docid": "4b96b298db755e374ff3f77380ef6592", "score": "0.63667166", "text": "def digital_root(n):\n if n < 10:\n return n\n else:\n return digital_root(sum([int(num) for num in str(n)]))", "title": "" }, { "docid": "c9ce962977f45d78c4b03cb72627da1e", "score": "0.6362705", "text": "def square(num1: float) -> float:\n # logic here\n num2 = num1 ** 2\n return num2", "title": "" }, { "docid": "20cf6de84fb5e951c6516f3ea93e9bb3", "score": "0.63624346", "text": "def square(x): \n\treturn x ** 2 
\t\t\t\t# square", "title": "" }, { "docid": "2f4b955044598834fcf302ddfbb894ca", "score": "0.6354909", "text": "def signed_sqrt(x):\r\n return keras.backend.sign(x) * keras.backend.sqrt(keras.backend.abs(x) + 1e-9)", "title": "" }, { "docid": "9824092253339778b6abefc1842d059f", "score": "0.6352045", "text": "def is_square(num):\r\n return int(num**0.5)**2 == num", "title": "" } ]
b0377bbae547987257813ce50934fe39
Returns True if the game is over, and False if not
[ { "docid": "243eabe38d03783381e9856ff2fc5a3c", "score": "0.77313864", "text": "def gameOver(self):\r\n if self.hasWonPlayer(\"X\") or self.hasWonPlayer(\"O\"):\r\n return True\r\n else:\r\n for move in self.board:\r\n if move == ' ':\r\n return False\r\n return True", "title": "" } ]
[ { "docid": "b7b6e3c1761505622aa0e8ec5753c7c9", "score": "0.8727644", "text": "def is_game_over(self):\r\n pass", "title": "" }, { "docid": "40a3298c329d5c988cad7bdf39cbb1bd", "score": "0.8521025", "text": "def is_game_over(self):", "title": "" }, { "docid": "487fc9b57415a1d3b9148afebabc3700", "score": "0.8511613", "text": "def is_game_over(self):\n return sum(self.state[0]) == 0 or sum(self.state[1]) == 0", "title": "" }, { "docid": "34dc20f3fa2ba5a893220188f85ab0e8", "score": "0.822954", "text": "def is_game_over(self) -> bool:\n # First check if the game is stopped, if not we can't safely read self.game\n if not self.is_stopped():\n return False\n\n return self.game.is_over()", "title": "" }, { "docid": "b1091804852bedd0716334c67d7ce56b", "score": "0.8219491", "text": "def game_over() -> bool:\r\n if self.__block_collides():\r\n self.__game_over = True\r\n return True\r\n return False", "title": "" }, { "docid": "8bcf597ada8829ba35c390688ce41ffd", "score": "0.821118", "text": "def is_game_over(self, state):\n result = self.outcome(state)\n return not result == 2", "title": "" }, { "docid": "7a919c9bde5d180710573cbec85440fb", "score": "0.8116675", "text": "def game_over(self):\n return self.value() == 1 or self.value() == -1 or self.turn() > 9", "title": "" }, { "docid": "f64e1c566863ddd1d9e4ea869d05a532", "score": "0.81158936", "text": "def check_game_over(self):\n if self.get_player()._move_count <= 0:\n return True\n return False", "title": "" }, { "docid": "2863b9c3c3bccb7a7260c462a86aaf64", "score": "0.80947053", "text": "def game_over(self):\n if self.get_current_state() not in self._actions.keys():\n return True\n return False", "title": "" }, { "docid": "92479f5bca90bae5457d47134c33ee4e", "score": "0.8085551", "text": "def game_over(self):\n pass", "title": "" }, { "docid": "92479f5bca90bae5457d47134c33ee4e", "score": "0.8085551", "text": "def game_over(self):\n pass", "title": "" }, { "docid": "f1ebafd0ee696909f3eda2b5b9ab45c9", "score": "0.80355334", "text": "def _is_game_over(self):\n if self.__life == 0:\n return True, self.MESSAGE_FOR_LOST\n if self.__asteroids_amount == 0:\n return True, self.MESSAGE_FOR_WIN\n if self.__screen.should_end():\n return True, self.MESSAGE_FOR_EXIT\n return False, False", "title": "" }, { "docid": "34bc2029875a0a56ed87cccbc9544274", "score": "0.800201", "text": "def check_game_over(self):\n if self.player.health <= 0:\n pygame.mixer.music.stop()\n self.pause_game(\"Game Over! 
Final Score: \" + str(self.score), \"Press 'Enter' to play again...\")\n self.reset_game()", "title": "" }, { "docid": "86c8d960127899de6a701d6c0dd71417", "score": "0.79633003", "text": "def check_game_is_over(self) -> bool:\n\n return self.status == GameStatus.COMPLETED", "title": "" }, { "docid": "8d98dc865d0306b939157db4d46445b7", "score": "0.79463273", "text": "def game_over(self):\n return self.winner() is not None", "title": "" }, { "docid": "cfb4d29a12a9e526fdb8defaddb9ecc9", "score": "0.7943947", "text": "def is_game_over(self):\r\n assert self.opened\r\n soup = self.get_html_parser()\r\n game_over_screen = soup.find('div', class_='game-over')\r\n return game_over_screen is not None", "title": "" }, { "docid": "a34dbaf144e9eb37caba78d31dbbf932", "score": "0.7809504", "text": "def check_game_over(self):\n state = self._game.get_game_state()\n\n if state == self._game.GameState.WON:\n showinfo(\"Round Over!\", \"You won {}!!!\".format(self._player))\n self._playing = False\n if int(self._checkpoint) < 4:\n self._next_round = True\n else:\n self._next_round = False\n \n elif state == self._game.GameState.LOST:\n showinfo(\"Round Over!\",\n f\"You didn't reach the objective(s) in time. You connected {self._game.get_score()} points\")\n self._playing = False\n self._next_round = False", "title": "" }, { "docid": "83dc36edf0a585520cca1cd51580525a", "score": "0.7801536", "text": "def isOver(self) -> bool:\r\n\r\n return self.hasWon(1) or self.hasWon(2) or self.isFull()", "title": "" }, { "docid": "24c00bab287d6c96df6b6a8a6f3bec68", "score": "0.78004515", "text": "def is_game_over(self):\n for row in range(4):\n for column in range(4):\n if self.grid[row][column] == 2048:\n return True, \"Game finishes, you win!\"\n\n possible_move = self.valid_move_exists()\n if possible_move:\n return False, \"\"\n else:\n return True, \"Game over, no possible move, you lost!\"", "title": "" }, { "docid": "e74c70b9731854361a1a3cbb913a5d4a", "score": "0.7784149", "text": "def check_gameover():\n\n global GAMEOVER\n\n if collide_with_pixels():\n print(\"Game over!\")\n GAMEOVER = True", "title": "" }, { "docid": "a3e8682a0fe71de05ed65e9cdf2f8e87", "score": "0.77713513", "text": "def see_if_game_is_over():\n\n see_if_winner()\n see_if_draw()", "title": "" }, { "docid": "461811055c0d173f0648b07bbdca9efd", "score": "0.7761938", "text": "def trigger_game_over(self):\n print(\"Game Over\")", "title": "" }, { "docid": "00c34c04d5e46f03788bc7ae07d9a981", "score": "0.7752569", "text": "def game_over(self):\n if not self.empty_fields() or None != self.winner():\n return True\n else:\n return False", "title": "" }, { "docid": "e78956ca0f3924eb96999c6b6616e1f7", "score": "0.7743732", "text": "def game_over(self):\n if len(self.remaining_players()) < 1:\n self.cutoff = 'extermination'\n return True\n elif len(self.remaining_players()) == 1:\n self.cutoff = 'lone survivor'\n return True\n elif self.turn >= self.turn_limit:\n self.cutoff = 'turn limit reached'\n return True\n else: return False", "title": "" }, { "docid": "67e1ced67eeba8d8eb6b5e8e94f36c4f", "score": "0.77389854", "text": "def game_over(self):\n\n for action in range(4):\n if self.is_action_available(action):\n return False\n return True", "title": "" }, { "docid": "e990e8ff290bc656e8e2fdf8b6b59bc9", "score": "0.7737669", "text": "def is_over(self):\n return (self.possible_moves() == []) or self.lose()", "title": "" }, { "docid": "b69cbf2508eac034651a3fd513fa5bc5", "score": "0.7674087", "text": "def is_over(self):\r\n is_winner, _ = self.winner()\r\n 
return is_winner", "title": "" }, { "docid": "deffc3fadd660ca993757c825c789783", "score": "0.75929725", "text": "def game_over():\r\n print(\"You have failed\")", "title": "" }, { "docid": "457b31df310a82a3d547ab8d1ed6c7f8", "score": "0.7572", "text": "def check_loss(self):\n if POKEMON in self._game:\n return True", "title": "" }, { "docid": "a9d42e3a6a4a1505c440929b15b0863a", "score": "0.7566365", "text": "def check_game_over():\n global game_over_text\n\n winner = game_board.get_winner()\n if winner != 0:\n font = pygame.font.Font(None, 60)\n game_over_text = font.render(\"Player %i wins!\" % winner, True, purple, white)\n return True\n elif game_board.is_board_full():\n font = pygame.font.Font(None, 60)\n game_over_text = font.render(\"It's a draw!\", True, purple, white)\n return True\n else:\n return False", "title": "" }, { "docid": "c9a8fe79a3948e3b75b3040ffe29bbda", "score": "0.75273514", "text": "def check_game_over(self: \"Hangman\") -> None:\n if self.found_letters.count(False) == 0:\n self.game_won = True\n elif len(self.missed_guesses) == len(HANGMAN_PICS) - 1:\n self.game_lost = True", "title": "" }, { "docid": "1b3bbb8daa692c9000f2c0f7ed2f427b", "score": "0.75073826", "text": "def is_game_over(self):\n if self.__board[0][5] == 7:\n return 'W'\n if self.__board[2][5] == 7:\n return 'B'\n return None", "title": "" }, { "docid": "35a834137e9586e3845f2d6ffcd13db8", "score": "0.7453389", "text": "def is_gameover(ai_board, player_board):\n game_boolean = False\n if ai_board.gameover:\n player_board.display.show_text(\n \"You won this game, no more ships for opponent!\")\n player_board.display.flip()\n time.sleep(5)\n game_boolean = True\n elif player_board.gameover:\n player_board.display.show_text(\n \"You lost this game, no more ships!\")\n player_board.display.flip()\n time.sleep(5)\n game_boolean = True\n return game_boolean", "title": "" }, { "docid": "4dad8678aaa76ae81fe52ce6f154bf95", "score": "0.744317", "text": "def isGameOver(self):\n w, h = self.width, self.height\n return len(self.board.keys()) == 2 * w * h - h - w", "title": "" }, { "docid": "40fbe8d6cb076e9a84e8bb0d49ac1eb1", "score": "0.7433027", "text": "def check_game_over(self):\n for victory in self.victories:\n if set(victory).issubset(self.squares[self.COMPUTER]):\n return True, 1\n elif set(victory).issubset(self.squares[self.HUMAN]):\n return True, -1\n if self.turns == 9:\n return True, 0\n return False, None", "title": "" }, { "docid": "78d15d9f98ff50a62e0e3bc62b0bf5b7", "score": "0.7410562", "text": "def gameover(self):\n self.paused = False\n self.running = False\n self.del_timer('TimedUpdate')\n pygame.mixer.music.fadeout(1000)\n print('Game Over')", "title": "" }, { "docid": "5925406edf2637275ac4dbd1ef505b2a", "score": "0.7383281", "text": "def game_over(self):\n self.is_game_over = True\n\n # Subtract the letter scores from the players\n # that still have letters on hand\n for player in self.players:\n if player.nr_of_letters > 0:\n for letter in player.held_letters:\n player.score -= letter.score\n\n # Set the first player as winner for comparison\n winner = [self.players[0]]\n\n # If a player has a higher score than the comparison\n # then that player is set.\n for player in self.players[1:]:\n if player.score > winner[0].score:\n winner = [player]\n\n # Otherwise if they are equal they are added to\n # the list, to make a draw possible\n elif player.score == winner[0].score:\n winner.append(player)\n \n\n if len(winner) > 1:\n winners = \"\\n\".join(winner)\n self.error_text = \"\"\n 
self.winner_text = \"Jafntefli!\\n\" + winners\n else:\n self.winner_text = f\"{winner[0].name}\\ner sigurvegari!\"", "title": "" }, { "docid": "c03810200466b50f699e9e39f58e3c96", "score": "0.7359948", "text": "def game_over(self):\n if self.active_phrase.check_complete():\n print(\"Congratulations! You won!\")\n else:\n print(\"Out of luck. You lost.\")", "title": "" }, { "docid": "1c18e30c2dc2630fb4c3a996a23865df", "score": "0.72985786", "text": "def check_win(self):\r\n return self.board.game_won()", "title": "" }, { "docid": "eaead6f8539d5e494f34825d6fb3dd52", "score": "0.72674257", "text": "def game_over():\n print \"*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*\"\n print\n print \"GAME OVER -- Thanks for playing!\"\n print\n print \"*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*\"", "title": "" }, { "docid": "141bdc13137ad78c9d028f910bda802d", "score": "0.7245292", "text": "def is_over(self):\n for t in self.players:\n if len(self.off_pieces[t]) == self.num_pieces[t]:\n return True\n return False", "title": "" }, { "docid": "141bdc13137ad78c9d028f910bda802d", "score": "0.7245292", "text": "def is_over(self):\n for t in self.players:\n if len(self.off_pieces[t]) == self.num_pieces[t]:\n return True\n return False", "title": "" }, { "docid": "edd1a939914ada15a1378dacb3197874", "score": "0.7236908", "text": "def win_lose(self):\n win = pygame.image.load(const.WIN_PICTURE).convert()\n lose = pygame.image.load(const.LOSE_PICTURE).convert()\n if self.gardien.find_position(self.p_maze) is None:\n if self.score == len(self.objects):\n self.window_2.blit(win, (0, 0))\n else:\n #Stop the condition for the move_pygame methode\n self.window_2.blit(lose, (0, 0))\n self.game_over = True\n return False\n else:\n return True", "title": "" }, { "docid": "826dfaacd39671be13848ad9f5254952", "score": "0.7180126", "text": "def _is_game_over(self, levels):\r\n for level in levels:\r\n if level >= self.cap:\r\n return True\r\n return False", "title": "" }, { "docid": "d15d44dfbb4a1f50e166d17bc763c1fa", "score": "0.7176114", "text": "def _check_is_over(self):\r\n return self.deck.is_empty() or len(self.players) == 1", "title": "" }, { "docid": "48070786ac12e18120d522b2f7357c4a", "score": "0.7173666", "text": "def lost(self):\n return self.game.is_winner(self.opponent)", "title": "" }, { "docid": "b5432e50b27f1b452e53cf67c9110fae", "score": "0.7171704", "text": "def is_end_of_game(self):\n pass", "title": "" }, { "docid": "080a74f382c5a30a8d90984e5b4eacd9", "score": "0.71562654", "text": "def __gameOver(self):\n if self.life_num == 0: # If the number of aircraft is 0, exit the game\n game_over = pygame.image.load(GAME_OVER) # Show GAMEOVER picture\n self.screen.blit(game_over, (50, 150))\n self.__infoDisplay('press ESC play again...', SCREEN_CENTER, 'center')\n pygame.display.update()\n flag = True\n while flag:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n PlaneGame.quit()\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE: # Press SPACE to restart the game\n flag = False\n PlaneGame().start()", "title": "" }, { "docid": "0e6776c624f11146da422bbda1151b90", "score": "0.7151181", "text": "def _check_game_over(self, msgs):\n for msg in msgs:\n if msg.type == sermess.MessageType.DIED: # a player's dead was announced by the Game (server side)\n self.logger.info(f\"ID: {msg.player_id} Death of player acknowledged.\")\n PlayerManager.get_instance().remove_player(msg.player_id) # remove from player manager\n self.logger.info(f\"ID: {msg.player_id} Player removed from player manager.\")\n 
if Client.get_instance().id == msg.player_id: # if it was our player, set screen's game over state\n self.__game_over_state = sstatecons.GameOverState.LOST\n self.logger.info(\"It is our player that died!\")\n self.__show_cannot_attack_text = False\n elif msg.type == sermess.MessageType.NO_ALIVE_HUMAN: # if Game announced that all human player is dead\n self.__game_over_state = sstatecons.GameOverState.ALL_HUMAN_DIED # set game over state of Screen\n self.__t_to_exit = FPS * 10 - 1 # trigger delayed exit\n elif msg.type == sermess.MessageType.WON: # if Game announced that a player won\n if msg.player_id == Client.get_instance().id: # check if it's ours'\n self.__game_over_state = sstatecons.GameOverState.WON # then set it's go. state accordingly\n self.__t_to_exit = FPS * 10 - 1 # trigger delayed exit for regardless", "title": "" }, { "docid": "c8cfaceb266f75dff6f75c774cea4916", "score": "0.7150852", "text": "def is_game_over(self):\n for r in self.fields:\n for b in r:\n if b.get_flag() and not b.get_isMine() or b.get_isMine() and not b.get_flag():\n return False\n return True", "title": "" }, { "docid": "8585a1d46afe1176a8de85a2697eafb9", "score": "0.714262", "text": "def is_over(self):\n return self._moves == TicTacToe.SIZE * TicTacToe.SIZE or self.check_winner(\n ) is not None", "title": "" }, { "docid": "40511dd7f673c081ff217fa3c605cc2b", "score": "0.7140324", "text": "def check_is_game_over(self, color):\n n = 0\n for rnum,piece in self.board.pieces.items():\n if piece.color == color:\n n += 1\n if self.has_a_move(rnum):\n return False\n self.tend = time.time()\n ts = time.localtime(self.tend)\n tstr = f\"{ts.tm_hour:02}:{ts.tm_min:02}:{ts.tm_sec:02}\"\n if n == 0:\n self._eog = Checkers.EoG.DEFEAT\n self._winner = CheckersPiece.opposite_color(color)\n self.add_event_to_history(f\"DEFEATED({enumlower(color)})@{tstr}\")\n else:\n self._eog = Checkers.EoG.DRAW\n self._winner = None\n self.add_event_to_history(f\"DRAW@{tstr}\")\n self._state = Checkers.State.GAME_OVER\n return True", "title": "" }, { "docid": "c58be8ffc716197f8526f8dee684c89f", "score": "0.71363443", "text": "def is_over(self, current_state: 'Game') -> bool:\n raise NotImplementedError(\"Override this.\")", "title": "" }, { "docid": "00a33ad1e00615ed5a25dac72366c4bf", "score": "0.7132596", "text": "def gameOver(self):\n if self.availableSpots():\n return False\n\n board = self.board\n for row in range(self.size):\n for col in range(self.size):\n if (row < self.size - 1 and board[row][col] == board[row+1][col]) \\\n or (col < self.size - 1 and board[row][col] == board[row][col+1]):\n return False\n return True", "title": "" }, { "docid": "e7d4ba476b6922fd0f5a150e538f5b33", "score": "0.7111102", "text": "def checkForEndGame(self) -> bool:\r\n\r\n endGame = False\r\n if self.spaceShip.getHealth() <= 0:\r\n endGame = True\r\n slowPrint(\"CATASTROPHIC HULL BREACH: CRAFT DESTROYED\" + scatterPrint(\"DESTROYED\"))\r\n slowPrint(\"------- YOU LOSE -------\")\r\n\r\n if self.spaceShip.getEnergy() <= 0:\r\n endGame = True\r\n slowPrint(\"CATASTROPHIC POWER FAILURE: SYSTEM FAILURE\" + scatterPrint(\"FAILURE\"))\r\n slowPrint(\"------- YOU LOSE -------\")\r\n\r\n if self.getDaysLeft() <= 0:\r\n endGame = True\r\n slowPrint(\"DESTINATION REACHED\")\r\n slowPrint(\"------- YOU WIN -------\")\r\n\r\n if endGame:\r\n quit(0)\r\n else:\r\n return endGame", "title": "" }, { "docid": "bf52145cd74c802e6682c1d5972218b1", "score": "0.70846474", "text": "def __gameOver(self):\n\n # Cria os textos.\n game_over_text = Text(self.__window, 
self.FONT , 60)\n game_over_text_size = game_over_text.size(\"GAME OVER\")\n\n score_text = Text(self.__window, self.FONT, 100)\n score_text_size = score_text.size(self.__score)\n\n self.__play = False\n\n # Permanece na tela de fim de jogo enquanto o usuário não pedir\n # para sair ou para iniciar um novo jogo.\n while not self.__stop and not self.__play:\n\n self.draw()\n\n h_width = self.WINDOW_GEOMETRY[0] // 2\n h_height = self.WINDOW_GEOMETRY[1] // 2\n\n # Desenha os textos na tela.\n game_over_text.draw(h_width - game_over_text_size[0] // 2, 10, \"GAME OVER\", (255, 0, 0), outline = 1)\n\n score_text.draw(\n h_width - score_text_size[0] // 2, h_height - score_text_size[1] // 2,\n self.__score, (255,255,0), outline = 2\n )\n \n self.update()\n\n # Verifica se o jogador pediu para sair.\n if not self.close():\n self.start()", "title": "" }, { "docid": "bf4991cc383a29e7e5b5a78d7c071d6d", "score": "0.706539", "text": "def game_over(stats):\n stats.game_active = False\n pygame.mouse.set_visible(True)", "title": "" }, { "docid": "914b97ca6fd781b524a0ab998614d215", "score": "0.7064967", "text": "def game_over(state):\r\n return wins(state, HUMAN) or wins(state, COMP)", "title": "" }, { "docid": "6c503f756a0821a7afc4e0bb1a640b51", "score": "0.70591825", "text": "def is_over(self):\n return self.get_state_is_over(self.state)", "title": "" }, { "docid": "300d819ebe47de542ca912755d89e794", "score": "0.70465523", "text": "def is_over(self):\n return self.over", "title": "" }, { "docid": "1dfaff27e6a295640914386ffd5f30b8", "score": "0.7034101", "text": "def game_over(self):\n self.state = Game.GAME_OVER\n self.gameover_snd.play()\n delay = int((self.gameover_snd.get_length()+1)*1000)\n pygame.time.set_timer(Game.RESTART, delay)", "title": "" }, { "docid": "20c61c55df2441e63d423cffca589ef1", "score": "0.702096", "text": "def in_game(self):\n try:\n if self.p.poll() is None:\n return True\n else:\n return False\n except:\n return False", "title": "" }, { "docid": "210526e597473bd4e898f4cc8be0a484", "score": "0.7012166", "text": "def game_over(self, message):\n self.game_over_event.set()\n self.notify(gameengine.Message.GAME_DONE)", "title": "" }, { "docid": "405a232f0cb481f1e39070ba19ceeab0", "score": "0.7008995", "text": "def game_is_lost(self):\n for block in self.grid[1][1:self.gridsize[1]-1]:\n if block:\n return True\n return False", "title": "" }, { "docid": "6be88686021fdd2c5faa96728f6a72a4", "score": "0.7006516", "text": "def did_lose(self):\n return self.board.get_blacks() and not self.board.get_whites()", "title": "" }, { "docid": "a4ca4d20db3d7699fc4a4849b9308d4e", "score": "0.6995399", "text": "def is_over(self):\n pass", "title": "" }, { "docid": "1205f099a90835e07efc99716680166b", "score": "0.698784", "text": "def game_over(self):\n\n\t\tnum_cows = [None, None]\n\n\t\tfor i in range(2):\n\t\t\tnum_cows[i] = self.cows[i] + np.sum(self.board == i + 1)\n\t\t\tif num_cows[i] < State.MIN_COWS:\n\t\t\t\tself.winner = 2 - i\n\t\t\t\treturn True\n\n\t\tif num_cows == [State.MIN_COWS, State.MIN_COWS]:\n\t\t\tself.winner = 0\n\t\t\treturn True\n\n\t\treturn False", "title": "" }, { "docid": "894a1ee09e9cdfdfa430f659379ee1e1", "score": "0.69855607", "text": "def win_condition(self) -> bool:\n return False", "title": "" }, { "docid": "23745417ad48aca677520515fe82e4cc", "score": "0.69851553", "text": "def checkGameEnded(self):\n return len(self.getAvailablePos()) == 0", "title": "" }, { "docid": "e050473c742f76abdcd417b6f4947284", "score": "0.6936669", "text": "def check_win(self) -> bool:\n 
return self.who_won() != EMPTY", "title": "" }, { "docid": "af081fbdc7e60dfccc011bb2baa755c1", "score": "0.69289774", "text": "def is_over(self, state: StoneHengeState) -> bool:\n list_of_states = deepcopy(state.map.extract_state())\n winning_count = 0.5 * len(list_of_states)\n count1 = 0\n count2 = 0\n for item in list_of_states:\n if item == '1':\n count1 += 1\n elif item == '2':\n count2 += 1\n return count1 >= winning_count or count2 >= winning_count\n\n # winning = 0\n # if state.map.side_length == 2:\n # winning = 5\n # elif state.map.side_length == 3:\n # winning = 6\n # los = state.map.get_list_repr()\n # player1 = 0\n # player2 = 0\n # for string in los:\n # line = string.strip()\n # if line.startswith('1'):\n # player1 += 1\n # if line.endswith('1'):\n # player1 += 1\n # if line.startswith('2'):\n # player2 += 1\n # if line.endswith('2'):\n # player2 += 1\n # if player1 >= winning or player2 >= winning:\n # return True\n # return False", "title": "" }, { "docid": "8c9058ac35de57ef20a37d4f97d655fe", "score": "0.68993014", "text": "def loseCheck(self, player):\r\n x = player.getHP()\r\n if (x <= 0):\r\n print(\"Player was defeated!\")\r\n print(\"Game Over!\")\r\n sys.exit(0)", "title": "" }, { "docid": "c6448491e9cb2c559ae1911e6df36a36", "score": "0.6883069", "text": "def test_game_stats_game_over_flag(self):\n self.assertFalse(self.game_stats.game_over)", "title": "" }, { "docid": "854cba8ff089d279446180967deec3c8", "score": "0.6872401", "text": "def in_game(self):\r\n return self._in_game", "title": "" }, { "docid": "ffb1b83a0c39a85ab35a1f52a34cb7cb", "score": "0.6868297", "text": "def is_game_won(self):\r\n for idx in range(1, len(self._board)):\r\n if self._board[idx] != 0:\r\n return False\r\n return True", "title": "" }, { "docid": "613b65c71600a513576332aa2429bee5", "score": "0.68402576", "text": "def game_draw(self):\n\t\tfor num in np.ravel(self.boardStatus):\n\t\t\tif num == self.type[\"blank\"]:\n\t\t\t\treturn False\n\t\tif self.game_won() != self.type[\"blank\"]:\n\t\t\treturn False\n\t\treturn True", "title": "" }, { "docid": "a6e5e403e751adb9491711b15da77887", "score": "0.6835506", "text": "def check_win(self):\n return UNEXPOSED not in self._game and self._game.count(FLAG) == len(self._pokemon_locations)", "title": "" }, { "docid": "7eba20a5758ebeb55d3a88fa01bf53ef", "score": "0.6830547", "text": "def check_num_guesses_left(self):\n if self.num_guesses == 0:\n print(\"\\nGAME OVER :(\")\n print(\"You are out of guesses.\")\n print(f\"The word was {self.word.upper()}.\")\n self.game_active = False", "title": "" }, { "docid": "52c2caad78c213d1225ac4e95d65e91e", "score": "0.68275154", "text": "def see_if_draw():\n\n global game_still_running\n if '-' not in board:\n game_still_running = False\n return", "title": "" }, { "docid": "a37f76c29789c0868bc1f9cb1dde82d5", "score": "0.6819608", "text": "def isWin(self):\n return self.pos in self.data['win_states']", "title": "" }, { "docid": "136ed9aa2d0c50cfd10a32907f22d4f9", "score": "0.6818711", "text": "def game_won(self):\r\n if self.check_win_player_one():\r\n return 1\r\n elif self.check_win_player_two():\r\n return 2\r\n else:\r\n return None", "title": "" }, { "docid": "3d44286c5d3fba9aeb92b4acc0718109", "score": "0.68125755", "text": "def player_alive(self)->bool:\n return self.tries_used < 9", "title": "" }, { "docid": "f20bb67a6c97ff6bb8514cdb1bac5a7b", "score": "0.6811448", "text": "def check_crash(self) -> bool:\n # if player crashes into ground\n if self.player_y + PLAYER_HEIGHT >= self.base_y - 1:\n return 
True\n return False", "title": "" }, { "docid": "3d6265089048023380d9c72cb41c746d", "score": "0.68072236", "text": "def check_if_game_over(self, role=None):\n\n self.game_over, self.winner = self.check_game_state(role=role)", "title": "" }, { "docid": "8cd66b0559f1b101f92985a6e49bd709", "score": "0.67918783", "text": "def game_won():\n global gameboard, gameover, elapsed, difficulty\n score = elapsed\n gameover = True\n for row in gameboard:\n for tile in row:\n if not tile.is_mine:\n tile.flip()\n print ('Game Won!')\n # if difficulty == \"expert\":\n # names, scores = load_scores()\n # if score < max(scores) or len(scores) < 10:\n # get_player_name(score)\n\n return", "title": "" }, { "docid": "8e320d70b1f88f1dfc798b9460ee2971", "score": "0.67706835", "text": "def gameover(self, uid = None):\n if uid is None:\n return len(self.players) < 2\n else:\n return not uid in self.players", "title": "" }, { "docid": "7246deb5c803696ce28ebe15b8770e9f", "score": "0.67631626", "text": "def is_game_over(computer_score, human_score):\n\n if (computer_score >= 50) or (human_score >= 50):\n if computer_score != human_score:\n return True\n else:\n return False\n else:\n return False", "title": "" }, { "docid": "d0884c0ba9e31668d47073a4a7778ab3", "score": "0.6759866", "text": "def CheckGameOver(self):\n # Tie testing will check entire self.board array for a \"None\" value. If one\n # is found then tie will be set to false. Upon a new game over check, this var\n # will be reset to True.\n # Note: Apparently I can't see this var inside of a for loop? Wtf?\n #tie = True\n\n for row in range(3):\n if self.board[row][0] == self.player and self.board[row][1] == self.player \\\n and self.board[row][2] == self.player:\n print(\"Player \" + self.player + \" has won!\")\n\n for col in range(3):\n if self.board[0][col] == self.player and self.board[1][col] == self.player \\\n and self.board[2][col] == self.player:\n print(\"Player \" + self.player + \" has won!\")\n\n if self.board[0][0] == self.player and self.board[1][1] == self.player \\\n and self.board[2][2] == self.player:\n print(\"Player \" + self.player + \" has won!\")\n\n if self.board[2][0] == self.player and self.board[1][1] == self.player \\\n and self.board[0][2] == self.player:\n print(\"Player \" + self.player + \" has won!\")\n\n # Check for tie by seeing if there are no \"None\" values left\n #for row in range(3):\n # for col in range(3):\n # if self.board[row][col] is None:\n # tie = False\n # break", "title": "" }, { "docid": "c51de5b76adbead99c4297a320404ad7", "score": "0.6757919", "text": "def has_game_ended(self):\n return self.__game.has_game_ended()", "title": "" }, { "docid": "8340307eac43dfcb3295e3a9620bd752", "score": "0.6757882", "text": "def game_over(puzzle: str, view: str, menu: str) -> bool:\n return (puzzle == view) or (menu == QUIT)", "title": "" }, { "docid": "3c9ffb8dc016d72864307ec2fb1e576b", "score": "0.6751946", "text": "def gameEnd(self):\n self.game_screen.wheel.disableSpin()\n self.game_over = True\n print(\"Game over\")", "title": "" }, { "docid": "1957005bab598c8e3be6df3e9fb91957", "score": "0.67484367", "text": "def is_game_over(self, claim_draw=False):\n # Special chess variant conditions\n if self.is_variant_loss() or self.is_variant_win() or self.is_variant_draw():\n return True\n\n # Seventyfive-move rule.\n if self.is_seventyfive_moves():\n return True\n\n # Insufficient material.\n if self.is_insufficient_material():\n return True\n\n # Stalemate or checkmate.\n if not any(self.generate_legal_moves()):\n return 
True\n\n # Fivefold repetition.\n if self.is_fivefold_repetition():\n return True\n\n # Draw claim.\n if claim_draw and self.can_claim_draw():\n return True\n\n return False", "title": "" }, { "docid": "7224f33eb89f1a8f3a6e7ff94959085e", "score": "0.6728211", "text": "def check_game_win_status(self) -> bool:\n if self.flags_count == 0:\n for mine in self.mines_list:\n if mine.state == False and mine.flag == True:\n continue\n else:\n return True\n print(\"*\" * 30)\n print(\"Congratulations! You won!\")\n print(\"*\" * 30)\n return False\n else:\n return True", "title": "" }, { "docid": "5ff4e3545f0faf461e425a75ecfa6938", "score": "0.6721074", "text": "def _has_game_ended(self):\n for pit in self.up_pits:\n if pit.amount != 0:\n break\n else:\n return True\n for pit in self.down_pits:\n if pit.amount != 0:\n return False\n return True", "title": "" }, { "docid": "8a05533add7f7d0b58ab8acf0258112a", "score": "0.6695147", "text": "def check_caught(self):\n if self._coord == self._game.get_pacman_coord() and not self.__movestart:\n if not self.__frightened:\n self._game.set_pacman_caught()\n else:\n if not self.__movestart:\n self._game.set_ghost_caught()\n self._game.music_player.play_music(\"pacman-eatghost/pacman_eatghost.wav\")\n self.set_eaten(True, self._game.get_pacman().get_streak())\n # Make the ghost start moving to the center\n self.__movestart = True", "title": "" }, { "docid": "6c1f11743d2dbc8bf584cbda2239ef8a", "score": "0.66923153", "text": "def is_game_full(self):\n if self.get_player_count() < self.PLAYER_MAX:\n return False\n else:\n return True", "title": "" }, { "docid": "91ec66eb77e77f5532d5892ef0c0c153", "score": "0.66754645", "text": "def gameOver():\n\n print(\"You used all your guesses!\")\n print(\"Game Over!\\n\")", "title": "" }, { "docid": "c6addc651ce22d11e23a3809105bb593", "score": "0.66706234", "text": "def playing(player):\n is_over = Game.play(player)\n\n while not is_over:\n print 'It is a draw'\n Game.separator(2)\n\n is_over = Game.play(player)", "title": "" }, { "docid": "753567437142a817c8f346a9996201bd", "score": "0.66698205", "text": "def is_game_over(computer_score, human_score):\n if (computer_score >= 50 or human_score >= 50) and computer_score != human_score:\n # if the first player (the computer)'s score is equal to or exceeds 50, and the scores are not tied,\n # the function returns True and the game is over\n return True\n else:\n return False # otherwise return False and the game is not over", "title": "" }, { "docid": "2b00c8adb7735af1ec592950d721d3d6", "score": "0.6662558", "text": "def get_state_is_over(self, state):\n if len(self.get_state_valid_moves(state)) == 0:\n return True\n\n return self.get_state_winner(state) != 0", "title": "" } ]
0110e515d731456038d0a30ee57217d6
Creates a custom OpenAPI schema.
[ { "docid": "f77c75e533e37619f46cd9e7aec44b9a", "score": "0.68512565", "text": "def custom_openapi() -> t.Dict[str, t.Any]:\n if app.openapi_schema:\n return app.openapi_schema\n openapi_schema = get_openapi(\n title=\"Pixels API\",\n description=None,\n version=\"1.0.0\",\n routes=app.routes,\n )\n openapi_schema[\"components\"][\"securitySchemes\"] = {\n \"Bearer\": {\n \"type\": \"http\",\n \"scheme\": \"Bearer\"\n }\n }\n for route in app.routes:\n # Use getattr as not all routes have this attr\n if not getattr(route, \"include_in_schema\", False):\n continue\n # For each method the path provides insert the Bearer security type\n # So RapiDoc knows how to auth for that endpoint\n for method in route.methods:\n openapi_schema[\"paths\"][route.path][method.lower()][\"security\"] = [{\"Bearer\": []}]\n app.openapi_schema = openapi_schema\n return app.openapi_schema", "title": "" } ]
[ { "docid": "4da381989d0549e941efa772b8e5285b", "score": "0.7369258", "text": "def custom_openapi():\r\n if app.openapi_schema:\r\n return app.openapi_schema\r\n openapi_schema = get_openapi(\r\n title=handler.FUNCTION_NAME,\r\n version=f\"v{handler.FUNCTION_VERSION}\",\r\n routes=app.routes,\r\n )\r\n paths = openapi_schema[\"paths\"]\r\n upd_paths = {}\r\n # Supplement path specs\r\n for key in paths:\r\n path = paths[key]\r\n for method in path:\r\n path[method][\"tags\"] = [\"Function Definitions\"]\r\n # Modify path(s) to account for function being exposed\r\n # behind OpenFaas gateway\r\n rel_path = f\"/function/{handler.FUNCTION_NAME}\"\r\n if key.startswith(rel_path):\r\n upd_paths[key] = path\r\n else:\r\n rel_path = f\"{rel_path}{key}\"\r\n upd_paths[rel_path] = path\r\n openapi_schema[\"paths\"] = upd_paths\r\n app.openapi_schema = openapi_schema\r\n return app.openapi_schema", "title": "" }, { "docid": "34e2c9cb48a1694153e3e16b8ea66d90", "score": "0.7151901", "text": "def create_schema(self, schema):\n raise NotImplementedError()", "title": "" }, { "docid": "4abcc3cfd34813a5eddb68150640dbea", "score": "0.68804127", "text": "def with_definitions(self, schema):", "title": "" }, { "docid": "be6a3fb8cf58990252e015c28f62763b", "score": "0.67065066", "text": "def api_schema(request):\n # type: (Request) -> JSON\n swagger_base_spec = {\n \"host\": get_magpie_url(request),\n \"schemes\": [request.scheme]\n }\n return s.generate_api_schema(swagger_base_spec)", "title": "" }, { "docid": "49e336973705152d4adb325827ba4bdf", "score": "0.6689882", "text": "def swagger(result_schema):\n return result_schema", "title": "" }, { "docid": "24a2c86bf58e6782cad2bb0d781a7c8d", "score": "0.6632613", "text": "def _create_schema_for_validation(cls, context) -> typing.Union[PathAwareSchema, Schema]:\n raise NotImplementedError()", "title": "" }, { "docid": "bc1d6595a08425c8122e76144eaa4ad7", "score": "0.6567172", "text": "def my_schema():\n class LinksSchema(Schema):\n self = fields.Link(\n template=URITemplate(\"/records{?params*}\"),\n params=lambda o: {\"params\": {\n # type is expected to show in the query string as type=A&type=B\n \"type\": [\"A\", \"B\"],\n \"sort\": \"newest\",\n \"subtype\": [\"1\"],\n \"size\": 10,\n }}\n )\n publish = fields.Link(\n template=URITemplate(\"/admin{/pid}\"),\n params=lambda o: {\"pid\": o.get(\"pid\")},\n permission=\"admin\",\n )\n prev = fields.Link(\n template=URITemplate(\"/prev\"),\n params=lambda o: {},\n when=lambda o: o.get(\"allowed\", True)\n )\n\n class MySchema(Schema):\n links = fields.Links()\n\n factory = LinksFactory(\n host=\"localhost\",\n config={\"search\": LinksSchema}\n )\n\n return MySchema(\n context={\n \"links_factory\": factory,\n \"links_namespace\": \"search\"\n\n }\n )", "title": "" }, { "docid": "1d6d7ca0b4f39c0de1ad5941926e506d", "score": "0.6376013", "text": "def AnnotateSchema(self, the_api, schema):\n\n super(CppGenerator, self).AnnotateSchema(the_api, schema)\n # imports were already handled.\n\n self.AnnotateDocumentation(schema)", "title": "" }, { "docid": "fcd8ed540b25c266add346b1d88b0833", "score": "0.637442", "text": "def auto_fit_schema():\n return jsonify(autofit_model.schema)", "title": "" }, { "docid": "22cac8baa3ebd566436652b1222db348", "score": "0.62938315", "text": "def test_schema_definition(self):\n\n # ensure that the type hasn't already been registered\n self.assertNotIn(\"Stuff\", otio.core.type_version_map())\n\n @otio.core.register_type\n class FakeThing(otio.core.SerializableObject):\n _serializable_label = 
\"Stuff.1\"\n foo_two = otio.core.serializable_field(\"foo_2\", doc=\"test\")\n ft = FakeThing()\n\n self.assertEqual(ft.schema_name(), \"Stuff\")\n self.assertEqual(ft.schema_version(), 1)\n\n with self.assertRaises(otio.exceptions.UnsupportedSchemaError):\n otio.core.instance_from_schema(\n \"Stuff\",\n 2,\n {\"foo\": \"bar\"}\n )\n\n version_map = otio.core.type_version_map()\n self.assertEqual(version_map[\"Stuff\"], 1)\n\n ft = otio.core.instance_from_schema(\"Stuff\", 1, {\"foo\": \"bar\"})\n self.assertEqual(ft._dynamic_fields['foo'], \"bar\")", "title": "" }, { "docid": "27d145981f323a42c0a3e3caa9c251c5", "score": "0.6241876", "text": "def test_descriptor() -> None:\n\n class MySerializer(serializers.Serializer):\n id = serializers.IntegerField()\n my_attr = serializers.CharField()\n\n schema = auto_schema(\"mytype\", transformer=CamelCaseTransform)\n\n serializer = MySerializer()\n result = serializer.schema()\n assert isinstance(result, ResourceObject)\n assert result.type == \"mytype\"\n assert result.id == \"id\"\n assert result.attributes == [\"my_attr\"]\n assert result.relationships == []\n assert result.transformer == CamelCaseTransform", "title": "" }, { "docid": "f7e68af7535498f67da351126191e3c1", "score": "0.62275136", "text": "def main() -> None:\n\n with tempfile.NamedTemporaryFile(\"w\", encoding=\"utf-8\") as temp_fh:\n temp_fh.write(OPENAPI_SPEC.strip())\n temp_fh.seek(0)\n output = subprocess.check_output([\"swagger-marshmallow-codegen\", temp_fh.name])\n\n output_str = str(output, \"utf-8\")\n\n print()\n print(\"Generated marshmallow model:\")\n print()\n print(output_str)\n print()\n print(\"Please note validate=[ItemsRange(min=1 is probably incorrect\")\n print(\"It should be using Length validator:\")\n print(\n \"https://marshmallow.readthedocs.io/en/stable/marshmallow.validate.html?highlight=length#marshmallow.validate.Length\"\n )\n print()\n\n with open(\"generated_marshmallow_models.py\", \"w\", encoding=\"utf-8\") as path_fh:\n path_fh.write(output_str)\n\n print(\"Using the generated model with proper payload is now going to crash\")\n print(\"with TypeError: '<' not supported between instances of 'list' and 'int'\")\n print()\n print('Payload is {\"array\": [\"string1\", \"string2\"]}')\n print()\n\n from generated_marshmallow_models import ExampleSchema\n\n schema = ExampleSchema()\n schema.load({\"array\": [\"string1\", \"string2\"]})", "title": "" }, { "docid": "e92687a8fe54606646ec56bad02ffb6d", "score": "0.62242776", "text": "def __init__(self, data=None, schema_list: List[Union[dict, OrderedDict]] = None, schema_api=None, schema_path=None):\n if data:\n self.schema_list = self.gen_schema_from_data(data)\n self.schema_api = self.gen_schema_api(self.schema_list)\n elif schema_list:\n self.schema_list = schema_list\n self.schema_api = self.gen_schema_api(self.schema_list)\n elif schema_api:\n self.schema_api = schema_api\n self.schema_list = self.gen_schema_list(self.schema_api)\n elif schema_path:\n with open(schema_path, \"r\") as fp:\n self.schema_list = json.load(fp)\n self.schema_api = self.gen_schema_api(self.schema_list)\n else:\n self.schema_list = []\n self.schema_api = []", "title": "" }, { "docid": "7b980b8805e8b0895dbae25809e987f7", "score": "0.62179005", "text": "def createSchema():\n from fluiddb.schema import logs as patches\n\n return ZSchema(CREATE, DROP, DELETE, patches)", "title": "" }, { "docid": "06ba32bdd5adcc42152578f82862e4e4", "score": "0.62104094", "text": "def api_swagger(request): # noqa: F811\n swagger_versions_dir = 
\"{}\".format(os.path.abspath(os.path.join(MAGPIE_MODULE_DIR, \"ui/swagger/versions\")))\n swagger_ui_path = s.SwaggerGenerator.path.lstrip(\"/\")\n return_data = {\"api_title\": s.TitleAPI,\n \"api_schema_path\": swagger_ui_path,\n \"api_schema_versions_dir\": swagger_versions_dir}\n return return_data", "title": "" }, { "docid": "77a775ee9892631800befa3aa30ef0db", "score": "0.6193103", "text": "def _schema(self):\n return GraphQlSchemaFactory.create_from_modules([\n 'graphql.executor.test.star_wars',\n 'graphql.scalar_descriptors.strict'])", "title": "" }, { "docid": "f65887732ec91b48d2a98e3598ab3c75", "score": "0.61512166", "text": "def init_schema(self):\n init_schema_data = {\n \"title\": \"filename\",\n \"display\": \"file_owner\",\n\n \"datatypes\": {\n \"filename\" : {\n \"type\": \"string\",\n \"is_search\" : True,\n \"is_facet\": True\n },\n \"file_owner\": {\n \"type\": \"string\",\n \"is_search\": True,\n \"is_facet\": True\n },\n \"file_modified\":{\n \"type\": \"date\"\n },\n \"file_perms\": {\n \"type\": \"string\",\n \"is_facet\": False\n },\n \"file_size\": {\n \"type\": \"integer\",\n \"is_facet\": False\n }\n }\n }\n # mapping endpoint\n endpoint = \"{location}/_search/default/_mapping/{schema}\".format(location=self.base_url, schema=self.schema)\n req = requests.post(endpoint, auth=self.get_auth(), data=json.dumps(init_schema_data))\n if req.status_code != 200:\n raise requests.RequestException(\"Could not initialize schema {}\".format(req.reason()))", "title": "" }, { "docid": "a4017b71bb4204d04704e6f929fad583", "score": "0.61390257", "text": "def create_schemas(ctx): \n query=ctx.obj['queries'].get('create_schemas')\n conn = ctx.obj['conn']\n with conn.cursor() as cur:\n cur.execute(query)", "title": "" }, { "docid": "495bd1c289b84ed91cc6d32461137af4", "score": "0.6114634", "text": "def do_schema(self):\n\n name = '{}.rest.schema'.format(BACKEND_PACKAGE)\n module = Meta.get_module_from_string(\n name, exit_if_not_found=True, exit_on_fail=True\n )\n schema_class = getattr(module, 'RecoverSchema')\n\n self._schema_endpoint = EndpointElements(\n cls=schema_class,\n exists=True,\n custom={\n 'methods': {\n 'get': ExtraAttributes(auth=None),\n # WHY DOES POST REQUEST AUTHENTICATION\n # 'post': ExtraAttributes(auth=None)\n }\n },\n methods={},\n )\n\n # TODO: find a way to map authentication\n # as in the original endpoint for the schema 'get' method\n\n # TODO: find a way to publish on swagger the schema\n # if endpoint is enabled to publish and the developer asks for it", "title": "" }, { "docid": "c4d8bd238daca9f1f0217e95b5b6a113", "score": "0.61055595", "text": "def __init__(self,\n stats: types.Channel = None,\n infer_feature_shape: bool = True,\n output: Optional[types.Channel] = None,\n statistics: Optional[types.Channel] = None,\n name: Optional[Text] = None):\n stats = stats or statistics\n output = output or types.Channel(\n type=standard_artifacts.Schema, artifacts=[standard_artifacts.Schema()])\n spec = SchemaGenSpec(\n stats=stats, infer_feature_shape=infer_feature_shape, output=output)\n super(SchemaGen, self).__init__(spec=spec, name=name)", "title": "" }, { "docid": "da76aa18b4cbfe863e761cdbffb5873c", "score": "0.6104634", "text": "def _create_spec(self):\n spec = APISpec(\n title=title,\n version=version,\n info=dict(description=desc),\n plugins=[\"apispec.ext.marshmallow\", \"api.v1.apispec_restful\"],\n )\n return spec", "title": "" }, { "docid": "871fa2984facd0ef0a552ab2e0fa72a9", "score": "0.60853124", "text": "def get_schema(cls) -> JsonObjectSchema:", 
"title": "" }, { "docid": "a0249e129978722679fac78ff7387b12", "score": "0.6079451", "text": "def get_schema(cls) -> JsonObjectSchema:\n return JsonObjectSchema(\n properties=dict(\n callable_ref=JsonStringSchema(min_length=1),\n callable_params=JsonObjectSchema(additional_properties=True),\n inline_code=JsonStringSchema(min_length=1),\n file_set=FileSet.get_schema(),\n install_required=JsonBooleanSchema(),\n ),\n additional_properties=False,\n factory=cls,\n )", "title": "" }, { "docid": "5b73a55d61e1546383be15139f76b445", "score": "0.6066744", "text": "def build_schema(self):\n data = {\n 'fields': {},\n 'default_format': self._meta.default_format,\n 'allowed_list_http_methods': self._meta.list_allowed_methods,\n 'allowed_detail_http_methods': self._meta.detail_allowed_methods,\n 'default_limit': self._meta.limit,\n }\n\n if self._meta.ordering:\n data['ordering'] = self._meta.ordering\n\n if self._meta.filtering:\n data['filtering'] = self._meta.filtering\n\n for field_name, field_object in self.fields.items():\n data['fields'][field_name] = {\n 'default': field_object.default,\n 'type': field_object.dehydrated_type,\n 'nullable': field_object.null,\n 'blank': field_object.blank,\n 'readonly': field_object.readonly,\n 'help_text': field_object.help_text,\n 'unique': field_object.unique,\n }\n\n return data", "title": "" }, { "docid": "14881672c758a88d1687063809876553", "score": "0.60444385", "text": "def schema(self):\n # TODO fake\n raise NotImplementedError(\"TODO\")", "title": "" }, { "docid": "1b65dc16fafe41eff5f26efb690b5ea8", "score": "0.60418785", "text": "def _schema(self):\n raise NotImplementedError", "title": "" }, { "docid": "9805bdb523ad9163c6a12f8bc13b0d43", "score": "0.6033125", "text": "def fetch_schema(self) -> None:\n url = self.schema_url or urljoin(self.base_url, \"schema/openapi.yaml\")\n logger.info(\"Fetching schema at '%s'\", url)\n self._schema = schema_fetcher.fetch(url, {\"v\": \"3\"})", "title": "" }, { "docid": "3a1317b62a8699e96a0eb24cd7115a21", "score": "0.603213", "text": "def create_schemas(api: Namespace) -> List[Any]:\n return [\n # Value objects\n api.schema_model('BaseError', BaseError.schema()),\n api.schema_model('BaseUrlError', BaseUrlError.schema()),\n api.schema_model('BirdNameWithScore', BirdNameWithScore.schema()),\n api.schema_model('ImageDownloadError', ImageDownloadError.schema()),\n api.schema_model('ImageFormatError', ImageFormatError.schema()),\n api.schema_model('ModelInferenceError', ModelInferenceError.schema()),\n ]", "title": "" }, { "docid": "de50813315973745a0a7a7cbd0aae9a5", "score": "0.6027857", "text": "def json_schema(self):", "title": "" }, { "docid": "258ce15196d5c9bdacd36e2a27efc975", "score": "0.60219085", "text": "def _build_schemas():\n for i in model_list:\n setattr(\n sys.modules[__name__], # gets the reference to the current module\n i + 'Schema',\n _schema_factory(i)\n )", "title": "" }, { "docid": "335be622aba786089f326b5fb29bd451", "score": "0.60196704", "text": "def build_schema(self):\r\n data = {\r\n 'fields': {},\r\n 'default_format': self._meta.default_format,\r\n 'allowed_list_http_methods': self._meta.list_allowed_methods,\r\n 'allowed_detail_http_methods': self._meta.detail_allowed_methods,\r\n 'default_limit': self._meta.limit,\r\n }\r\n\r\n if self._meta.ordering:\r\n data['ordering'] = self._meta.ordering\r\n\r\n if self._meta.filtering:\r\n data['filtering'] = self._meta.filtering\r\n\r\n for field_name, field_object in self.fields.items():\r\n data['fields'][field_name] = {\r\n 'default': 
field_object.default,\r\n 'type': field_object.dehydrated_type,\r\n 'nullable': field_object.null,\r\n 'blank': field_object.blank,\r\n 'readonly': field_object.readonly,\r\n 'help_text': field_object.help_text,\r\n 'unique': field_object.unique,\r\n }\r\n if field_object.dehydrated_type == 'related':\r\n if getattr(field_object, 'is_m2m', False):\r\n related_type = 'to_many'\r\n else:\r\n related_type = 'to_one'\r\n data['fields'][field_name]['related_type'] = related_type\r\n\r\n return data", "title": "" }, { "docid": "46c6c6cf9e3e7aac27adf62da0d6867a", "score": "0.5991787", "text": "def generate_schema(schema_json, use_logical_types=False, custom_imports=None, avro_json_converter=None):\n\n if avro_json_converter is None:\n avro_json_converter = 'avrojson.AvroJsonConverter'\n\n if '(' not in avro_json_converter:\n avro_json_converter += f'(use_logical_types={use_logical_types}, schema_types=__SCHEMA_TYPES)'\n\n custom_imports = custom_imports or []\n names = schema.Names()\n make_avsc_object(json.loads(schema_json), names)\n\n names = [k for k in six.iteritems(names.names) if isinstance(k[1], (schema.RecordSchema, schema.EnumSchema))]\n names = sorted(names, key=lambda x: x[0])\n\n main_out = StringIO()\n writer = TabbedWriter(main_out)\n\n write_preamble(writer, use_logical_types, custom_imports)\n write_schema_preamble(writer)\n write_get_schema(writer)\n write_populate_schemas(writer)\n\n current_namespace = tuple()\n\n for name, field_schema in names: # type: str, schema.Schema\n name = clean_fullname(name)\n namespace = tuple(name.split('.')[:-1])\n if namespace != current_namespace:\n current_namespace = namespace\n if isinstance(field_schema, schema.RecordSchema):\n logger.debug(f'Writing schema: {clean_fullname(field_schema.fullname)}')\n write_schema_record(field_schema, writer, use_logical_types)\n elif isinstance(field_schema, schema.EnumSchema):\n logger.debug(f'Writing enum: {field_schema.fullname}', field_schema.fullname)\n write_enum(field_schema, writer)\n writer.set_tab(0)\n writer.write('\\n__SCHEMA_TYPES = {')\n writer.tab()\n\n # Lookup table for fullname.\n for name, field_schema in names:\n n = clean_fullname(field_schema.name)\n full = field_schema.fullname\n writer.write(f\"\\n'{full}': {n}Class,\")\n\n # Lookup table for names without namespace.\n for name, field_schema in names:\n n = clean_fullname(field_schema.name)\n writer.write(f\"\\n'{n}': {n}Class,\")\n\n writer.untab()\n writer.write('\\n}\\n\\n')\n\n writer.write(f'_json_converter = {avro_json_converter}\\n\\n')\n\n value = main_out.getvalue()\n main_out.close()\n return value, [clean_fullname(name[0]) for name in names]", "title": "" }, { "docid": "2780f66d389df9b4c13da9758682721f", "score": "0.59861654", "text": "def generate_schema_create_all(self, engine):", "title": "" }, { "docid": "a641406a7ed5ff80d92f765f3971c6bb", "score": "0.5980291", "text": "def genSchema(self, inc, cpp):\n \n hschema = self\n className = '{0}_v{1}'.format(self.pstype.name, self.schema.version)\n \n _log.debug(\"_genAbsType: type=%s\", repr(type))\n\n cpp_code = []\n\n # declarations for public methods \n methods = []\n for t in _types(self.pstype):\n for meth in t.methods(): \n if meth.access == 'public': \n decl, impl = self._genMethod(meth)\n methods += decl\n cpp_code += impl\n # generate _shape() methods for array attributes\n for attr in t.attributes() :\n decl, impl = self._genAttrShapeDecl(attr)\n methods += decl\n cpp_code += impl\n\n for ds in self.datasets:\n cpp_code += ds.ds_read_impl()\n\n # explicitely 
instantiate class with known config types\n for config in self.pstype.xtcConfig:\n cfgClassName = config.fullName('C++', self.psana_ns)\n cpp_code += [T(\"template class $className<$cfgClassName>;\")(locals())]\n\n # may also provide a constructor which takes dataset data\n if len(self.datasets) == 1:\n ds = self.datasets[0]\n decltype = ds.ds_decltype()\n dsName = ds.ds.name\n dsCtorWithArg = T('(const ${decltype}& ds) : m_ds_${dsName}(ds) {}')(locals())\n\n print(_TEMPL('abstract_type_declaration').render(locals()), file=inc)\n for line in cpp_code:\n print(line, file=cpp)\n\n print(_TEMPL('schema_store_impl').render(locals()), file=cpp)", "title": "" }, { "docid": "681b30ad38fd65658f3a1fedada7b0da", "score": "0.59798354", "text": "def input_schema(self) -> vol.Schema:\n raise NotImplementedError", "title": "" }, { "docid": "8ff1ab574b415fcac76d321d9863fa4d", "score": "0.59660643", "text": "def create_schema(self, name):\n self._execute_query('CREATE SCHEMA {}'.format(name))", "title": "" }, { "docid": "920e8aa8a0b9dbc784196b448be36c98", "score": "0.59649503", "text": "def dump_swagger():\n # TODO replace this with the URL of a real tng-project service\n app.config.update(SERVER_NAME=\"tng-project.5gtango.eu\")\n with app.app_context():\n with open(os.path.join(\"docs\", \"rest_api.json\"), \"w\") as f:\n f.write(json.dumps(api.__schema__))", "title": "" }, { "docid": "eda570052b3f0af58b3f4421690b7736", "score": "0.5963655", "text": "def _register_swagger(self):\n raise NotImplementedError", "title": "" }, { "docid": "e46c9ed7720c50a7ed3dea86dce248d2", "score": "0.5960012", "text": "def make_schema(backend_type: BackendType) -> Dict:\n\n schema = dict()\n is_backend_terraform = backend_type == BackendType.terraform\n\n # Backend settings\n schema[\"backend\"] = {\n \"required\": True,\n \"type\": \"dict\",\n \"schema\": {\n \"type\": {\"required\": True, \"type\": \"string\", \"allowed\": [backend_type.value]},\n \"environment\": {\"required\": True, \"type\": \"string\", \"allowed\": [\"develop\", \"staging\", \"production\"]},\n },\n }\n\n # Terraform settings\n schema[\"terraform\"] = {\n \"required\": is_backend_terraform,\n \"type\": \"dict\",\n \"schema\": {\"organization\": {\"required\": True, \"type\": \"string\", \"check_with\": customise_pointer}},\n }\n\n # Google Cloud settings\n schema[\"google_cloud\"] = {\n \"required\": is_backend_terraform,\n \"type\": \"dict\",\n \"schema\": {\n \"project_id\": {\"required\": is_backend_terraform, \"type\": \"string\", \"check_with\": customise_pointer},\n \"credentials\": {\n \"required\": is_backend_terraform,\n \"type\": \"string\",\n \"check_with\": customise_pointer,\n \"google_application_credentials\": True,\n },\n \"region\": {\n \"required\": is_backend_terraform,\n \"type\": \"string\",\n \"regex\": r\"^\\w+\\-\\w+\\d+$\",\n \"check_with\": customise_pointer,\n },\n \"zone\": {\n \"required\": is_backend_terraform,\n \"type\": \"string\",\n \"regex\": r\"^\\w+\\-\\w+\\d+\\-[a-z]{1}$\",\n \"check_with\": customise_pointer,\n },\n \"data_location\": {\"required\": is_backend_terraform, \"type\": \"string\", \"check_with\": customise_pointer},\n },\n }\n\n # Observatory settings\n package_types = [\"editable\", \"sdist\", \"pypi\"]\n schema[\"observatory\"] = {\n \"required\": True,\n \"type\": \"dict\",\n \"schema\": {\n \"package\": {\"required\": True, \"type\": \"string\"},\n \"package_type\": {\"required\": True, \"type\": \"string\", \"allowed\": package_types},\n \"airflow_fernet_key\": {\"required\": True, \"type\": 
\"string\", \"check_with\": check_schema_field_fernet_key},\n \"airflow_secret_key\": {\"required\": True, \"type\": \"string\", \"check_with\": check_schema_field_secret_key},\n \"airflow_ui_user_password\": {\"required\": is_backend_terraform, \"type\": \"string\"},\n \"airflow_ui_user_email\": {\"required\": is_backend_terraform, \"type\": \"string\"},\n \"observatory_home\": {\"required\": False, \"type\": \"string\"},\n \"postgres_password\": {\"required\": is_backend_terraform, \"type\": \"string\"},\n \"redis_port\": {\"required\": False, \"type\": \"integer\"},\n \"flower_ui_port\": {\"required\": False, \"type\": \"integer\"},\n \"airflow_ui_port\": {\"required\": False, \"type\": \"integer\"},\n \"docker_network_name\": {\"required\": False, \"type\": \"string\"},\n \"docker_network_is_external\": {\"required\": False, \"type\": \"boolean\"},\n \"docker_compose_project_name\": {\"required\": False, \"type\": \"string\"},\n \"api_package\": {\"required\": False, \"type\": \"string\"},\n \"api_package_type\": {\"required\": False, \"type\": \"string\", \"allowed\": package_types},\n \"api_port\": {\"required\": False, \"type\": \"integer\"},\n },\n }\n\n # Database settings\n if is_backend_terraform:\n schema[\"cloud_sql_database\"] = {\n \"required\": True,\n \"type\": \"dict\",\n \"schema\": {\n \"tier\": {\"required\": True, \"type\": \"string\"},\n \"backup_start_time\": {\"required\": True, \"type\": \"string\", \"regex\": r\"^\\d{2}:\\d{2}$\"},\n },\n }\n\n # VM schema\n vm_schema = {\n \"required\": True,\n \"type\": \"dict\",\n \"schema\": {\n \"machine_type\": {\n \"required\": True,\n \"type\": \"string\",\n },\n \"disk_size\": {\"required\": True, \"type\": \"integer\", \"min\": 1},\n \"disk_type\": {\"required\": True, \"type\": \"string\", \"allowed\": [\"pd-standard\", \"pd-ssd\"]},\n \"create\": {\"required\": True, \"type\": \"boolean\"},\n },\n }\n\n # Airflow main and worker VM\n if is_backend_terraform:\n schema[\"airflow_main_vm\"] = vm_schema\n schema[\"airflow_worker_vm\"] = vm_schema\n\n # Workflow configuration\n cloud_workspace_schema = {\n \"project_id\": {\"required\": True, \"type\": \"string\"},\n \"download_bucket\": {\"required\": True, \"type\": \"string\"},\n \"transform_bucket\": {\"required\": True, \"type\": \"string\"},\n \"data_location\": {\"required\": True, \"type\": \"string\"},\n \"output_project_id\": {\"required\": False, \"type\": \"string\"},\n }\n\n schema[\"cloud_workspaces\"] = {\n \"required\": False,\n \"type\": \"list\",\n \"schema\": {\n \"type\": \"dict\",\n \"schema\": {\"workspace\": {\"required\": True, \"type\": \"dict\", \"schema\": cloud_workspace_schema}},\n },\n }\n\n schema[\"workflows\"] = {\n \"required\": False,\n \"dependencies\": \"cloud_workspaces\", # cloud_workspaces must be specified when workflows are defined\n \"type\": \"list\",\n \"schema\": {\n \"type\": \"dict\",\n \"schema\": {\n \"dag_id\": {\"required\": True, \"type\": \"string\"},\n \"name\": {\"required\": True, \"type\": \"string\"},\n \"class_name\": {\"required\": True, \"type\": \"string\"},\n \"cloud_workspace\": {\"required\": False, \"type\": \"dict\", \"schema\": cloud_workspace_schema},\n \"kwargs\": {\"required\": False, \"type\": \"dict\"},\n },\n },\n }\n\n schema[\"workflows_projects\"] = {\n \"required\": False,\n \"type\": \"list\",\n \"schema\": {\n \"type\": \"dict\",\n \"schema\": {\n \"package_name\": {\n \"required\": True,\n \"type\": \"string\",\n },\n \"package\": {\"required\": True, \"type\": \"string\"},\n 
\"package_type\": {\"required\": True, \"type\": \"string\", \"allowed\": package_types},\n \"dags_module\": {\n \"required\": True,\n \"type\": \"string\",\n },\n },\n },\n }\n\n return schema", "title": "" }, { "docid": "7d1ac7bcae6e04d0112aabbf22f4c3d5", "score": "0.5956505", "text": "def _create_schema_field(param: ServiceParameter):\n keywords = []\n imports = []\n methods = []\n code_name = snakeit(param.name)\n if code_name != param.name:\n keywords.append(\n ast.keyword(arg=\"data_key\", value=ast.Str(s=param.name, kind=None))\n )\n if not param.required:\n keywords.append(\n ast.keyword(arg=\"required\", value=ast.Constant(value=False, kind=None))\n )\n\n if param.name.startswith(\"/\"):\n placeholder = param.extra_data[\"(placeholderParam)\"]\n code_name = snakeit(placeholder[\"paramName\"])\n imports.append((\"marshmallow\", \"fields\"))\n imports.append((\"marshmallow\",))\n serialize_func = ast.Call(\n func=ast.Attribute(value=ast.Name(id=\"fields\"), attr=\"Dict\"),\n args=[],\n keywords=[],\n )\n\n # TODO: can break if there is a suffix\n key_name = placeholder[\"template\"].replace(\n \"<%s>\" % placeholder[\"placeholder\"], \"\"\n )\n code = ast.parse(\n textwrap.dedent(\n \"\"\"\n @marshmallow.post_dump\n def _%(target_name)s_post_dump(self, data, **kwrags):\n values = data.pop('%(target_name)s')\n if not values:\n return data\n for key, val in values.items():\n data[f\"%(key_name)s{key}\"] = val\n return data\n\n @marshmallow.pre_load\n def _%(target_name)s_post_load(self, data, **kwrags):\n items = {}\n for key in list(data.keys()):\n if key.startswith(\"%(key_name)s\"):\n items[key[%(key_len)d:]] = data[key]\n del data[key]\n data[\"%(target_name)s\"] = items\n return data\n \"\"\"\n % {\n \"target_name\": code_name,\n \"key_name\": key_name,\n \"key_len\": len(key_name),\n }\n )\n )\n methods.extend(code.body)\n\n elif param.type == \"string\":\n imports.append((\"commercetools.helpers\", \"OptionalList\"))\n imports.append((\"marshmallow\", \"fields\"))\n serialize_func = ast.Call(\n func=ast.Name(id=\"OptionalList\"),\n args=[\n ast.Call(\n func=ast.Attribute(value=ast.Name(id=\"fields\"), attr=\"String\"),\n args=[],\n keywords=[],\n )\n ],\n keywords=keywords,\n )\n elif param.type == \"number\":\n imports.append((\"marshmallow\", \"fields\"))\n serialize_func = ast.Call(\n func=ast.Attribute(value=ast.Name(id=\"fields\"), attr=\"Int\"),\n args=[],\n keywords=keywords,\n )\n\n elif param.type == \"boolean\":\n keywords.append(\n ast.keyword(arg=\"missing\", value=ast.Constant(value=False, kind=None))\n )\n imports.append((\"marshmallow\", \"fields\"))\n serialize_func = ast.Call(\n func=ast.Attribute(value=ast.Name(id=\"fields\"), attr=\"Bool\"),\n args=[],\n keywords=keywords,\n )\n elif param.type == \"file\":\n return None, []\n else:\n raise NotImplementedError(param)\n\n node = ast.Assign(targets=[ast.Name(id=code_name)], value=serialize_func, simple=1)\n return node, methods, imports", "title": "" }, { "docid": "8bbffc18327aec459e0d48b83fede227", "score": "0.59521544", "text": "def build_openapi(\n method: str,\n path: str,\n resp_code: str,\n parameters: List[Tuple[str, str]] = None,\n request: str = None,\n response: str = None,\n media_type: str = \"application/json\",\n example: bool = True,\n reference: bool = False,\n title: str = \"Generated by InducOapi\",\n version: str = \"v1\",\n) -> Dict:\n oapi = {\n \"openapi\": \"3.0.0\",\n \"info\": {\n \"title\": title,\n \"version\": version,\n },\n \"paths\": {\n path: {\n method.lower(): {\n 
\"requestBody\": None,\n \"responses\": {\n resp_code: {\n \"description\": \"\",\n }\n },\n }\n }\n },\n }\n\n if parameters:\n param_list = []\n for name, location in parameters:\n required = True if location == \"path\" else False\n param_list.append(\n {\n \"name\": name,\n \"in\": location,\n \"required\": required,\n \"description\": \"\",\n \"schema\": {},\n }\n )\n oapi[\"paths\"][path][method.lower()][\"parameters\"] = param_list\n\n if request:\n try:\n data = _load_json_yaml(request)\n except ValueError as e:\n raise ValueError(f\"Cannot load request data: {e}\")\n # noinspection PyTypeChecker,PyUnresolvedReferences\n oapi[\"paths\"][path][method.lower()][\"requestBody\"] = {\n \"content\": {media_type: {\"schema\": _gen_schema(data, example)}}\n }\n else:\n # noinspection PyTypeChecker,PyUnresolvedReferences\n del oapi[\"paths\"][path][method.lower()][\"requestBody\"]\n\n if response:\n try:\n data = _load_json_yaml(response)\n except ValueError as e:\n raise ValueError(f\"Cannot load response data: {e}\")\n if reference:\n schema_name = path.split(\"/\")[-1].capitalize()\n # noinspection PyTypeChecker,PyUnresolvedReferences\n oapi[\"paths\"][path][method.lower()][\"responses\"][resp_code][\n \"content\"\n ] = {\n media_type: {\n \"schema\": {\"$ref\": f\"#/components/schemas/{schema_name}\"}\n }\n }\n # noinspection PyUnresolvedReferences\n oapi[\"components\"] = {\n \"schemas\": {schema_name: _gen_schema(data, example)}\n }\n else:\n # noinspection PyTypeChecker,PyUnresolvedReferences\n oapi[\"paths\"][path][method.lower()][\"responses\"][resp_code][\n \"content\"\n ] = {media_type: {\"schema\": _gen_schema(data, example)}}\n\n validate_spec(copy.deepcopy(oapi))\n\n return oapi", "title": "" }, { "docid": "c80de12c3c6e8e9f0144f65463bfe5d9", "score": "0.5937456", "text": "def create_schema(engine: Engine):\n metadata.create_all(bind=engine)", "title": "" }, { "docid": "d33e5159d84559a6915141c4846a1a04", "score": "0.5876389", "text": "def create_schema(self, schema):\n qry = create_schema_query_generator.generate_query(schema)\n with self.__conn.cursor() as cursor:\n cursor.execute(qry)", "title": "" }, { "docid": "151707432aaa141a368d76ce480420ef", "score": "0.5873091", "text": "def serve_schema():\n docs_root = os.path.join(self.static_dir, 'docs')\n return serve_static(docs_root + '/' + 'openapi.json')", "title": "" }, { "docid": "9e1f039828faee2b920848b71c8a1dae", "score": "0.586057", "text": "def to_schema(cls):\n required = []\n props = {\n \"id\": {\n \"type\": \"string\",\n }\n }\n for k in cls._properties.keys():\n prop = cls._properties[k]\n if prop.required:\n required.append(prop._name)\n if prop._name:\n props[prop._name] = prop._get_schema()\n required.sort()\n schema = {\n \"type\": \"object\",\n \"required\": required,\n \"properties\": props,\n }\n return schema", "title": "" }, { "docid": "2040272954c88ff93f576c9d33ac6ce6", "score": "0.5841351", "text": "def schema(self, name, component=..., **kwargs):\n ...", "title": "" }, { "docid": "d227988344600e19d6b187d6c6f04f11", "score": "0.5841303", "text": "def publish_schema(DevelopmentSchemaArn=None, Version=None, MinorVersion=None, Name=None):\n pass", "title": "" }, { "docid": "69c1d960e9c9d592e9b9a38ae8f8df73", "score": "0.58314115", "text": "def user_jsonschema_with_file():\n return make_user_schema(\n middle_name={\"anyOf\": [{\"type\": \"string\"}, {\"type\": \"null\"}]}, scan={\"type\": \"string\", \"format\": \"binary\"}\n )", "title": "" }, { "docid": "c775d70706af0c3af1de196bfe0c84f2", "score": 
"0.58215266", "text": "def __init__(self, name=None, input_schema=None, output_schema=None, version=None, links=None, metadata=None): # noqa: E501\n self.openapi_types = {\n 'name': str,\n 'input_schema': List[Feature],\n 'output_schema': Dict[str, object],\n 'version': str,\n 'links': List[Link],\n 'metadata': Dict[str, object]\n }\n\n self.attribute_map = {\n 'name': 'name',\n 'input_schema': 'input_schema',\n 'output_schema': 'output_schema',\n 'version': 'version',\n 'links': 'links',\n 'metadata': 'metadata'\n }\n\n self._name = name\n self._input_schema = input_schema\n self._output_schema = output_schema\n self._version = version\n self._links = links\n self._metadata = metadata", "title": "" }, { "docid": "4266e0da4e932133c083062ec85df467", "score": "0.5816349", "text": "def _schema(self):\n\n raise NotImplementedError('Abstract method')", "title": "" }, { "docid": "db7659a3bbbd4046403f77f2af87b2a7", "score": "0.58121127", "text": "def add_schema(self, schema_name, schema):\n self.specs.definition(schema_name, schema=schema)", "title": "" }, { "docid": "384eb6b13289f684e559b1307ede80f6", "score": "0.5810266", "text": "def test_command_detects_schema_generation_mode(self):\n command = generateschema.Command()\n assert command.get_mode() == generateschema.OPENAPI_MODE\n with override_settings(REST_FRAMEWORK={'DEFAULT_SCHEMA_CLASS': 'rest_framework.schemas.AutoSchema'}):\n assert command.get_mode() == generateschema.COREAPI_MODE", "title": "" }, { "docid": "8315c83f063463fc194dd8b3d1c1a00a", "score": "0.58079195", "text": "def swagger_spec():\n spec = swagger(app)\n spec['info']['title'] = \"Nervana cloud challenge API\"\n spec['info']['description'] = (\"Nervana's cloud challenge \" +\n \"for interns and full-time hires\")\n spec['info']['license'] = {\n \"name\": \"Nervana Proprietary License\",\n \"url\": \"http://www.nervanasys.com\",\n }\n spec['info']['contact'] = {\n \"name\": \"Nervana Systems\",\n \"url\": \"http://www.nervanasys.com\",\n \"email\": \"[email protected]\",\n }\n spec['schemes'] = ['http']\n spec['tags'] = [\n {\"name\": \"db\", \"description\": \"database actions (create, delete)\"},\n {\"name\": \"commands\", \"description\": \"process and retrieve commands\"}\n ]\n return jsonify(spec)", "title": "" }, { "docid": "691df0345a740aa2b209777adfaa6c0d", "score": "0.5757698", "text": "def __init__(__self__,\n resource_name: str,\n args: SchemaArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "title": "" }, { "docid": "84abfac9c5928d44595a00c5360c3a7f", "score": "0.5744222", "text": "def write_populate_schemas(writer):\n writer.write('\\n__SCHEMAS = dict((n.fullname.lstrip(\".\"), n) for n in six.itervalues(__NAMES.names))\\n')", "title": "" }, { "docid": "6d758db550aed55926874164c3f087bf", "score": "0.5694023", "text": "def create_schema(self, name):\n sql = sql_create_schema(name)\n self.cursor.execute(sql)\n return self.add_schema(name)", "title": "" }, { "docid": "ac896458990017dffa918b279f7fb1a3", "score": "0.5681436", "text": "def compiled_api_schema_partial(self):\n if self.table_name == 'user':\n return {'api.schema': ''}\n view_import = sql.statements.view_import.substitute(table_name=self.table_name,\n table_name_lowercased=self.table_name,\n view_name=self.view_name)\n api_rpc_import = sql.statements.api_rpc_import.substitute(view_name=self.view_name)\n return {'api.schema': sql.statements.api_schema_import.substitute(view_import=view_import,\n api_rpc_import=api_rpc_import)}", "title": "" }, { "docid": "903bc9b40107c91d05d242405e006485", 
"score": "0.5677457", "text": "def schema(cls):\n\n with open(cls.schema_json, 'r') as fschema:\n return json.load(fschema)", "title": "" }, { "docid": "c5669bc2e781c31d4d150e6a1fca6947", "score": "0.5674583", "text": "def schema(self):\r\n schema = self.get_base_model().schema.get(**self.api_auth).get('fields', None)\r\n return schema", "title": "" }, { "docid": "fe9c594c6e8895f3024318544642763a", "score": "0.56686336", "text": "def test_generate_json_schema(self):\n self.assertIsInstance(WorkBook.generate_json_schema(), dict)", "title": "" }, { "docid": "036b282b34994896c283d66fde5b1209", "score": "0.5642566", "text": "def model_factory(\n *, name: str, base: typing.Type, schemas: types.Schemas\n) -> typing.Type:\n # Input validation\n # Checking that name is in schemas\n if name not in schemas:\n raise exceptions.SchemaNotFoundError(f\"{name} not found in schemas\")\n schema: types.Schema = schemas.get(name, {})\n # De-referencing schema\n schema = helpers.prepare_schema(schema=schema, schemas=schemas)\n # Checking for tablename key\n if \"x-tablename\" not in schema:\n raise exceptions.MalformedSchemaError(\n f'\"x-tablename\" is a required schema property for {name}.'\n )\n # Checking for object type\n if schema.get(\"type\") != \"object\":\n raise exceptions.FeatureNotImplementedError(\n f\"{schema.get('type')} is not supported in {name}.\"\n )\n if not schema.get(\"properties\"):\n raise exceptions.MalformedSchemaError(\n f\"At least 1 property is required for {name}.\"\n )\n\n # Calculating the class variables for the model\n model_class_vars = []\n required_exists = \"required\" in schema\n required_array = schema.get(\"required\", [])\n required_set = set(required_array)\n # Initializing the schema to record for the model\n model_schema: types.Schema = {\"type\": \"object\", \"properties\": {}}\n if required_exists:\n model_schema[\"required\"] = required_array\n if \"x-backrefs\" in schema:\n model_schema[\"x-backrefs\"] = helpers.ext_prop.get(\n source=schema, name=\"x-backrefs\"\n )\n description = helpers.peek.description(schema=schema, schemas={})\n if description is not None:\n model_schema[\"description\"] = description\n for prop_name, prop_spec in schema.get(\"properties\", []).items():\n prop_class_vars, prop_final_spec = column_factory.column_factory(\n spec=prop_spec,\n schemas=schemas,\n logical_name=prop_name,\n required=prop_name in required_set if required_exists else None,\n model_schema=schema,\n model_name=name,\n )\n model_class_vars.append(prop_class_vars)\n dict_ignore = helpers.ext_prop.get(\n source=prop_final_spec, name=\"x-dict-ignore\", default=False, pop=True\n )\n if not dict_ignore:\n model_schema[\"properties\"][prop_name] = prop_final_spec\n\n # Assembling model\n return type(\n name,\n (base, utility_base.UtilityBase),\n {\n \"__tablename__\": helpers.ext_prop.get(source=schema, name=\"x-tablename\"),\n \"_schema\": model_schema,\n **dict(itertools.chain.from_iterable(model_class_vars)),\n \"__table_args__\": table_args.construct(schema=schema),\n **_get_kwargs(schema=schema),\n },\n )", "title": "" }, { "docid": "7f803521b4193954d918ce461cfa7924", "score": "0.5642004", "text": "def get_schema(self):\n pass", "title": "" }, { "docid": "b13fb92bb5817ef49ee2b12d08e99011", "score": "0.5640245", "text": "def generate_entity_schema(entity, context, declaring_type, meta):\n properties = get_properties(entity)\n type_weights = murano_type.weigh_type_hierarchy(declaring_type)\n schema = {\n '$schema': 'http://json-schema.org/draft-04/schema#',\n 'type': 
'object',\n 'properties': {\n name: generate_property_schema(prop, context, type_weights)\n for name, prop in properties.items()\n },\n 'additionalProperties': False,\n 'formSections': generate_sections(meta, type_weights)\n }\n schema.update(generate_ui_hints(entity, context, type_weights))\n return schema", "title": "" }, { "docid": "7882aac62ae650c7d90252e223237fe0", "score": "0.5636498", "text": "def to_openapi_dict(self):", "title": "" }, { "docid": "9d3f73c1f5e9a02940d2aad8f48fef2e", "score": "0.5632853", "text": "def test_schema_creation( self ):\n\n class MyClass( Serializable ):\n some_field = Field()\n another_field = Field()\n\n schema = MyClass._schema\n assert issubclass( schema, marshmallow.Schema )\n assert len( schema._declared_fields ) == 2\n assert 'some_field' in schema._declared_fields\n assert isinstance( schema._declared_fields['some_field'], marshmallow.fields.Field )\n assert 'another_field' in schema._declared_fields\n assert isinstance( schema._declared_fields['another_field'], marshmallow.fields.Field )", "title": "" }, { "docid": "a492b306352c5b0b7cf8b421322b3122", "score": "0.5631139", "text": "def testLazySchemaForCreation(self):\n api = self.ApiFromDiscoveryDoc(self._TEST_DISCOVERY_DOC)\n for schema in ['Activity', 'Comment', 'Activity.object']:\n self.assertTrue(isinstance(api._schemas[schema], Schema))", "title": "" }, { "docid": "4694d1670a4ff9e1a6e7621ebac50375", "score": "0.56300807", "text": "def test_openapi(client):\n url = reverse('openapi-schema')\n headers = {'Accept': 'application/vnd.oai.openapi'}\n response = client.get(url, headers=headers)\n assert response.status_code == 200\n assert 'application/vnd.oai.openapi' in response['content-type'], \"Response should be OpenAPI spec\"", "title": "" }, { "docid": "453ea9d11d1e1a73df03a4bbdd1459c3", "score": "0.56215984", "text": "def schema(self):\n schema = simplify_schema_for_filter(self._schema())\n\n if self.name is None:\n return schema\n return {\n \"type\": \"object\",\n \"properties\": {\"${}\".format(self.name): schema},\n \"required\": [\"${}\".format(self.name)],\n \"additionalProperties\": False,\n }", "title": "" }, { "docid": "4ecdddf071f85552ff895da0001987b2", "score": "0.5615711", "text": "def schema(self):\n raise NotImplementedError()", "title": "" }, { "docid": "0162ce6797f1b92c7fd16c52952cbe24", "score": "0.5594452", "text": "def test_schema_v2():\n testargs = ['--schema', '2.0',\n './tests/integration/data/v2.0/petstore.yaml']\n main(testargs)", "title": "" }, { "docid": "d99921377435ab10ca8ed9df0edff742", "score": "0.5593371", "text": "def create_swagger_json_handler(app, **kwargs):\n\n spec = get_swagger_spec(app).swagger_definition(**kwargs)\n encoded_spec = json.dumps(spec).encode(\"UTF-8\")\n\n async def swagger(request):\n return web.Response(\n # we allow CORS, so this can be requested at swagger.io\n headers={\"Access-Control-Allow-Origin\": \"*\"},\n body=encoded_spec,\n content_type=\"application/json\",\n )\n\n return swagger", "title": "" }, { "docid": "53fcdefc83e23ed6d0983e548ea92722", "score": "0.55931544", "text": "def meta_schema():\n meta_schema_file = os.path.join(spack.paths.test_path, \"data\", \"jsonschema_meta.json\")\n with open(meta_schema_file) as f:\n ms = json.load(f)\n return ms", "title": "" }, { "docid": "df0fd299789148719babd6f3cbcc341e", "score": "0.5593146", "text": "def get_schema(self, addon):\n raw_schema = self._system_data[addon][ATTR_SCHEMA]\n\n if isinstance(raw_schema, bool):\n return vol.Schema(dict)\n\n return vol.Schema(vol.All(dict, 
validate_options(raw_schema)))", "title": "" }, { "docid": "379c56526583054928c51f885ec5c34b", "score": "0.5581603", "text": "def set_schema(obj, eng):\n obj.data[\"$schema\"] = url_for(\n \"invenio_jsonschemas.get_schema\", schema_path=\"hep.json\"\n )", "title": "" }, { "docid": "cf279f4c30cfb6581d09439299186a96", "score": "0.5575768", "text": "def put_schema_from_json(SchemaArn=None, Document=None):\n pass", "title": "" }, { "docid": "b70b4f74381d1d1e401f9239d70c398c", "score": "0.557461", "text": "def generate_schema():\n start_msg = \"Creating database schema...\"\n end_msg = \"Database schema created\"\n print(f\"[ -- ] {start_msg}\")\n logger.info(start_msg)\n try:\n Base.metadata.create_all(engine)\n print(f\"[ OK ] {end_msg}\")\n logger.info(end_msg)\n except Exception as e:\n error_msg = f\"Error creating schema: {e}\"\n print(f\"[ERROR] {error_msg}\")\n logger.error(error_msg)\n sys.exit(1)", "title": "" }, { "docid": "1dd233a6919bcd4bde5a8a15ea34d91d", "score": "0.557138", "text": "def _create_schema(self):\r\n con = db.connect(self._db_file)\r\n\r\n with con:\r\n self._logger.info('Creating schema')\r\n\r\n with open(self._schema_file, 'rt') as f:\r\n schema = f.read()\r\n con.executescript(schema)", "title": "" }, { "docid": "e3e4e25869a424c88363ff6a7cf64594", "score": "0.55532557", "text": "def prep_schema(destination, json_schema: List[object]) -> Dict[str, object]:\n\n return {\n 'fields': json_schema\n }", "title": "" }, { "docid": "aaa1f1384d16122c3a6692c7a6a057d1", "score": "0.55509776", "text": "def schema_helper(\n self, name: str, definition: dict, **kwargs: typing.Any\n ) -> dict | None:\n raise PluginMethodNotImplementedError", "title": "" }, { "docid": "33ad449ff391398bc116611abede4bcf", "score": "0.5549294", "text": "def __init__(self, schema: Dict = None, **kwargs):\n super().__init__(schema=schema, **kwargs)\n self.schema = schema", "title": "" }, { "docid": "d54f6076923cc3e2a7aafa52773e7a75", "score": "0.55265033", "text": "def create_schema(self, database, schema, model_name=None):\n raise dbt.exceptions.NotImplementedException(\n '`create_schema` is not implemented for this adapter!'\n )", "title": "" }, { "docid": "97baebb8cf603a745c64d3fb15a1e14f", "score": "0.5526269", "text": "def __init__(self, schema):\n self._schema = schema", "title": "" }, { "docid": "722a6268408c766c5212521fe8dbac2c", "score": "0.55218005", "text": "def body_schema(self):", "title": "" }, { "docid": "fc35676ec41d8dd818c553eb90465550", "score": "0.5510746", "text": "def _schema_for_validation(self) -> typing.Union[PathAwareSchema, Schema]:\n return self._create_schema_for_validation(\n context={BASE_PATH_CONTEXT_KEY: self._base_path_for_validation or Path.cwd()}\n )", "title": "" }, { "docid": "3074c273dae75e3d25dab9167665f36c", "score": "0.5510611", "text": "def SchemaGen(\n statistics_path: InputPath('ExampleStatistics'),\n schema_path: OutputPath('Schema'),\n infer_feature_shape: bool = None, # ? 
False\n):\n from tfx.components.schema_gen.component import SchemaGen as component_class\n\n #Generated code\n import json\n import os\n import tensorflow\n from google.protobuf import json_format, message\n from tfx.types import Artifact, channel_utils, artifact_utils\n\n arguments = locals().copy()\n\n component_class_args = {}\n\n for name, execution_parameter in component_class.SPEC_CLASS.PARAMETERS.items():\n argument_value_obj = argument_value = arguments.get(name, None)\n if argument_value is None:\n continue\n parameter_type = execution_parameter.type\n if isinstance(parameter_type, type) and issubclass(parameter_type, message.Message): # Maybe FIX: execution_parameter.type can also be a tuple\n argument_value_obj = parameter_type()\n json_format.Parse(argument_value, argument_value_obj)\n component_class_args[name] = argument_value_obj\n\n for name, channel_parameter in component_class.SPEC_CLASS.INPUTS.items():\n artifact_path = arguments[name + '_path']\n if artifact_path:\n artifact = channel_parameter.type()\n artifact.uri = artifact_path + '/' # ?\n if channel_parameter.type.PROPERTIES and 'split_names' in channel_parameter.type.PROPERTIES:\n # Recovering splits\n subdirs = tensorflow.io.gfile.listdir(artifact_path)\n artifact.split_names = artifact_utils.encode_split_names(sorted(subdirs))\n component_class_args[name] = channel_utils.as_channel([artifact])\n\n component_class_instance = component_class(**component_class_args)\n\n input_dict = {name: channel.get() for name, channel in component_class_instance.inputs.get_all().items()}\n output_dict = {name: channel.get() for name, channel in component_class_instance.outputs.get_all().items()}\n exec_properties = component_class_instance.exec_properties\n\n # Generating paths for output artifacts\n for name, artifacts in output_dict.items():\n base_artifact_path = arguments[name + '_path']\n # Are there still cases where output channel has multiple artifacts?\n for idx, artifact in enumerate(artifacts):\n subdir = str(idx + 1) if idx > 0 else ''\n artifact.uri = os.path.join(base_artifact_path, subdir) # Ends with '/'\n\n print('component instance: ' + str(component_class_instance))\n\n #executor = component_class.EXECUTOR_SPEC.executor_class() # Same\n executor = component_class_instance.executor_spec.executor_class()\n executor.Do(\n input_dict=input_dict,\n output_dict=output_dict,\n exec_properties=exec_properties,\n )\n #return (output_path,)", "title": "" }, { "docid": "86c2b1ba21f6307a5947a47190160480", "score": "0.55078715", "text": "def create_schema(self, schema_name, comment=None, acls={}, annotations={}):\n self.logger.debug('name: %s', schema_name)\n try:\n s = self.model_instance.create_schema(em.Schema.define(\n schema_name,\n comment=comment,\n acls=acls,\n annotations=annotations\n )\n )\n except ValueError:\n raise DerivaCatalogError(self, 'Schema %s already exists'.format(schema_name))\n self.model_map[s] = DerivaSchema(self, s)\n return self.schema(schema_name)", "title": "" }, { "docid": "8723675e8f70070de9ed54eee87adf28", "score": "0.5501187", "text": "def serialization_schema(cls):\n pass", "title": "" }, { "docid": "815a579a212a847ac0954d275f13dfbb", "score": "0.5499391", "text": "def make_docs_ui_view(api_info):\n return get_schema_view(\n api_info,\n generator_class=ApiSchemaGenerator,\n public=True,\n permission_classes=(permissions.AllowAny,),\n ).with_ui('swagger', cache_timeout=get_docs_cache_timeout())", "title": "" }, { "docid": "39ee5bf728e6516445e8e4fecf4481dd", "score": "0.5496596", "text": 
"def api_docs():\n return jsonify(swagger(app))", "title": "" }, { "docid": "400003bce6fd9c6d7761de8334faa5c6", "score": "0.5496215", "text": "def write_schema_files(schema_json, output_folder, use_logical_types=False, custom_imports=None):\n schema_py, names = generate_schema(schema_json, use_logical_types, custom_imports)\n names = sorted(names)\n\n if not os.path.isdir(output_folder):\n os.mkdir(output_folder)\n\n with open(os.path.join(output_folder, \"schema_classes.py\"), \"w+\") as f:\n f.write(schema_py)\n\n with open(os.path.join(output_folder, \"schema.avsc\"), \"w+\") as f:\n f.write(schema_json)\n\n ns_dict = generate_namespace_modules(names, output_folder)\n\n with open(os.path.join(output_folder, \"__init__.py\"), \"w+\") as f:\n pass # make sure we create this file from scratch\n\n write_namespace_modules(ns_dict, output_folder)\n write_specific_reader(names, output_folder, use_logical_types)", "title": "" }, { "docid": "a754711119f73882be757db9d4ad7c47", "score": "0.5495448", "text": "def instantiate(self, data):\n return _ifcopenshell_wrapper.schema_definition_instantiate(self, data)", "title": "" }, { "docid": "b4fb5810464da599efe5e0560eb1fa46", "score": "0.5487391", "text": "def _buildSchemaObj(self, field, startPos):\n SchemaField = namedtuple('SchemaField', 'name typeHelper slice default pk fk')\n endPos = startPos + int(field['length'])\n helper = self._schemaBuilderTypeHelper(field['type'])\n schemaObj = SchemaField(field['name'], helper,\n slice(startPos, endPos),\n field['default'],\n field['pk'], field['fk'])\n return schemaObj, endPos", "title": "" }, { "docid": "eafcac2dc7509f6afd1f835aba97d208", "score": "0.54844546", "text": "def __init__(self, py_dict=None):\n\n super(LoadBalancerConfigSchema, self).__init__()\n self._self = resource_link_schema.ResourceLinkSchema()\n self.global_ips = [load_balancer_global_ip_schema.LoadBalancerGlobalIpSchema()]\n self.application_rules = \\\n [load_balancer_application_rule_schema.LoadBalancerApplicationRuleSchema()]\n self.display_name = None\n self.description = None\n self._create_user = None\n self.monitors = \\\n [load_balancer_monitor_schema.LoadBalancerMonitorSchema()]\n self.global_sites = \\\n [load_balancer_global_site_schema.LoadBalancerGlobalSiteSchema()]\n self.id = None\n self._create_time = None\n self.schema = None\n self.virtual_servers = [virtual_servers_schema.VirtualServerConfigSchema()]\n self._links = [resource_link_schema.ResourceLinkSchema()]\n self.logging = logging_config_schema.LoggingConfigSchema()\n self._last_modified_time = None\n self.application_profiles = \\\n [load_balancer_application_profile_config_Schema.LoadBalancerApplicationProfileConfigSchema()]\n self._last_modified_user = None\n self.acceleration_enabled = None\n self.tags = [tag_schema.TagSchema()]\n self.revision = None\n self.pools = [load_balancer_pool_config_schema.LoadBalancerPoolConfigSchema()]\n\n if py_dict is not None:\n self.get_object_from_py_dict(py_dict)", "title": "" }, { "docid": "5d5e16e3cb308045d2ee9ca945d108ff", "score": "0.5472457", "text": "def get_schemas(name: str):\n\n if name.upper() == 'LOG':\n # Create schema for jsons of logs\n LOG_SCHEMA = T.StructType([\n T.StructField(\"artist\", T.StringType()),\n T.StructField(\"auth\", T.StringType()),\n T.StructField(\"firstName\", T.StringType()),\n T.StructField(\"gender\", T.StringType()),\n T.StructField(\"itemInSession\", T.LongType()),\n T.StructField(\"lastName\", T.StringType()),\n T.StructField(\"length\", T.DoubleType()),\n T.StructField(\"level\", 
T.StringType()),\n T.StructField(\"location\", T.StringType()),\n T.StructField(\"method\", T.StringType()),\n T.StructField(\"page\", T.StringType()),\n T.StructField(\"registration\", T.DoubleType()),\n T.StructField(\"sessionId\", T.LongType()),\n T.StructField(\"song\", T.StringType()),\n T.StructField(\"status\", T.LongType()),\n T.StructField(\"ts\", T.LongType()),\n T.StructField(\"userAgent\", T.StringType()),\n T.StructField(\"userId\", T.StringType())\n ])\n\n return LOG_SCHEMA\n\n if name.upper() == 'SONG':\n SONG_SCHEMA = T.StructType([\n T.StructField(\"artist_id\", T.StringType(), True),\n T.StructField(\"artist_latitude\", T.DoubleType(), True),\n T.StructField(\"artist_location\", T.StringType(), True),\n T.StructField(\"artist_longitude\", T.DoubleType(), True),\n T.StructField(\"artist_name\", T.StringType(), True),\n T.StructField(\"duration\", T.DoubleType(), True),\n T.StructField(\"num_songs\", T.IntegerType(), True),\n T.StructField(\"song_id\", T.StringType(), True),\n T.StructField(\"title\", T.StringType(), True),\n T.StructField(\"year\", T.IntegerType(), True),\n ])\n\n return SONG_SCHEMA", "title": "" }, { "docid": "cbb1612ad807fb52c0825ad24c0ddbd8", "score": "0.54661965", "text": "def get_swagger_view(title=None, url=None, patterns=None, urlconf=None):\n\n class SwaggerSchemaView(APIView):\n _ignore_model_permissions = True\n exclude_from_schema = True\n permission_classes = [AllowAny]\n renderer_classes = [\n CoreJSONRenderer,\n renderers.OpenAPIRenderer,\n renderers.SwaggerUIRenderer,\n ]\n\n def get(self, request):\n generator = SchemaGenerator(\n title=title, url=url, patterns=patterns, urlconf=urlconf\n )\n schema = generator.get_schema(request=request, public=True)\n return Response(schema)\n\n return SwaggerSchemaView.as_view()", "title": "" }, { "docid": "1dd7d07d4c128753f7dc603765e7cfad", "score": "0.54629606", "text": "def openapi_json() -> object:\n path = os.path.join(os.path.dirname(__file__), 'v1/spec', 'v1.yaml')\n file = open(path, 'r')\n docs = yaml.load(file)\n return jsonify(docs)", "title": "" }, { "docid": "46f19ffad3152983a29b9ad764368ec7", "score": "0.5447652", "text": "def do(self, blob, ignore_missing=True, exception_handlers=None):\n json = super(OverdoJSONSchema, self).do(\n blob=blob,\n ignore_missing=ignore_missing,\n exception_handlers=exception_handlers)\n if HAS_FLASK:\n json_schema = current_app.extensions['invenio-jsonschemas']\n json['$schema'] = {\n '$ref': json_schema.path_to_url(self.__class__.__schema__)\n }\n else:\n json['$schema'] = {'$ref': self.__class__.__schema__}\n\n return json", "title": "" }, { "docid": "86189eb13ea974ec6e1cfd4f8a68f941", "score": "0.5446689", "text": "def register_schema_for_connection(config):\n\n LOG.info(\"Registering schema, this may take a moment...\")\n\n target_address = config.target_address\n target_connection_id = config.target_connection[\"id\"]\n\n url = f\"{target_address}/{target_connection_id}/schema\"\n access_token = get_access_token(config)\n\n # Set headers\n headers = {\n 'Authorization': f'Bearer {access_token}',\n 'Content-Type': 'application/json'\n }\n\n schema_path = os.path.join(APP_TEMP_DIR, 'microsoft_graph_schema.json')\n\n with open(schema_path, \"r\") as schema_file:\n schema_json = json.load(schema_file)\n\n response = requests.post(url, headers=headers, json=schema_json)\n\n # Schema is accepted to be registered\n if response.status_code == 202:\n LOG.info(\"Schema has been posted and accepted!\")\n\n # Check connection operation status until complete status, 
so we can proceed with sync\n check_connection_operation_status(config, response.headers[\"Location\"])\n\n # If schema already exists but still not registered (409 - conflict)\n elif response.status_code == 409:\n LOG.info(\"Schema is already posted but still not registered. Please try later. %s\", response.json())\n else:\n LOG.info(\"Error while registering connection schema. %s\", response.json())\n\n response.raise_for_status()", "title": "" }, { "docid": "332d0fe26d8a9928010d3f4ec361e50a", "score": "0.54444975", "text": "def create_schema():\n engine = create_engine(DATABASEURI)\n #schema_dir = os.path.join(\"./schema\", \"yelp_schema.sql\")\n schema_dir = os.path.join(\"./schema\", \"yelp_schema_Project2.sql\")\n schema_file = open(schema_dir)\n sql_command = text(schema_file.read())\n try:\n engine.execute(sql_command)\n except exc.SQLAlchemyError:\n raise", "title": "" } ]
2589bff24ae753f876adb3845642c57b
Use this parameter to enable sessionless load balancing.
[ { "docid": "884de96cf3f76c8947909b56cee4f95f", "score": "0.60458386", "text": "def set_sessionless(self, sessionless):\n self.options['sessionless'] = sessionless", "title": "" } ]
[ { "docid": "f270baae5754a122166b60ba38ef538e", "score": "0.5495329", "text": "def enable_member(b, session_objects):\r\n #Note how easy it is to simply 'toggle' the session state now that\r\n #we are dealing with object attributes.\r\n for x in sstate_seq.item:\r\n x.session_state = 'STATE_ENABLED'\r\n try:\r\n b.LocalLB.PoolMember.set_session_enabled_state(pool_names = [POOL],\r\n session_states = [sstate_seq])\r\n except Exception, e:\r\n print e", "title": "" }, { "docid": "8ff8b344b29f5911fadf2fc764cf4210", "score": "0.54011", "text": "def enable_load_balancer(self) -> Optional[bool]:\n return pulumi.get(self, \"enable_load_balancer\")", "title": "" }, { "docid": "00433ec2faadd0d3740714acb01b1652", "score": "0.5138653", "text": "def get_sessionless(self):\n return self.options['sessionless']", "title": "" }, { "docid": "88d3febe81bf34dff6e4aab14acec8bc", "score": "0.5060981", "text": "def enable_cross_zone_load_balancing(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"enable_cross_zone_load_balancing\")", "title": "" }, { "docid": "88d3febe81bf34dff6e4aab14acec8bc", "score": "0.5060981", "text": "def enable_cross_zone_load_balancing(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"enable_cross_zone_load_balancing\")", "title": "" }, { "docid": "f21d82fe79b7c38d6354ff1f80efa492", "score": "0.50352335", "text": "def enable_cross_zone_load_balancing(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"enable_cross_zone_load_balancing\")", "title": "" }, { "docid": "cf5a400001a3d018af9c4c89533aac87", "score": "0.49530727", "text": "def autoBridgeBalance(self):\n pass", "title": "" }, { "docid": "7852a62fe402dfd6520a781bfadb13e1", "score": "0.4924656", "text": "def use_automatic_session_management(self):\n self._session_management = AUTOMATIC", "title": "" }, { "docid": "7852a62fe402dfd6520a781bfadb13e1", "score": "0.4924656", "text": "def use_automatic_session_management(self):\n self._session_management = AUTOMATIC", "title": "" }, { "docid": "7852a62fe402dfd6520a781bfadb13e1", "score": "0.4924656", "text": "def use_automatic_session_management(self):\n self._session_management = AUTOMATIC", "title": "" }, { "docid": "7852a62fe402dfd6520a781bfadb13e1", "score": "0.4924656", "text": "def use_automatic_session_management(self):\n self._session_management = AUTOMATIC", "title": "" }, { "docid": "0eee52002210a3f9a0dfd15a3af1d678", "score": "0.48912153", "text": "def autoBridgeBalance(self):\n raise NotImplementedError()", "title": "" }, { "docid": "593dbd7eece1f6f2fc88e70fe7a5cf48", "score": "0.48723963", "text": "def setup_tunnel_br(self, tun_br_name=None):\n if self.hybrid_mode:\n super(GBPOvsAgent, self).setup_tunnel_br(tun_br_name)", "title": "" }, { "docid": "a678372af5c119f3bda38e961c827352", "score": "0.47747684", "text": "def EnableLspSelfPing(self):\n return self._get_attribute('enableLspSelfPing')", "title": "" }, { "docid": "0fb72c3fffa68cf9ba9fbcedc85e853f", "score": "0.46988103", "text": "def enable_backend_ssl30(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"enable_backend_ssl30\")", "title": "" }, { "docid": "b4f290fdb2ceb39b57aef9df2ac40219", "score": "0.46630543", "text": "def EnableLoopBack(self):\n if self.force_auto_sync:\n self.get('EnableLoopBack')\n return self._EnableLoopBack", "title": "" }, { "docid": "e429bed1e745c0b27d676c3fcf1f4756", "score": "0.46581128", "text": "def disable_member(b, session_objects):\r\n try:\r\n b.LocalLB.PoolMember.set_session_enabled_state(pool_names =\r\n [POOL], 
session_states = [sstate_seq])\r\n \r\n except Exception, e:\r\n print e", "title": "" }, { "docid": "3a64f8b02b961cc27d0f736166b96fdc", "score": "0.4651878", "text": "def use_mandatory_session_management(self):\n # Session state will be saved and can not be closed by consumers\n self._session_management = MANDATORY", "title": "" }, { "docid": "7fc76764c036ba7724ae251f60bcab8b", "score": "0.46471593", "text": "def use_mandatory_session_management(self):\n self._session_management = MANDATORY", "title": "" }, { "docid": "a4b2889a8c11deb3b2eb1e181b69a380", "score": "0.46272922", "text": "def conf_ssl_enable_sess_cache(self, sslplugin=\"initiation\", **kwargs):\r\n mode = kwargs.get('mode', \"set\").lower()\r\n commit = kwargs.get('commit', \"yes\").lower()\r\n sslprofile = kwargs.get('sslprofile', None)\r\n\r\n if sslprofile is None or sslplugin is None:\r\n self.device.log(level=\"ERROR\", message=\"sslprofile and sslplugin is \\\r\n REQUIRED key argument\")\r\n raise ValueError(\"sslprofile and sslplugin is REQUIRED key argument\")\r\n\r\n cfg_node = mode + \" services ssl \" + sslplugin + \" profile \" + sslprofile + \\\r\n \" enable-session-cache \"\r\n cmdlist = [cfg_node]\r\n\r\n # Configure and commit the configuration\r\n self.device.config(command_list=cmdlist)\r\n if commit == 'yes' and len(cmdlist) != 0:\r\n self.device.commit()\r\n\r\n return True", "title": "" }, { "docid": "8bccb8771cc5f71805a1e0fa8784c2ee", "score": "0.46078894", "text": "def EnableLspPing(self):\n return self._get_attribute('enableLspPing')", "title": "" }, { "docid": "fd3f78c06e9e2949cf37e6290d4070a6", "score": "0.45956352", "text": "def enable_frontend_ssl30(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"enable_frontend_ssl30\")", "title": "" }, { "docid": "b5e3c13f525dcd38929bf683e5ffe8ac", "score": "0.45904246", "text": "def load_balancing_mode(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"load_balancing_mode\")", "title": "" }, { "docid": "b8337dd596ff80c4ee025693a3ff9a39", "score": "0.45553657", "text": "def session_active(self, session_active):\n\n self._session_active = session_active", "title": "" }, { "docid": "08fb0a5861ede71bc06d3c3285e53434", "score": "0.45508212", "text": "def loadbalance_instance():\n RAINMAN3.algorithm_config = QLEARNING_REGRESSION_CONFIG\n RAINMAN3.environment_config = CELLULAR_DEV_CONFIG\n return RAINMAN3", "title": "" }, { "docid": "4f91b6145233dc53809713b3739b3c0a", "score": "0.45359793", "text": "def pre_session_hook(self) -> Any:\n pass", "title": "" }, { "docid": "1604d1ccb862e6a267124bada5abf0c7", "score": "0.45296976", "text": "def __init__(self, optimizer):\n\n super(ShardedOptimizer, self).__init__(False, name=\"ShardedOptimizer\")\n 
self._optimizer = optimizer", "title": "" }, { "docid": "9913ca4a5d7aedada96dba5ba624e1d5", "score": "0.45058346", "text": "def __init__(self, local_dc='', used_hosts_per_remote_dc=0):\n self.local_dc = local_dc\n self.used_hosts_per_remote_dc = used_hosts_per_remote_dc\n self._dc_live_hosts = {}\n self._position = 0\n self._endpoints = []\n LoadBalancingPolicy.__init__(self)", "title": "" }, { "docid": "93398518583882c48a6134350b6cbdd1", "score": "0.44942713", "text": "def session(key='default'):\n self.active = key\n return self", "title": "" }, { "docid": "4f1cf049688992fde8df3e0f47c61cbc", "score": "0.44906288", "text": "def start_traffic(self, context, blocking):\n pass", "title": "" }, { "docid": "2f446c936860ad9e04238ba29954a52f", "score": "0.44898263", "text": "def enable_http_keep_alive(self) -> bool:\n return self.get(\"enable-http-keep-alive\", bool_or_value) # type: ignore", "title": "" }, { "docid": "28c74fc4d906edec08816b45e7e6dad2", "score": "0.4484103", "text": "def sdrs_io_load_balance_enabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"sdrs_io_load_balance_enabled\")", "title": "" }, { "docid": "54c607f584fa29a58512932c1a1608f9", "score": "0.4481955", "text": "def set_up_network_profile(self, mc: ManagedCluster) -> ManagedCluster:\n mc = super().set_up_network_profile(mc)\n network_profile = mc.network_profile\n\n # build nat gateway profile, which is part of the network profile\n nat_gateway_profile = create_nat_gateway_profile(\n self.context.get_nat_gateway_managed_outbound_ip_count(),\n self.context.get_nat_gateway_idle_timeout(),\n models=self.models.nat_gateway_models,\n )\n\n load_balancer_sku = self.context.get_load_balancer_sku()\n if load_balancer_sku != \"basic\":\n network_profile.nat_gateway_profile = nat_gateway_profile\n mc.network_profile = network_profile\n return mc", "title": "" }, { "docid": "46db5946284c6b79d40229b897f75086", "score": "0.4476509", "text": "def DPxEnableDinStabilize():\n enableDinStabilize()", "title": "" }, { "docid": "e898dcbc27cf259f7311a599b6e37d8a", "score": "0.44732928", "text": "def enable_backend_tls11(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"enable_backend_tls11\")", "title": "" }, { "docid": "5746b6862b00af655f90b482f8b1ca01", "score": "0.4472187", "text": "def load_balancing_mode(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"load_balancing_mode\")", "title": "" }, { "docid": "3a29b0c60c0a57d04c38375d266fbde9", "score": "0.44698948", "text": "def EnableDnsRoundRobin(self) -> bool:", "title": "" }, { "docid": "4a44151324cf52179f55fd68005aa10b", "score": "0.4469784", "text": "def confFeatures(self):\n\n try:\n self.ns_session.enable_features(\"lb\")\n self.ns_session.enable_features(\"ssl\")\n self.ns_session.enable_features(\"cs\")\n self.ns_session.enable_features(\"appfw\")\n except nitro_exception as e:\n print(\"Exception::errorcode=\" +\n str(e.errorcode) + \",message=\" + e.message)\n except Exception as e:\n print(\"Exception::message=\" + str(e.args))\n\n return", "title": "" }, { "docid": "9e2fb0a70c9e84897bc65ae3e343a870", "score": "0.44550323", "text": "def init_session(self):\n pass", "title": "" }, { "docid": "ae7281b1618e8d8b374e8cb7ac4b264c", "score": "0.44428363", "text": "def session_init():\n\n session()", "title": "" 
}, { "docid": "a5d79449003ff765104b3e2f5b937297", "score": "0.44365466", "text": "def server_enable(s_name, **connection_args):\n ret = True\n server = _server_get(s_name, **connection_args)\n if server is None:\n return False\n if server.get_state() == \"ENABLED\":\n return True\n nitro = _connect(**connection_args)\n if nitro is None:\n return False\n try:\n NSServer.enable(nitro, server)\n except NSNitroError as error:\n log.debug(\"netscaler module error - NSServer.enable() failed: %s\", error)\n ret = False\n _disconnect(nitro)\n return ret", "title": "" }, { "docid": "c49342295a2cca26528fe681feb0a7a1", "score": "0.4422391", "text": "def is_always_active(self):\n return True", "title": "" }, { "docid": "9723dae6779c3f7f8c20a1d047c2f87e", "score": "0.44206655", "text": "def enable_single_session_login(self, enable_single_session_login):\n\n self._enable_single_session_login = enable_single_session_login", "title": "" }, { "docid": "88b9ab6a2a88e4cdd97919ff0b890757", "score": "0.4415641", "text": "def start_session(self):\n self.log.debug(\"Session start\")", "title": "" }, { "docid": "2c3a4889e273f44eca966f92db692f52", "score": "0.44107184", "text": "def cf_negotiated_failover_enable(self, module):\n return self.request( \"cf-negotiated-failover-enable\", {\n 'module': [ module, 'module', [ basestring, 'None' ], False ],\n }, {\n } )", "title": "" }, { "docid": "e236cc2c005d40ae0b8d93a0b5633048", "score": "0.43959185", "text": "def create_session(self):\n pass", "title": "" }, { "docid": "b23822a39d29ffea08474db2ed7539ad", "score": "0.43908015", "text": "def start_session():\n default_client.session_tracker.start_session()", "title": "" }, { "docid": "30c52c8a6a445ade1b0d6bd53f451327", "score": "0.4380721", "text": "def config_nat_feature_enabled(request, duthost):\n if request.config.getoption(\"--enable_nat_feature\"):\n feature_status, _ = duthost.get_feature_status()\n if feature_status['nat'] == 'disabled':\n duthost.shell(\"sudo config feature state nat enabled\")\n time.sleep(2)", "title": "" }, { "docid": "627babc371124845b6a41c192991897e", "score": "0.4375243", "text": "def load_balance(cls, workshop, check_available=True):\n\n l.debug(\"Load balance for a %s workshop\", workshop)\n\n # Check if any servers have an available session\n servers = db.get_all_servers()\n if not bool(servers):\n l.error(\"Attempting to load balance with no servers!\")\n return None\n\n if check_available:\n for server in servers:\n l.debug(\" ... checking for available session at server: %s\", server[\"ip\"])\n\n count = db.session_count_by_workshop(workshop, server['ip'], True)\n l.debug(\" ... found %d available\", count)\n\n if count > 0:\n return server['ip']\n\n l.debug(\" ... checking if we can spawn a new session\")\n\n instances = 0\n for server in servers:\n instances += db.session_count_by_workshop(workshop, server['ip'])\n l.debug(\" ... total instances for %s: %d\", workshop, instances)\n\n max_instances = db.get_workshop(name=workshop)['max_instances']\n l.debug(\" ... max instances allowed: %d\", max_instances)\n if instances >= max_instances:\n l.error(\"Maximum number of instances met or exceeded.\")\n return None\n\n # Find server with least amount of workshops running\n l.debug(\" ... selecting server with least amount of workshops running\")\n sessions = 9999\n min_ip = 0\n for i, server in enumerate(servers):\n count = db.session_count(server['ip'])\n l.debug(\" ... 
session count for %s: %d\", server['ip'], count)\n if count < sessions:\n sessions = count\n min_ip = i\n l.debug(\" ... setting min_ip to %s\", server['ip'])\n\n # Finally ensure the server has enough resources\n server = servers[min_ip]\n try:\n if server['mem'] < float(config['REMU']['mem_limit']) and \\\n server['hdd'] < float(config['REMU']['hdd_limit']):\n return server['ip']\n except KeyError:\n l.warn(\" ... making decision without hardware check!\")\n return server['ip']\n\n l.error(\" ... unable to find a suitable server!\")\n return None", "title": "" }, { "docid": "055c86ed4fbc11c5261dc84902f52e3e", "score": "0.43619657", "text": "def sdrs_io_load_balance_enabled(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"sdrs_io_load_balance_enabled\")", "title": "" }, { "docid": "8bc0cf7214cf099a681753ee0fe2368c", "score": "0.43590868", "text": "def enable_slicing(self):\n self.use_slicing = True", "title": "" }, { "docid": "9fcda04be745fe369659a8dff434b919", "score": "0.43584436", "text": "def enable(self):\n if self._state:\n return\n assert (\n framework.in_dygraph_mode()\n ), \"LazyInit.enable() is only available in dygraph mode.\"\n self._state = True", "title": "" }, { "docid": "ba536ab2daa73fed53d3ec81d8724f3a", "score": "0.4342384", "text": "def headless(self):\n pass", "title": "" }, { "docid": "998a70f307413c71004e6d3bda0ecd6d", "score": "0.43399417", "text": "def enable_nat_feature(request, duthost):\n yield\n if request.node.rep_call.failed:\n duthost.command(\"config nat feature enable\")", "title": "" }, { "docid": "0a49784484ab77601cde7f9b09fa6152", "score": "0.4335255", "text": "def set_master_enable(url, state):\n print('Setting master_enable: {}'.format(state))\n\n body = {\n \"master_enable\": state,\n \"activation\": {\n \"mode\": \"activate_immediate\",\n \"requested_time\": None\n }\n }\n\n send_request(url, body)", "title": "" }, { "docid": "a9210b300b2bf35980e8233cd0e16145", "score": "0.43168586", "text": "def enable_frontend_tls11(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"enable_frontend_tls11\")", "title": "" }, { "docid": "72488976b6f935283042df97475f078b", "score": "0.4303755", "text": "def __init__(self):\n #HOST = 'managesmap'\n HOST = 'karttur'\n secrets = netrc.netrc()\n username, account, password = secrets.authenticators( HOST )\n pswd = b64encode(password.encode())\n #create a query dictionary for connecting to the Postgres server\n query = {'db':'postgres','user':username,'pswd':pswd}\n #Connect to the Postgres Server\n self.session = PGsession.__init__(self,query,'ManageSMAP')", "title": "" }, { "docid": "95537159d1c92a6623450aac3fb9724b", "score": "0.42974052", "text": "def __init__(self, session=..., **options) -> None:\n ...", "title": "" }, { "docid": "f8eeab0e8fbeb4884a88e81b7fdf03cb", "score": "0.4290809", "text": "def _load_session(self):\n pass", "title": "" }, { "docid": "dad18a31111aa4bb43815600f92db8a4", "score": "0.42875123", "text": "def ixNetworkSessionObj():\n return Middleware.ixNetworkSession", "title": "" }, { "docid": "64f6c185e539846f3cd7cf31c67dc7b4", "score": "0.42507628", "text": "def http10_enabled(self) -> bool:\n return pulumi.get(self, \"http10_enabled\")", "title": "" }, { "docid": "0699b0d7a5794314e36b25efd0d9c094", "score": "0.42475647", "text": "def _session(self):\n return", "title": "" }, { "docid": "13b98846c3b830ccc754e98a6192dda9", "score": "0.42473504", "text": "def __init__(self, session):\n self._mgmt_proxy = session.proxy(RwBaseYang)", "title": "" }, { "docid": 
"2786f93e8ed5ee9969f75b5812b19160", "score": "0.42462185", "text": "def allocates_vip(self):\n return False", "title": "" }, { "docid": "85e8424b7b76a6de17e6d1118198c94e", "score": "0.4243062", "text": "def _begin_new_session(self):\n sess_manager = self.gracie_server.sess_manager\n self.session = dict()\n session_id = sess_manager.create_session(self.session)\n self.session['session_id'] = session_id", "title": "" }, { "docid": "5fe12a47620a26d7741c813306fb39ac", "score": "0.4233341", "text": "def is_pool(self):\n return True", "title": "" }, { "docid": "357bf3b54da79b0b057905052e5a8861", "score": "0.422754", "text": "def test_SAMPHubServer():\n SAMPHubServer(web_profile=False, mode=\"multiple\", pool_size=1)", "title": "" }, { "docid": "a07d62d4a74df13c890d5c2b0a5fe85d", "score": "0.42272595", "text": "def supports(self, session):\n return not session.is_local", "title": "" }, { "docid": "86ad954557852deedd7fcfe86aa796bd", "score": "0.4226865", "text": "def __init__(self):\n\n self._boto_session = boto3.Session()\n self._region = self._boto_session.region_name\n self._account_id = boto3.client(\"sts\").get_caller_identity()[\"Account\"]\n self._s3_client = boto3.client('s3', region_name=self._region),\n self._sm_client = self._boto_session.client(service_name='sagemaker', \n region_name=self._region)\n self._featurestore_runtime = self._boto_session.client(service_name='sagemaker-featurestore-runtime', \n region_name=self._region)\n self._feature_store_session = Session(\n boto_session=self._boto_session,\n sagemaker_client=self._sm_client,\n sagemaker_featurestore_runtime_client=self._featurestore_runtime\n )\n self._sm_sess = sagemaker.Session()\n self._default_bucket = self._sm_sess.default_bucket()", "title": "" }, { "docid": "c604f08987eb5acb5be91ad6ddfb5ae4", "score": "0.42265862", "text": "def IsDistributed(self):\r\n return False", "title": "" }, { "docid": "18fb2dfef5dc8065b4165ea1535dedb5", "score": "0.42187282", "text": "def __init__(self, driver_handles_share_servers, *args, **kwargs):\n super(ShareDriver, self).__init__()\n self.configuration = kwargs.get('configuration', None)\n self.initialized = False\n self._stats = {}\n self.ip_versions = None\n self.ipv6_implemented = False\n # Indicates whether a driver supports update of security services for\n # in-use share networks. This property will be saved in every new share\n # server.\n self.security_service_update_support = False\n # Indicates whether a driver supports adding subnet with its\n # allocations to an in-use share network availability zone. 
This\n # property will be saved in every new share server.\n self.network_allocation_update_support = False\n self.dhss_mandatory_security_service_association = {}\n\n self.pools = []\n if self.configuration:\n self.configuration.append_config_values(share_opts)\n network_config_group = (self.configuration.network_config_group or\n self.configuration.config_group)\n admin_network_config_group = (\n self.configuration.admin_network_config_group)\n else:\n network_config_group = None\n admin_network_config_group = (\n CONF.admin_network_config_group)\n\n self._verify_share_server_handling(driver_handles_share_servers)\n if self.driver_handles_share_servers:\n # Enable common network\n self.network_api = network.API(\n config_group_name=network_config_group)\n\n # Enable admin network\n if admin_network_config_group:\n self._admin_network_api = network.API(\n config_group_name=admin_network_config_group,\n label='admin')\n\n for config_opt_set in kwargs.get('config_opts', []):\n self.configuration.append_config_values(config_opt_set)\n\n if hasattr(self, 'init_execute_mixin'):\n # Instance with 'ExecuteMixin'\n # pylint: disable=no-member\n self.init_execute_mixin(*args, **kwargs)\n if hasattr(self, 'init_ganesha_mixin'):\n # Instance with 'GaneshaMixin'\n # pylint: disable=no-member\n self.init_ganesha_mixin(*args, **kwargs)", "title": "" }, { "docid": "4329c2b22f43b82b9aaea472f1cce0d0", "score": "0.42184854", "text": "def enable_backend_tls10(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"enable_backend_tls10\")", "title": "" }, { "docid": "e4b417fa89b0a91d7405fce799966388", "score": "0.42148817", "text": "def _FailoverHA(self):\n raise NotImplementedError('Failover is not implemented.')", "title": "" }, { "docid": "1a6c9ef575d0b5bbdb774411a98e39bc", "score": "0.42108035", "text": "def nvme_over_tcp_enabled(self, nvme_over_tcp_enabled):\n\n self._nvme_over_tcp_enabled = nvme_over_tcp_enabled", "title": "" }, { "docid": "917378676090a323f9017b84a7f3484f", "score": "0.42106506", "text": "def DefaultCachePolicy(self) -> _n_6_t_0:", "title": "" }, { "docid": "8b996287e91f90318e26b56edcc524f9", "score": "0.4208546", "text": "def _session_keepalive(self) -> None:\n if not self.session_keepalive:\n return\n pool = ThreadPoolExecutor()\n pool.submit(self._keepalive_thread)", "title": "" }, { "docid": "eb75c1fedbc80f47f6af69c078cef345", "score": "0.42079774", "text": "def stateless_job(self, **kwargs):\n kwargs.setdefault(\"config\", self.integ_config)\n kwargs.setdefault(\"client\", self.client)\n return sjob.StatelessJob(**kwargs)", "title": "" }, { "docid": "1ecbe18ce69b907a78f5772a7801bf1c", "score": "0.42067197", "text": "def enable_http_port_access(self) -> bool:\n return pulumi.get(self, \"enable_http_port_access\")", "title": "" }, { "docid": "15c5208f2182733bb25a6e07e9e90b0e", "score": "0.42047563", "text": "def session_affinity_config(self) -> Optional[\"SessionAffinityConfig\"]:\n return self.__session_affinity_config", "title": "" }, { "docid": "e39c59715a7e995d2b5f2b1338f73111", "score": "0.42044652", "text": "def set_session(session):", "title": "" }, { "docid": "a8a690bafc4fc60d1ff868638590999e", "score": "0.4199035", "text": "def sslbetlsv11sessionsrate(self) :\n\t\ttry :\n\t\t\treturn self._sslbetlsv11sessionsrate\n\t\texcept Exception as e:\n\t\t\traise e", "title": "" }, { "docid": "3ae7c5c253e1801caa0f3b97883ece71", "score": "0.41987422", "text": "def BypassProxyOnLocal(self) -> bool:", "title": "" }, { "docid": "8873a6ae8f5b02ae46a791386fb59d82", "score": 
"0.4187041", "text": "def api_sync_default_session() -> None:\n func = enable_if.unique([sync_default_session])\n return func()", "title": "" }, { "docid": "5324d1f7af23b34ee466e51ae934a1c0", "score": "0.417859", "text": "def enableSharing(self):\n\n self.conn.adminSetServerSettings({\n cups.CUPS_SERVER_SHARE_PRINTERS: '1'})", "title": "" }, { "docid": "bd53e93c75dba6766d622894f7eebd4f", "score": "0.4171449", "text": "def is_pool(self):\n return False", "title": "" }, { "docid": "7d8b3b38fbde6eb1c83b105ca58612a5", "score": "0.41697457", "text": "def __init__(self, session):\n self.session = session", "title": "" }, { "docid": "e73f693823c055f14b973784742cff05", "score": "0.4164812", "text": "def set_scrobbling(self, social_provider, scrobbling_state):\r\n spotify.Error.maybe_raise(lib.sp_session_set_scrobbling(\r\n self._session._sp_session, social_provider, scrobbling_state))", "title": "" }, { "docid": "dd831245ce56e95abc6dfd10daaecc49", "score": "0.4155446", "text": "def _auto_enable_graph_kernel(device_target, graph_kernel_mode):\n return graph_kernel_mode in (\"auto\", \"true\") and device_target == 'GPU' and \\\n cfg.bert_network == 'base' and cfg.optimizer == 'AdamWeightDecay'", "title": "" }, { "docid": "f25ce304bba7624900d845b6d50c7f66", "score": "0.4154199", "text": "def cache_enabled(self):\n if not SERVICE_CACHING_ENABLED or not self.session:\n return False\n return True", "title": "" }, { "docid": "a87a4cfcb47a27f973b766c624647c3f", "score": "0.4152566", "text": "def ensure_connection_draining_enabled(self):\n\n Log.info(\"Ensuring Connection Draining is turned on\")\n # Get LB Attributes\n lb_attributes = self.elb_client.describe_load_balancer_attributes(\n LoadBalancerName = self.elb[\"LoadBalancerName\"]\n )[\"LoadBalancerAttributes\"]\n\n # Conditionally Enabled ConnectionDraining\n if not lb_attributes[\"ConnectionDraining\"][\"Enabled\"]:\n Log.info(\"Enabling Connection Draining\")\n self.elb_client.modify_load_balancer_attributes(\n LoadBalancerName = self.elb[\"LoadBalancerName\"]\n , LoadBalancerAttributes = {\n 'ConnectionDraining': { 'Enabled': True, 'Timeout': 300 }\n }\n )", "title": "" }, { "docid": "50e165debceb7625d2bd0b835cf3279b", "score": "0.41519228", "text": "def enabled(self):\n return True", "title": "" }, { "docid": "9606a749c8f1cc62aa51abff11aaad93", "score": "0.41516796", "text": "def configure(self, cfg_params):\n\n SchedulerLocal.configure(self, cfg_params)\n\n self.proxyValid=0\n self.dontCheckProxy=int(cfg_params.get(\"GRID.dont_check_proxy\",0))\n self.space_token = cfg_params.get(\"USER.space_token\",None)\n self.proxyServer= 'myproxy.cern.ch'\n self.group = cfg_params.get(\"GRID.group\", None)\n self.role = cfg_params.get(\"GRID.role\", None)\n self.VO = cfg_params.get('GRID.virtual_organization','cms')\n\n try:\n tmp = cfg_params['CMSSW.datasetpath']\n if tmp.lower() == 'none':\n self.datasetPath = None\n self.selectNoInput = 1\n else:\n self.datasetPath = tmp\n self.selectNoInput = 0\n except KeyError:\n msg = \"Error: datasetpath not defined \"\n raise CrabException(msg)\n\n self.checkProxy()\n\n return", "title": "" }, { "docid": "cea07caab336000a237125920be18c82", "score": "0.41489127", "text": "def multi_cluster_tests_enabled():\n return os.getenv('COOK_MULTI_CLUSTER') is not None", "title": "" }, { "docid": "45b18056638dd725a9824ab0d36f0ffc", "score": "0.41466558", "text": "def scaling_enabled(self):\n return False", "title": "" }, { "docid": "87e925a3c55f8f79a8242d9264742621", "score": "0.4138656", "text": "def 
EnableSwitchToStats(self):\r\n\t\treturn self._get_attribute('enableSwitchToStats')", "title": "" } ]
7332477105e1efa102399b422ee7a4e8
Uniform Cost Search algorithm
[ { "docid": "d5f48166743c873d97767e611ef4e795", "score": "0.57536024", "text": "def uniform_cost_search(origin_id, destination_id, map, type_preference=0):\n\n list_of_path = [Path(origin_id)]\n\n while len(list_of_path) != 0 and destination_id not in list_of_path[0].route:\n first_element_list = list_of_path[0]\n expanded_path = expand(first_element_list, map)\n expanded_path = remove_cycles(expanded_path)\n expanded_path = calculate_cost(expanded_path, map, type_preference)\n list_of_path = insert_cost(expanded_path, list_of_path)\n\n return list_of_path[0]", "title": "" } ]
[ { "docid": "194d4fc2a27e6e232cd9c7027070e744", "score": "0.82053727", "text": "def uniformCostSearch(problem):\n \"*** DO NOT IMPLEMENT ***\"\n util.raiseNotDefined()", "title": "" }, { "docid": "bb4ae15eaacc2394f7027814947b26a3", "score": "0.8203106", "text": "def uniformCostSearch(problem):\n return aStarSearch(problem)", "title": "" }, { "docid": "4bb4a67cc168353d3d4e3662fb8efe9a", "score": "0.768976", "text": "def uniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\"\n util.raiseNotDefined()", "title": "" }, { "docid": "4bb4a67cc168353d3d4e3662fb8efe9a", "score": "0.768976", "text": "def uniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\"\n util.raiseNotDefined()", "title": "" }, { "docid": "4bb4a67cc168353d3d4e3662fb8efe9a", "score": "0.768976", "text": "def uniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\"\n util.raiseNotDefined()", "title": "" }, { "docid": "9e573ab424987f9a471c378f9db7d764", "score": "0.76568115", "text": "def uniformCostSearch(problem: SearchProblem[State]):\n \"*** YOUR CODE HERE ***\"\n return aStarSearch(problem)", "title": "" }, { "docid": "aebcf2ac2f99f953acde93ad2abcbe05", "score": "0.75917655", "text": "def uniform_cost_search(problem):\n return best_first_search(problem,\n lambda n: n.path_cost)", "title": "" }, { "docid": "f1111265d5452de3f3611d47d23c86c9", "score": "0.7496332", "text": "def uniformCostSearch(problem):\n \"\"\" we call astar search with null heuristic\"\"\"\n return aStarSearch(problem)", "title": "" }, { "docid": "4d528e2b1877e18b1897b2b38e74974a", "score": "0.74873596", "text": "def UniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\" \n util.raiseNotDefined()", "title": "" }, { "docid": "3a96004f495d4ac70d79629130d76ccc", "score": "0.74268585", "text": "def uniformCostSearch(problem):\n def nodeAddition(successor, successors, current, problem):\n weight = successor[2] + current[2]\n successors.push(\n (successor[0], successor[1], weight, current), weight)\n\n # Fila de sucessores que contera somente os nodos nao percorridos\n successors = PriorityQueue()\n # O pai eh adicionado a tupla dos nodos\n successors.push((problem.getStartState(), 0, 0, None), 0)\n\n return genericSearch(successors, problem, nodeAddition)", "title": "" }, { "docid": "e119e4e4ec51f0952da0b8e175bf77ba", "score": "0.742345", "text": "def uniformCostSearch(problem):\n frontier = PriorityQueueWithFunction(lambda item: item.cost)\n return genericSearch(problem, frontier)", "title": "" }, { "docid": "2808789e79ce071aa71b0326f0b00845", "score": "0.740106", "text": "def uniformCostSearch(problem):#DIJKSTRA\n \"*** YOUR CODE HERE ***\"\n\n \"\"\"f(n)=g(n)\"\"\"\n queue = util.PriorityQueue()#we create a PriorityQueue because we want to work with compared costs\n visited = []#empty list\n queue.push((problem.getStartState(), [], 0), 0)#we add first/start node\n while not queue.isEmpty():#while the queue in not empty\n actualstate, path, actualcost = queue.pop()#now we are working with three attributes,\n #such as the actual node, the path we take and how much this path costs\n if problem.isGoalState(actualstate):#we compare if the actualstate is our goal\n return path#if it is the goal, return the path to that node\n if actualstate in visited:#if node in visited, we continue\n continue\n visited.append(actualstate)\n for state, newpath, cost in problem.getSuccessors(actualstate):#we have three parametres that we have to take into account\n if state not in visited:#is the state not in visited we have to add it\n queue.push((state, path + [newpath], 
actualcost + cost), actualcost + cost)#we add the node to the queue\n #here in cost we acummulate the previous cost and the newest one", "title": "" }, { "docid": "25f7432b184dc9de4b32eaaf358760c6", "score": "0.735672", "text": "def uniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\"\n frontier = util.PriorityQueue() #we use a priority queue to keep the cost of every edge\n frontier.push(problem.getStartState(),0)\n explored = []\n parent = {}\n parent[problem.getStartState()] = (0, 0, 0) #the parent dictionary in thhat case will keep not only the state and the action but the cost of each path we take\n while(True):\n if(frontier.isEmpty() == True):\n break\n else:\n current = frontier.pop()\n if(problem.isGoalState(current) is True): #same method as in dfs for finding the final path\n path = []\n cur_prev = parent[current]\n path.append(cur_prev[1])\n while parent[cur_prev[0]] != (0, 0, 0):\n path.append(parent[cur_prev[0]][1])\n cur_prev = parent[cur_prev[0]]\n\n solution = []\n for i in reversed(path):\n solution.append(i)\n return solution\n else:\n explored.append(current)\n for next_step in problem.getSuccessors(current):\n cost = next_step[2] + parent[current][2] #for every node we search the total cost equals the addition of each node before that plus the cost of the current node\n if((next_step[0] not in [item[2]for item in frontier.heap]) and (next_step[0] not in explored)):\n frontier.push(next_step[0], cost)\n parent[next_step[0]] = (current, next_step[1], cost)\n\n elif(next_step[0] in [item[2]for item in frontier.heap]) and (next_step[0] not in explored) and (cost<parent[next_step[0]][2]): #if the node's already in frontier but the cost from the current path we are searching is smaller that the previous we followed to get there\n frontier.update(next_step[0], cost) #upgrade the frontier with the new cost and change the dictionary\n parent[next_step[0]] = (current, next_step[1], cost)\n\n util.raiseNotDefined()", "title": "" }, { "docid": "fd80fa978c6b07e2f488a47a470a8838", "score": "0.7355223", "text": "def uniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\"\n # Here we implement the uniformCostSearch by first creating a priority queue, 2 lists and 2 dictionaries.\n # The priority queue is the fringe that contains the still unexpanded nodes (that can be visited) ordered by the\n # least costpath.\n # The list closed contains the nodes that have been visited.\n # The list actions contains the results.\n # The dictionary path_cost contains the cost of the path.\n # The dictionary dict contains all the child nodes with the parent node as a reference.\n # It checks the same conditions as DFS and BFS. 
The only difference is that in the else-statement it calculates the\n # the current (up to now) cost path and push the node and cost path on the fringe if the node has successors.\n startState = (problem.getStartState(), 0, 0)\n fringe = util.PriorityQueue()\n closed = []\n dict = {}\n path_cost = {}\n path_cost[startState] = startState[2]\n actions = []\n fringe.push(startState, 0)\n\n while not fringe.isEmpty():\n node = fringe.pop()\n\n if node[0] not in closed:\n\n if problem.isGoalState(node[0]):\n current = node\n while (current != startState):\n actions.append(current[1])\n current = dict[current]\n actions.reverse()\n return actions\n\n else:\n closed.append(node[0])\n succ = problem.getSuccessors(node[0])\n if not succ:\n continue\n for s in succ:\n cost = path_cost[node] + s[2]\n fringe.push(s, cost)\n if node in dict.keys() and s in dict.values():\n continue\n dict[s] = node\n path_cost[s] = cost\n util.raiseNotDefined()", "title": "" }, { "docid": "a09c3893d09947d52b32abf510796424", "score": "0.73332685", "text": "def uniformCostSearch(problem):\n \"*** YOUR CODE HERE IF YOU WANT TO PRACTICE ***\"\n if problem.name == 'GraphSearch':\n prior_queue = util.PriorityQueueWithFunction(lambda x: x[2])\n prior_queue.push((problem.getStartState(), None, 0))\n visited = []\n path = {}\n path[(problem.getStartState(), None)] = None\n while not prior_queue.isEmpty():\n cur_fullstate = prior_queue.pop()\n if (problem.isGoalState(cur_fullstate[0])):\n break\n else:\n cur_state = cur_fullstate[0]\n if cur_state not in visited:\n visited.append(cur_state)\n else:\n continue\n successors = problem.getSuccessors(cur_state)\n for state in successors:\n cost = cur_fullstate[2] + state[2];\n if state[0] not in visited:\n prior_queue.push((state[0], state[1], cost))\n path[(state[0], state[1])] = cur_fullstate\n \n goal = (cur_fullstate[0], cur_fullstate[1])\n res = []\n while True:\n if path[goal] == None:\n break\n res.append(goal[1])\n goal = (path[goal][0], path[goal][1])\n \n return res[::-1]\n\n elif problem.name == 'PositionSearchProblem' or problem.name == 'AnyFoodSearchProblem':\n prior_que = util.PriorityQueue()\n prior_que.push(problem.getStartState(), 0)\n path, cost = {}, {}\n path[problem.getStartState()] = None\n cost[problem.getStartState()] = 0\n\n goalpos = (-1, -1)\n while not prior_que.isEmpty():\n cur = prior_que.pop()\n if problem.isGoalState(cur):\n goalpos = cur\n break\n for nextnode in problem.getSuccessors(cur):\n new_cost = cost[cur] + nextnode[2]\n if nextnode[0] not in cost or new_cost < cost[nextnode[0]]:\n cost[nextnode[0]] = new_cost\n path[nextnode[0]] = cur\n prior_que.push(nextnode[0], new_cost)\n if goalpos == (-1, -1):\n return \"CAN NOT FIND GOAL\"\n\n def pos_2_dir(successor, predecessor):\n if predecessor[1] - successor[1] == 0:\n if predecessor[0] - successor[0] == 1:\n return 'West'\n else:\n return 'East'\n elif predecessor[1] - successor[1] == 1:\n return 'South'\n else:\n return 'North'\n \n res = []\n while True:\n rootpos = path[goalpos]\n if rootpos == None:\n break\n res.append(pos_2_dir(goalpos, rootpos))\n goalpos = rootpos\n\n return res[::-1]\n\n util.raiseNotDefined()", "title": "" }, { "docid": "57af5fa0448710c285686291ef521e99", "score": "0.7287931", "text": "def uniform_cost_search(problem):\n return tree_search(util.PriorityQueueWithFunction(lambda item: item.get_cost()),\n problem)", "title": "" }, { "docid": "a33493e8337ae1e775aae7232ed63e73", "score": "0.7274068", "text": "def uniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\"\n # 
util.raiseNotDefined()\n\n # pathList [(point, [directions..], cost)..]\n from util import PriorityQueue\n \n def ucs(Plist,visited,problem):\n if Plist.isEmpty():\n return False,[]\n pointN,direction,cost = Plist.pop()\n if problem.isGoalState(pointN):\n return True,direction\n for pointN2,directionN2,costN2 in problem.getSuccessors(pointN):\n if pointN2 not in visited:\n visited.append(pointN2)\n DN = direction[:]\n DN.append(directionN2)\n cost=cost+costN2\n Plist.push((pointN2,DN,cost),cost)\n return ucs(Plist,visited,problem)\n point = problem.getStartState()\n Plist = PriorityQueue()\n Plist.push((point,[],0),0)\n visited = [point]\n Win,direction = ucs(Plist,visited,problem)\n return direction", "title": "" }, { "docid": "c789b8922b80ad59e4729fa9ccfe9c73", "score": "0.7195446", "text": "def uniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\"\n from util import PriorityQueue\n \n # We create a priority queue and fill it with the start state\n PriorityQueue = util.PriorityQueue()\n for successor in problem.getSuccessors(problem.getStartState()):\n # We use a priority queue and use the cost as our priority\n PriorityQueue.push((successor, []), successor[2])\n \n # Once again, empty visited set\n visitedSet = set()\n \n # This checks if a given priority queue contains a given coord\n def pQueueContainsNode(PriorityQueue, coord):\n for item in PriorityQueue.heap:\n if item[0] == coord:\n return True\n return False\n \n # This is our infinite loop\n while True:\n \n # If empty we failed (or the problem was impossible)\n if PriorityQueue.isEmpty():\n return False\n \n # Pop the node and path from the priority queue \n currentNode, currentPath = PriorityQueue.pop()\n \n # Yay we won\n if problem.isGoalState(currentNode[0]):\n solution = currentPath + [currentNode[1]]\n # Return the spoils of our search victory\n return solution\n \n # If the node is not on the fringe and not visited \n if not(pQueueContainsNode(PriorityQueue, currentNode[0])) and not(setContainsNode(visitedSet, currentNode[0])):\n # We mark it as visited\n visitedSet.add(currentNode[0])\n path = currentPath + [currentNode[1]]\n # Once again, we find the successors and use the cost as our priority\n for successor in problem.getSuccessors(currentNode[0]):\n PriorityQueue.push((successor, path), successor[2])", "title": "" }, { "docid": "bf94c3eb086f7ff40df9f9e59f697807", "score": "0.715062", "text": "def uniformCostSearch(problem):\n # use the problem cost function\n def costFn(xxx_todo_changeme1):\n (state, actions) = xxx_todo_changeme1\n return problem.getCostOfActions(actions)\n # use a cost-ordered priority queue and graphSearch\n frontier = util.PriorityQueueWithFunction(costFn)\n return graphSearch(problem, frontier)", "title": "" }, { "docid": "97316e25e382f08c3dda0305905fbdb0", "score": "0.71065396", "text": "def uniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\"\n explored = []\n frontier = util.PriorityQueue()\n tNode = (problem.getStartState(),[]) #the node consists of the state and the actions taken so far\n frontier.push( tNode , 0) #insert into the priority queue with cost 0\n\n while(True):\n \tif(frontier.isEmpty()):\n \t\treturn None\n\n \ttNode = frontier.pop()\n \tif(problem.isGoalState(tNode[0])):\n \t\treturn tNode[1] #return the list of actions #test changes\n\n \tif tNode[0] not in explored:\n \t\tsuccessors = problem.getSuccessors(tNode[0])\n \t\tfor action in successors:\n \t\t\tnextState = action[0]\n\n \t\t\tif nextState not in explored:\n \t\t\t\tnewAction = action[1]\n \t\t\t\tnewActionList = 
tNode[1] + [newAction] #append the action taken to the node's list of actions\n \t\t\t\tnewNode = (nextState,newActionList)\n \t\t\t\tfrontier.push(newNode,problem.getCostOfActions(newActionList))\n\n \texplored.append(tNode[0]) #add the expanded state to the list of explored states\n\n return None", "title": "" }, { "docid": "0fcbb98b4cfacd646877f760a637b95d", "score": "0.70962536", "text": "def uniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\"\n frontier = util.PriorityQueue()\n frontier.push([[problem.getStartState(), 'Initial', 0]], 0)\n explored = set()\n #print (problem.goal)\n #n_iters = 30\n #for i in range(n_iters):\n while True:\n print('Frontier:')\n print(frontier.heap)\n if len(frontier.heap) == 0:\n return -1\n path = frontier.pop()\n s = path[-1][0]\n explored.add(s)\n print (s)\n print('explored:')\n print(explored)\n if problem.isGoalState(s):\n #return actions\n return [p[1] for p in path][1:]\n \n for successor in problem.getSuccessors(s):\n print ('succ:')\n print (successor)\n #if successor[0] not in explored:\n if successor[0] not in sum([f[2] for f in frontier.heap], []):\n frontier.push(path + [successor], successor[2])", "title": "" }, { "docid": "4bdf06a54ab65ce81fbe9278baeab33d", "score": "0.6943271", "text": "def uniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\"\n #Add starting state to stack\n frontier = util.PriorityQueue()\n startState = problem.getStartState()\n\n #Frontier tuple: (x,y,path,cost)\n tup = (startState,[],0)\n frontier.push(tup ,0)\n\n #Frontier\n visitedNodes = []\n\n #While moves are available\n while(not frontier.isEmpty()):\n #Get next move in frontier\n node = frontier.pop()\n state = node[0]\n pathcost = node[2]\n\n #Check if node was already visited\n evaluateNode = True\n for vis in visitedNodes:\n if(vis[0] == state ):\n if(vis[1] > pathcost):\n #remove this node from visited\n visitedNodes.remove(vis)\n break\n else:\n evaluateNode = False\n break\n if not evaluateNode:\n continue\n\n #Add to visited nodes\n visitedNodes.append( (state,pathcost) )\n\n #Return path if goal found\n if(problem.isGoalState(state)):\n return node[1]\n #Add successors to list\n else:\n successors = problem.getSuccessors(state)\n for s in successors:\n succ = s[0]\n #Add new move to path\n path = node[1] + [s[1]]\n #Increase cost\n cost = pathcost + s[2]\n #Push successor onto stack with updated path\n frontier.push( (succ,path,cost), cost)", "title": "" }, { "docid": "799a5ba8ee10309f04c9aadb4beb6573", "score": "0.69016933", "text": "def uniformCostSearch(problem):\n visited = [] # Create a list to store visited nodes (cycle detection)\n frontier = util.PriorityQueue() # Create a Priority Queue to maintain node visiting order\n startNode = (problem.getStartState(), 'Start', 0)\n frontier.push(startNode, 0) # Push the starting state onto the priority queue\n predecessors = {} # Create dictionary to keep track of parent nodes\n cost = {startNode : 0} # Create dictionary to keep track of cheapest cost to get to a node\n while (not frontier.isEmpty()):\n node = frontier.pop() # Visit the top node on the priority queue (cheapest cost node)\n if problem.isGoalState(node[0]): # Check if goal state has been reached\n path = [] # Goal state has been reached and a path needs to be created\n currNode = node\n while (currNode[1] != 'Start'):\n path = [currNode[1], *path] # Add direction to get to node to path\n currNode = predecessors[currNode] # Set the current node to its parent node\n return path # Path has been fully constructed\n if node[0] not in 
visited:\n visited.append(node[0]) # Node has now been visited\n for successor in problem.getSuccessors(node[0]):\n currCost = cost[node] + successor[2]\n if successor[0] not in visited:\n frontier.update(successor, currCost) # Add all successors to the queue\n if (successor not in cost)\\\n or (currCost < cost[successor]):\n predecessors[successor] = node # Define the predecessor to all successors as the popped node\n cost[successor] = currCost\n return [] # No goal state was found", "title": "" }, { "docid": "5f1720754fb5a6cc1fd62823019369f1", "score": "0.6887092", "text": "def uniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\"\n # Initialize the queue using the already-built in util.py package\n PriorityQueue = util.PriorityQueue()\n\n # Initialize the set of Visited Cells, which we use to keep track of which cells we've visited\n VisitedCells = []\n\n # Get the Starting Cell, using the command that is already built into the project.\n StartCell = problem.getStartState()\n\n # Initialize the Starting Pair, which could be something like ( [3,2], [] ). In other words, the\n # start cell is [3,2] and the second entry is [] since we don't have to do anything to get to [3,2].\n StartCost = 0\n StartPair = [StartCell, [], StartCost]\n PriorityQueue.push(StartPair, StartCost)\n\n while not PriorityQueue.isEmpty():\n\n CurrentPair = PriorityQueue.pop()\n CurrentCell = CurrentPair[0]\n DirectionsToCell = CurrentPair[1]\n\n if problem.isGoalState(CurrentCell):\n return DirectionsToCell\n\n else:\n if CurrentCell not in VisitedCells:\n VisitedCells.append(CurrentCell)\n SuccessorList = problem.getSuccessors(CurrentCell)\n\n for Child in SuccessorList:\n if Child[0] not in VisitedCells:\n directions = [Child[1]]\n CurrentCost = CurrentPair[2] + Child[2]\n # Merge directions and cost from parent state and child state and get directions and cost for child\n Childpair = [Child[0], DirectionsToCell + directions, CurrentCost]\n PriorityQueue.push(Childpair, CurrentCost)\n\n\n return []", "title": "" }, { "docid": "af2eaf7b3281b3f0ac3a4986146222d5", "score": "0.6878882", "text": "def uniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\"\n\n output = []\n explored = []\n priority_queue = util.PriorityQueue()\n priority_queue_lst = []\n goal_state = None\n\n root_node = Node(problem.getStartState())\n priority_queue.update(root_node, root_node.cost)\n\n while (priority_queue.isEmpty() == False):\n node = priority_queue.pop()\n if (problem.isGoalState(node.state) == False) and (node.state not in explored):\n explored.append(node.state)\n successors = problem.getSuccessors(node.state)\n for i in range(0, len(successors)):\n if (successors[i][0] not in explored) and (successors[i] not in priority_queue_lst):\n newCost = successors[i][2] + node.cost\n newNode = Node(\n successors[i][0], successors[i][1], newCost, node)\n priority_queue_lst.append(successors[i])\n priority_queue.update(\n newNode, newCost)\n elif (problem.isGoalState(node.state) == True) and (node.state not in explored):\n explored.append(node.state)\n break\n\n while node.parent != None:\n output.append(node.action)\n node = node.parent\n\n return list(reversed(output))", "title": "" }, { "docid": "29928a05f093cd6f31a4cf8c41cf7c54", "score": "0.68770945", "text": "def uniformcost(start,coreAlgorithm=treesearch):\n return coreAlgorithm (start,\\\n PriorityQueue(lambda a,b:a.gval < b.gval))", "title": "" }, { "docid": "33750deeac10ad0117fadd809c400373", "score": "0.68295074", "text": "def uniformCostSearch(problem):\n \"*** YOUR CODE 
HERE ***\"\n startState = problem.getStartState()\n parent = startState\n from util import PriorityQueue\n children = problem.getSuccessors(startState)\n successors = PriorityQueue()\n visited = list()\n visited.append(startState)\n cost = 0\n for child in children:\n holder = list()\n holder.append(child[1])\n nextChild = Cell(child[0], holder, parent, cost + child[2], child)\n successors.push(nextChild, cost + child[2])\n while successors.isEmpty() == False:\n thisparent = successors.pop()\n if thisparent.getCell() not in visited:\n visited.append(thisparent.getCell())\n if problem.isGoalState(thisparent.getCell()):\n return thisparent.getPath()\n newchildren = problem.getSuccessors(thisparent.getCell())\n for child in newchildren:\n if child[0] not in visited:\n updatePath = thisparent.getPath().copy()\n updatePath.append(child[1])\n nextCell = Cell(child[0], updatePath, thisparent, thisparent.getCost() + child[2], child)\n successors.push(nextCell, nextCell.getCost() + child[2])", "title": "" }, { "docid": "be5a104d2324eb7bf9f77a7a6ed9cac5", "score": "0.6803951", "text": "def uniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\"\n curr_state = problem.getStartState()\n visited_states = set()\t#to store the states already visited\n path = []\n curr_cost = 0\n pQueue = util.PriorityQueue()\t#fringe for UCS is a Priority queue\n pQueue.push((curr_state, path, curr_cost), curr_cost)\n while not problem.isGoalState(curr_state):\t#loop till we reach goal state\n \tif curr_state not in visited_states:\n \t\tvisited_states.add(curr_state)\t#mark state as visited\n \t\tsuccessor_list = problem.getSuccessors(curr_state)\t#get successors\n \t\tfor (successor_state, next_direction, additional_cost) in successor_list:\n \t\t\tpQueue.push((successor_state, path + [next_direction], curr_cost + additional_cost), curr_cost + additional_cost)\n \t(curr_state, path, curr_cost) = pQueue.pop()\n return path\n #util.raiseNotDefined()", "title": "" }, { "docid": "bf4c67abb090141c3a8e5480f6368e21", "score": "0.6779326", "text": "def uniformCostSearch(problem):\r\n \"*** YOUR CODE HERE ***\"\r\n #return breadthFirstSearch(problem)\r\n\r\n returnListOfPath = [] # Stores directions\r\n dictCostTillNode = {} # key : the state , value : The prefix-sum cost\r\n dictPopped = {}\r\n dictVisited = {} # key : the state , value : [parentState , direction from parent state]\r\n startState = problem.getStartState()#Initializing Data structure from util.py\r\n pqueueFringe = PriorityQueue()\r\n dictCostTillNode[startState] = 0\r\n dictVisited[startState] = 1\r\n pqueueFringe.push(startState,dictCostTillNode[startState]) #appending with unknown for uniformity.\r\n \r\n\r\n # If start state is itself a goal state then we return null list\r\n if problem.isGoalState(startState):\r\n return returnListOfPath\r\n # Break the loop if Fringe is empty or goal state reached\r\n while (not pqueueFringe.isEmpty()):\r\n firstState = pqueueFringe.pop()\r\n currentState = firstState\r\n dictPopped[currentState] = True \r\n if(problem.isGoalState(currentState)):\r\n returnListOfPath = getThefirstPAth(dictVisited,startState,currentState) # Backtracks the path to reach goal state\r\n return returnListOfPath\r\n costTillCurrent = dictCostTillNode[currentState]\r\n nextSuccessors = problem.getSuccessors(currentState)\r\n for iSuccessor in nextSuccessors:# can be done in reverse too\r\n nextState,directionToReach,costToPath = iSuccessor\r\n if(not dictPopped.__contains__(nextState)):# Avoiding cycles in graph\r\n 
if(dictCostTillNode.__contains__(nextState)):\r\n if( dictCostTillNode.get(nextState) > costToPath + costTillCurrent):\r\n dictVisited[nextState] = [currentState,directionToReach]\r\n dictCostTillNode[nextState] = costToPath + costTillCurrent\r\n else:\r\n dictCostTillNode[nextState] = costToPath + costTillCurrent\r\n dictVisited[nextState] = [currentState,directionToReach]\r\n pqueueFringe.update(nextState,dictCostTillNode[nextState])\r\n\r\n\r\n return returnListOfPath", "title": "" }, { "docid": "a46c444c7682fea4b89965883475b130", "score": "0.67155355", "text": "def uniformCostSearch(problem):\n \"\"\"\n Pseudocode: \n\n PriorityQueue pqueue\n Add the start location to the priority queue\n Initialize list of visited nodes\n\n while frontier is not empty: \n pop node off the frontier\n if node is goal, return solution\n add node to list of visited \n for each successor of node: \n if successor is not explored or in frontier: \n insert child into frontier\n else if child is in frontier with higher path-cost,\n replace original frontier node with child\n \"\"\"\n priorityQueue = util.PriorityQueue()\n\n visited = []\n\n start = (problem.getStartState(), Directions.STOP, 0)\n\n priorityQueue.push([start], problem.getCostOfActions([Directions.STOP]))\n\n while not priorityQueue.isEmpty(): \n currentPlan = priorityQueue.pop()\n\n currentState = currentPlan[len(currentPlan)-1][0]\n if problem.isGoalState(currentState): \n directions = [ path[1] for path in currentPlan if path[1] != Directions.STOP ] \n return directions\n\n visited.append(currentState)\n\n successors = problem.getSuccessors(currentState)\n for succ in successors: \n succState = succ[0]\n if succState not in visited: \n updatedPlan = currentPlan + [succ]\n updatedPlanCost = problem.getCostOfActions([ path[1] for path in updatedPlan if path[1] != Directions.STOP ])\n priorityQueue.update(updatedPlan, updatedPlanCost)\n\n return None", "title": "" }, { "docid": "c43c2764a2ad4bb10e8fb8623f1fd516", "score": "0.6704289", "text": "def uniformCostSearch(problem):\n\n # Now we use the priority Queue data type, where each element in the Queue\n # has a priority assigned to it at the outset; here the cost per step.\n frontier = util.PriorityQueue()\n # Initialize the explored region\n explored = set()\n\n # Push the root node to the frontier, in the form: node.state,\n # node.actions, node.cost\n frontier.push((problem.getStartState(), []), 0)\n\n # While the Priority Queue has options\n while not frontier.isEmpty():\n # Grab the node and history with the highest priority (lowest cost)\n node, actions = frontier.pop()\n\n # If the node is the goal state\n if problem.isGoalState(node):\n # Return the actions that got us here\n return actions\n\n # Else, let's start exploring Successors\n for coord, direction, steps in problem.getSuccessors(node):\n # If we've never seen it before\n if not coord in explored:\n # Use the convenient GetCostOfActions method to lookup the cost for this new history\n # That new path cost becomes the node.cost in the tree\n frontier.push((coord, actions + [direction]),\n problem.getCostOfActions(actions + [direction]))\n # Also update the explored set\n explored.add(node)\n\n return []", "title": "" }, { "docid": "0f6d85f367d44ac486e034d1b6ba60e0", "score": "0.6669888", "text": "def uniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\"\n # Uniform Cost Search requires PriorityQueue \n pque = PriorityQueue()\n visited = set()\n pque.update(Node(problem.getStartState(), 'none', 'none'), 0)\n\n while not 
pque.isEmpty():\n # Dequeue first element from queue\n current = pque.pop()\n\n # if current node is Goal state then return the path\n if problem.isGoalState(current.state):\n path = current.getPath()\n return path\n \n # find all adjacent nodes of dequeued node current.\n # if adjacent nodes are not visited, then \n # mark it as visited and add into queue.\n if current.state not in visited:\n visited.add(current.state)\n all_successor = problem.getSuccessors(current.state)\n for successor in all_successor:\n child = Node(successor[0], current, successor[1], successor[2]+current.priority)\n pque.update(child, child.priority)\n \n return list()", "title": "" }, { "docid": "6bd9d92ea1ddfa75db550dfc617cca60", "score": "0.66217107", "text": "def compute_cost():\n for i in range(n):\n for j in range(i, n):\n mid = i + (j - i) / 2\n for r in range(i, mid + 1):\n cost[i + 1][j + 1] += A[mid] - A[r]\n for r in range(mid + 1, j + 1):\n cost[i + 1][j + 1] += A[r] - A[mid]", "title": "" }, { "docid": "32f794f4856a2e8a5abee1c1795c96f5", "score": "0.65119004", "text": "def uniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\"\n #node = [state,action,cost ,father]\n node = [problem.getStartState() ,[], 0 , []]\n if problem.isGoalState( node[0]) == True:\n return []\n \n explored = {} #set to remember relationships father-child\n frontier = PriorityQueue()\n frontier.push(node,node[2])\n\n while frontier.isEmpty() == False:\n\n newNode = frontier.pop()\n if problem.isGoalState( newNode[0]) == True:\n #same way to find the path as in the others functions\n path = []\n path.insert(0,newNode[1])\n findpath = explored[(newNode[3])]\n while findpath[1] != [] and findpath[1] != problem.getStartState() : \n path.insert(0,findpath[0])\n findpath = explored[(findpath[1])]\n\n if findpath[1] != []:\n path.insert(0,findpath[0])\n return path\n\n\n if newNode[0] not in explored: #dont explore an already explored node\n #remember father-child and the action from father to child\n #child is the key and the value is action,father \n explored[(newNode[0])] = [ newNode[1] , newNode[3]] \n successors = problem.getSuccessors(newNode[0])\n for succ in successors:\n if succ[0] not in explored and succ not in frontier.heap:\n nod = [succ[0] ,succ[1], succ[2] +newNode[2] , newNode[0]] \n frontier.push(nod,succ[2]+newNode[2])\n\n elif succ in frontier.heap: \n #if this successor already exists in frontier\n #but has bigger cost update this cost with the new smaller cost of this successor\n for counter in frontier.heap:\n if counter[0] == succ[0] and counter[2] > succ[2]:\n succ[2] = succ[2]+newNode[2]\n frontier.update(succ , succ[2])\n\n\n if (frontier.isEmpty() == True):\n return []\n util.raiseNotDefined()", "title": "" }, { "docid": "fc7c8cd40a773c4b17e6db168a7605f4", "score": "0.6318695", "text": "def uniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\"\n stack = util.PriorityQueue()\n path = []\n start_state = (problem.getStartState(), [],0) #get start state and add empty action and cost to get to the start state before pushing on stack\n\n if problem.isGoalState(start_state[0]): #if the start state is a goal state, return an empty list of actions\n print(\"goal reached\")\n return path\n\n visited = [] #list to hold visited states\n parent = {} #to save the (state: parent state/node) pairs\n actions = {} #to save the (state: action to get there) pairs\n stack.push((start_state),start_state[2]) #each stack item is of format (state,action to get to the state, cost to get to the state )\n \n \n while(stack.isEmpty() 
== False): #loop until stack is empty\n \n item = stack.pop() # pop an item from the stack\n\n if item[0] not in visited: #check the visited list before processing, if visited, ignore the state. item[0] means state of the item\n if problem.isGoalState(item[0]): #if its a goal state, add to the solution and return\n path = getPath(item[0],parent,actions,start_state[0]) ##call a function to create a path in reverse order and return the path list in correct order\n return path \n\n visited.append(item[0]) #else append it to the visited list and do:\n\n successors = problem.getSuccessors(item[0]) #push all the of its successors onto the stack\n for scr in successors: #for each successor of the current state\n if scr[0] not in visited: #check if the current successor is already visited, if not, do:\n stack.push((scr[0],scr[1],scr[2]),scr[2]) #push the successor in the same stack item format of (state,action,cost)\n #scr(0) is the state and scr(1) is the action required to get to state scr[0]\n parent[scr[0]] = item[0] # save the parent state (item[0]) of the current state/successor [scr[0]] in the parent dictionary \n actions[scr[0]] = scr[1] #save the action reuired (scr[1]) to get to the current state/successor (scr[0])\n\n return path #if goal is not found return an empty list\n util.raiseNotDefined()\n\n util.raiseNotDefined()", "title": "" }, { "docid": "10d32620579ac879bc4daa21c5282bf6", "score": "0.62027085", "text": "def UCS(graph, edges, edge_id, start, goal):\n priQueue = PriorityQueue()\n\n priQueue.put((0, start, [start])) # (priority, current_node, path, cost)\n\n\n closeList = []\n \n\n \n while priQueue:\n cost, v, path = priQueue.get()\n \n if v in closeList:\n continue\n\n if v == goal:\n pathGraph = path\n break\n\n graph[v][3] = yellow\n graphUI.updateUI()\n \n closeList.append(v)\n \n\n for w in graph[v][1]:\n if w not in closeList: \n\n #put in priority queue\n priQueue.put((\n cost + DistanceEdges(graph[v], graph[w]),\n w,\n path + [w]\n ))\n\n \n #set graphic\n edges[edge_id(v, w)] = [(v, w), white]\n graph[w][3] = red\n graphUI.updateUI()\n time.sleep(1)\n \n \n \n\n \n graph[v][3] = blue\n graphUI.updateUI()\n \n time.sleep(1) \n \n\n graph[start][3] = orange\n \n for i in range(0, len(pathGraph) - 1):\n edges[edge_id(pathGraph[i], pathGraph[i+1])] = [(pathGraph[i], pathGraph[i + 1]), green]\n \n #end\n graph[goal][3] = purple\n graphUI.updateUI() \n \n print(\"Implement Uniform Cost Search algorithm.\")\n pass", "title": "" }, { "docid": "d284b265c68d5e92b3efa200dec2fc56", "score": "0.6095197", "text": "def optimal(input_sets, output_set, idx_dict, memory_limit):\n\n full_results = [(0, [], input_sets)]\n for iteration in range(len(input_sets) - 1):\n iter_results = []\n\n # Compute all unique pairs\n comb_iter = tuple(itertools.combinations(range(len(input_sets) - iteration), 2))\n\n for curr in full_results:\n cost, positions, remaining = curr\n for con in comb_iter:\n\n # Find the contraction\n contract = helpers.find_contraction(con, remaining, output_set)\n new_result, new_input_sets, idx_removed, idx_contract = contract\n\n # Sieve the results based on memory_limit\n new_size = helpers.compute_size_by_dict(new_result, idx_dict)\n if new_size > memory_limit:\n continue\n\n # Build (total_cost, positions, indices_remaining)\n total_cost = cost + helpers.flop_count(idx_contract, idx_removed, len(con), idx_dict)\n new_pos = positions + [con]\n iter_results.append((total_cost, new_pos, new_input_sets))\n\n # Update combinatorial list, if we did not find anything 
return best\n # path + remaining contractions\n if iter_results:\n full_results = iter_results\n else:\n path = min(full_results, key=lambda x: x[0])[1]\n path += [tuple(range(len(input_sets) - iteration))]\n return path\n\n # Update list to iterate over\n full_results = iter_results\n\n # If we have not found anything return single einsum contraction\n if len(full_results) == 0:\n return [tuple(range(len(input_sets)))]\n\n path = min(full_results, key=lambda x: x[0])[1]\n return path", "title": "" }, { "docid": "e4fcd0899557c367ffd0e6c595357435", "score": "0.6079043", "text": "def optimal(input_sets, output_set, idx_dict, memory_limit):\n\n full_results = [(0, [], input_sets)]\n for iteration in range(len(input_sets) - 1):\n iter_results = []\n\n # Compute all unique pairs\n comb_iter = tuple(itertools.combinations(range(len(input_sets) - iteration), 2))\n\n for curr in full_results:\n cost, positions, remaining = curr\n for con in comb_iter:\n\n # Find the contraction\n contract = helpers.find_contraction(con, remaining, output_set)\n new_result, new_input_sets, idx_removed, idx_contract = contract\n\n # Sieve the results based on memory_limit\n new_size = helpers.compute_size_by_dict(new_result, idx_dict)\n if new_size > memory_limit:\n continue\n\n # Build (total_cost, positions, indices_remaining)\n total_cost = cost + helpers.flop_count(idx_contract, idx_removed, len(con), idx_dict)\n new_pos = positions + [con]\n iter_results.append((total_cost, new_pos, new_input_sets))\n\n # Update combinatorial list, if we did not find anything return best\n # path + remaining contractions\n if not iter_results:\n path = min(full_results, key=lambda x: x[0])[1]\n path += [tuple(range(len(input_sets) - iteration))]\n return path\n\n # Update list to iterate over\n full_results = iter_results\n\n # If we have not found anything return single einsum contraction\n if len(full_results) == 0:\n return [tuple(range(len(input_sets)))]\n\n path = min(full_results, key=lambda x: x[0])[1]\n return path", "title": "" }, { "docid": "ba00c5e051b8dc6c0dbdc507aeb7760f", "score": "0.60293686", "text": "def search(self):\n\t\tfor _ in range(0, self.timer):\n\t\t\t#print(_)\n\t\t\tsample_state = self.history.sample_belief()\n\t\t\tself.simulate(sample_state, self.history, 0)\n\t\t\t# optimal_action = self.history.find_optimal_action(self.c)\n\t\t\t# print(optimal_action)\n\n\t\toptimal_action = self.history.find_optimal_action_non_aug()\n\t\t#print(self.history.children[optimal_action].visited)\n\t\t#print(self.history.children[(0,0,1)].visited)\n\t\tprint(optimal_action)\n\t\tprint(self.history.children[optimal_action].value)\n\n\t\t# print(self.history.visited)\n\n\t\t# t = 0\n\t\t# for robotaction in self.history.children:\n\t\t# \tt = t + self.history.children[robotaction].visited\n\t\t# print(t)\n\n\t\ti = 0\n\t\tx = self.history\n\t\twhile len(x.children) > 0 and x != None:\n\t\t\t#print(i)\n\t\t\t# print(x)\n\t\t\t# print(x.children)\n\t\t\tx = x.children[(0, 0, 1)]\n\t\t\ti = i + 1\n\t\t\n\t\tself.history = self.history.children[optimal_action] #prune rest of search tree\n\n\n\t\t#THIS BUG WITH THE 16 LESS THING SHOULD BE FIXED\n\n\t\t# print(self.history.visited)\n\t\t# t = 0\n\t\t# for child in self.history.children:\n\t\t# \tt = t + sum(self.history.children[child].visited)\n\t\t# print(t)\n\n\t\treturn optimal_action", "title": "" }, { "docid": "d1ef71ddbfb8d91588769aac99adb6e4", "score": "0.601759", "text": "def second_chance(open_queue: list, visited_queue: list, best_so_far_name: str, cur_metric: 
str, main_dict: dict,\n node_max_att: int, cur_metric_name: str, best_so_far: dict, tol_error: float, greed_mult: float) -> list:\n\n res_arr = list()\n recover_best = True\n for elem in open_queue:\n if elem[2] == best_so_far_name[cur_metric_name]:\n recover_best = False\n break\n\n for elem in visited_queue: # elem structure: tot_dist, att, cur_name\n # we give node_max_att attempts for a node to make progress with different seed\n if (elem[1] < node_max_att and main_dict[elem[2]]['{}_to_goal'.format(cur_metric_name)] - best_so_far[cur_metric_name] < tol_error[cur_metric_name]): # \\\n # and elem[2] != best_so_far_name[cur_metric]:\n # or main_dict[elem[2]]['{}_to_goal'.format(cur_metric_name)] != best_so_far[cur_metric]:\n if elem[2] == best_so_far_name[cur_metric_name]:\n if recover_best:\n res_arr.append(elem)\n recover_best = False\n break\n else:\n if elem[1] > 1 and check_in_queue(open_queue, elem[2]):\n print('Not adding regular node (already in the queue)')\n else:\n res_arr.append(elem)\n print('Readding \"{}\" with attempt counter: {} and dist: {}'.format(elem[2], elem[1], elem[0]))\n\n elem = main_dict[best_so_far_name[cur_metric_name]]\n if recover_best:\n res_arr.append((elem['{}_dist_total'.format(cur_metric_name)] * greed_mult + elem['{}_to_goal'.format(cur_metric_name)],\n 0, best_so_far_name[cur_metric_name]))\n print('Recovering best')\n else:\n print('Not recovering best (already in the open queue)')\n del elem\n\n return res_arr", "title": "" }, { "docid": "b5caa169619e38c38815669260ea32ac", "score": "0.60173625", "text": "def Search(S,R,L,r,E,fold,p_tup,individual_bankrupt,strategy,concur,check_height): #searches squares within a given radius r to do R&D on. This R&D effort is given by E and if successful states are changed from 1 to 2\r\n m=int(S.shape[0])\r\n n=int(S.shape[1])\r\n p_win=[]\r\n win_col=[]\r\n order=range(0,n) #gives an array of n column values\r\n rand.shuffle(order) #randomizes the column order for the R&D search\r\n \r\n for j in order:\r\n BPF_y=j\r\n BPF_x=fold[j]\r\n if BPF_x>=0 and individual_bankrupt[j]==0: #ensures that there is a BPF point around with R&D can be conducted and the column has a budget\r\n # op.writeBPF(BPF_y,False,filename)\r\n # op.writeBPF(BPF_x,True,filename)\r\n x_val,y_val,num_sites=Search_Index(m,n,BPF_x,BPF_y,r,L,strategy)\r\n if concur == True:\r\n temp_sequence = sorted(rg.generate_sequence(j,n,5))\r\n for ele in temp_sequence:\r\n if fold[ele] >= 0:\r\n # op.writeBPF(ele,False,filename)\r\n # op.writeBPF(fold[ele],True,filename)\r\n x_temp_val,y_temp_val,temp_num_sites = Search_Index(m,n,fold[ele],ele,r,L,strategy)\r\n for elex in x_temp_val:\r\n x_val.append(elex)\r\n for eley in y_temp_val:\r\n y_val.append(eley)\r\n num_sites = num_sites + temp_num_sites\r\n '''\r\n only search for the max bpf\r\n '''\r\n if check_height == True:\r\n temp_sqeuence = sorted(rg.generate_sequence(j,n,5))\r\n candidate = []\r\n for ele in temp_sequence:\r\n candidate.append((ele,fold[ele]))\r\n max_candidate = max(candidate, key=lambda item: item[0])\r\n x_val,y_val,num_sites = Search_Index(m,n,max_candidate[1],max_candidate[0],r,L)\r\n if num_sites>0:\r\n one_index=RD(x_val,y_val,num_sites,E[j],S,R,L)\r\n x_val=one_index[0]\r\n y_val=one_index[1]\r\n for v in range(len(one_index[0])):\r\n c=x_val[v]\r\n d=y_val[v]\r\n count=0\r\n for x in range(max(0,c-1),min(m,c+2)): #searches up and down\r\n if S[x,d]==2:\r\n S[c,d]=2\r\n L[c,d]=S[c,d]\r\n if (c,d) in p_tup and ((c,d) not in p_win):\r\n p_win.append((c,d))\r\n 
win_col.append(j)\r\n count=1\r\n for y in range(max(0,d-1),min(d+2,n)): #searches left and right\r\n if S[c,y]==2:\r\n S[c,d]=2\r\n L[c,d]=S[c,d]\r\n if (c,d) in p_tup and ((c,d) not in p_win):\r\n p_win.append((c,d))\r\n win_col.append(j)\r\n count=1\r\n if count==1:\r\n twos_i=[c]\r\n twos_j=[d]\r\n Y=twocheck(S,L,twos_i,twos_j,p_tup,p_win,win_col,j,strategy) #searches like a chain for further changes to state 2\r\n S=Y[0]\r\n p_tup=Y[1]\r\n p_win=Y[2]\r\n win_col=Y[3]\r\n\r\n return p_win,win_col #array of tuples corresponding to the 'prizes' discovered during this round of Search and win_col is an array of the columns that were doing search when the prizes were found\r", "title": "" }, { "docid": "07067630ddff6869b85d8fc1bd884ed5", "score": "0.6002688", "text": "def test_global_search(self):\n solutions = {'Gutmann' : [2.433653775898717, 4.0, 5.288279914554452],\n 'MSRSM' : [9.6569123529739933, 1, 2.0364710264329515]}\n target_val = -0.1\n dist_weight = 0.5\n for algorithm in RbfoptSettings._allowed_algorithm:\n for method in RbfoptSettings._allowed_global_search_method:\n self.settings.algorithm = algorithm\n self.settings.global_search_method = method\n ref = solutions[algorithm]\n sol = aux.global_search(\n self.settings, self.n, self.k, self.var_lower,\n self.var_upper, self.integer_vars, None, self.node_pos, \n self.rbf_lambda, self.rbf_h, self.Amat, target_val,\n dist_weight, min(self.node_val), max(self.node_val))\n for i in range(self.n):\n tolerance = (self.var_upper[i] - self.var_lower[i])*0.2\n if (algorithm == 'MSRSM' and method == 'solver'):\n # We increase the tolerance for this combination\n # because we know it is really poor\n tolerance *= 5\n lb = ref[i] - tolerance\n ub = ref[i] + tolerance\n msg_lb = ('Lb not satisfied on var {:d}: '.format(i) +\n 'lb {:f} solution {:f} '.format(lb, sol[i]) +\n 'alg {:s} '.format(algorithm) +\n 'method {:s} '.format(method)) \n msg_ub = ('Ub not satisfied on var {:d}: '.format(i) +\n 'ub {:f} solution {:f} '.format(ub, sol[i]) +\n 'alg {:s} '.format(algorithm) +\n 'method {:s} '.format(method))\n self.assertLessEqual(lb, sol[i], msg=msg_lb)\n self.assertLessEqual(sol[i], ub, msg=msg_ub)\n for i in self.integer_vars:\n msg=('Variable {:d} not integer in solution'.format(i)\n + ' alg {:s} '.format(algorithm) +\n 'method {:s}'.format(method))\n self.assertAlmostEqual(abs(sol[i] - round(sol[i])), 0.0,\n msg=msg)", "title": "" }, { "docid": "09fccc1777514608f5830e27017480f9", "score": "0.59875757", "text": "def stochastic_local_search(k,n):\n\t# import ipdb\n\t# ipdb.set_trace(context=10)\n\tmax_tries, max_steps = 30, 100\n\tprobability = 0.3\n\tcode_pool_length = len(dna_codes)\n\tbest_S = []\n\tfor i in range(max_tries):\n\t\tS = [[dna_codes[np.random.randint(code_pool_length)] for x in range(n)] for x in range(k)]\n\t\tbest_S = list(S)\n\t\tfor j in range(max_steps):\n\t\t\tif check_constraints(S):\n\t\t\t\treturn (S, count_conflict_constraints(S))\n\t\t\tnot_sat_constraints = count_conflict_constraints(S)\n\t\t\t# randomly select 2 words that violate one of the constraints\n\t\t\tindex1, index2=np.random.randint(k), np.random.randint(k)\n\t\t\tw1, w2 = S[index1], S[index2]\n\t\t\twhile check_constraints([w1,w2]):\n\t\t\t\tindex1, index2=np.random.randint(k), np.random.randint(k)\n\t\t\t\tw1, w2 = S[index1], S[index2]\n\t\t\t# all words by subst one base\n\t\t\tM1, M2 = all_substitutes_one_char(w1), all_substitutes_one_char(w2)\n\t\t\tMunion = M1 + M2\n\t\t\tw = None\n\t\t\tif np.random.uniform() < probability:\n\t\t\t\ttemp = 
np.random.randint(len(Munion))\n\t\t\t\tw = Munion[temp]\n\t\t\telse:\n\t\t\t\tmax_violations_resolved = 0\n\t\t\t\tfor word in Munion:\n\t\t\t\t\tcopy_S = list(S)\n\t\t\t\t\tif word in M1:\n\t\t\t\t\t\tcopy_S[index1] = word\n\t\t\t\t\telse:\n\t\t\t\t\t\tcopy_S[index2] = word\n\t\t\t\t\tnew_sat = count_conflict_constraints(copy_S)\n\t\t\t\t\tif not_sat_constraints - new_sat >= max_violations_resolved:\n\t\t\t\t\t\tmax_violations_resolved = not_sat_constraints - new_sat  # track the improvement, not the new conflict count\n\t\t\t\t\t\tw = list(word)\n\t\t\tassert w is not None\n\t\t\tif w in M1:\n\t\t\t\tS[index1] = w\n\t\t\telse:\n\t\t\t\tS[index2] = w\n\t\t\tif count_conflict_constraints(S) < count_conflict_constraints(best_S):\n\t\t\t\tbest_S = list(S)\n\treturn (best_S, count_conflict_constraints(best_S))", "title": "" }, { "docid": "99ca1e532a0734d47376ee05edaf4351", "score": "0.59698653", "text": "def aStarSearch(problem, heuristic=nullHeuristic):\n \"*** YOUR CODE HERE ***\"\n frontier = util.PriorityQueue() #similar method with ucs but the path (the costs) will be found with a heuristic function\n frontier.push(problem.getStartState(),heuristic(problem.getStartState(),problem))\n explored = []\n parent = {}\n parent[problem.getStartState()] = (0, 0, 0)\n while(True): #the usual technique as the rest\n if(frontier.isEmpty() == True):\n break\n else:\n current = frontier.pop()\n if(problem.isGoalState(current) is True):\n path = []\n cur_prev = parent[current]\n path.append(cur_prev[1])\n while parent[cur_prev[0]] != (0, 0, 0):\n path.append(parent[cur_prev[0]][1])\n cur_prev = parent[cur_prev[0]]\n\n solution = []\n for i in reversed(path):\n solution.append(i)\n return solution\n\n else:\n explored.append(current)\n for next_step in problem.getSuccessors(current):\n cost = next_step[2] + parent[current][2] + heuristic(next_step[0],problem) #the cost this time will be equal with the cost of the path till the current node(next_step) plus the current node's cost plus the heuristic of the current node\n if((next_step[0] not in [item[2]for item in frontier.heap]) and (next_step[0] not in explored)):\n frontier.push(next_step[0],cost)\n parent[next_step[0]] = (current, next_step[1], cost-heuristic(next_step[0],problem))\n elif(next_step[0] in [item[2]for item in frontier.heap]) and (next_step[0] not in explored) and (cost<parent[next_step[0]][2]+heuristic(current, problem)):\n frontier.update(next_step[0], cost) #if we found a better path to the current node than the one that exists in frontier, change the frontier to our new standards\n parent[next_step[0]] = (current, next_step[1], cost-heuristic(next_step[0],problem)) #also change the current node's parent dictionary\n util.raiseNotDefined()", "title": "" }, { "docid": "7c02addc50750e02e47b681fe2ac6bdf", "score": "0.594182", "text": "def algo(counts):", "title": "" }, { "docid": "4621e0d50c9dbc213fed3571ba112772", "score": "0.5936928", "text": "def findNext(G, dist, weights):\n\n min = 10000000000 \n for k in dist:\n node = k\n break\n\n for n in dist:\n if dist[n] < min: \n min = dist[n]\n node = n\n \n return [node, min]", "title": "" }, { "docid": "51b00c2966d3b800f95fe495c3619b6c", "score": "0.5930197", "text": "def greedy(input_sets, output_set, idx_dict, memory_limit):\n\n if len(input_sets) == 1:\n return [(0, )]\n elif len(input_sets) == 2:\n return [(0, 1)]\n\n # Build up a naive cost\n contract = helpers.find_contraction(range(len(input_sets)), input_sets, output_set)\n idx_result, new_input_sets, idx_removed, idx_contract = contract\n naive_cost = helpers.flop_count(idx_contract, idx_removed, len(input_sets), 
idx_dict)\n\n comb_iter = itertools.combinations(range(len(input_sets)), 2)\n iteration_results = []\n\n path_cost = 0\n path = []\n\n for iteration in range(len(input_sets) - 1):\n\n # Iterate over all pairs on first step, only previously found pairs on subsequent steps\n for positions in comb_iter:\n\n # Always initially ignore outer products\n if input_sets[positions[0]].isdisjoint(input_sets[positions[1]]):\n continue\n\n result = _parse_possible_contraction(positions, input_sets, output_set, idx_dict, memory_limit, path_cost,\n naive_cost)\n if result is not None:\n iteration_results.append(result)\n\n # If we do not have a inner contraction, rescan pairs including outer products\n if len(iteration_results) == 0:\n\n # Then check the outer products\n for positions in itertools.combinations(range(len(input_sets)), 2):\n result = _parse_possible_contraction(positions, input_sets, output_set, idx_dict, memory_limit,\n path_cost, naive_cost)\n if result is not None:\n iteration_results.append(result)\n\n # If we still did not find any remaining contractions, default back to einsum like behavior\n if len(iteration_results) == 0:\n path.append(tuple(range(len(input_sets))))\n break\n\n # Sort based on first index\n best = min(iteration_results, key=lambda x: x[0])\n\n # Now propagate as many unused contractions as possible to next iteration\n iteration_results = _update_other_results(iteration_results, best)\n\n # Next iteration only compute contractions with the new tensor\n # All other contractions have been accounted for\n input_sets = best[2]\n new_tensor_pos = len(input_sets) - 1\n comb_iter = ((i, new_tensor_pos) for i in range(new_tensor_pos))\n\n # Update path and total cost\n path.append(best[1])\n path_cost += best[0][1]\n\n return path", "title": "" }, { "docid": "60a8709f0ade8bed333b0d3f40f9443f", "score": "0.59084076", "text": "def greedy(input_sets, output_set, idx_dict, memory_limit):\n\n # Build up a naive cost\n contract = helpers.find_contraction(range(len(input_sets)), input_sets, output_set)\n idx_result, new_input_sets, idx_removed, idx_contract = contract\n naive_cost = helpers.flop_count(idx_contract, idx_removed, len(input_sets), idx_dict)\n\n comb_iter = itertools.combinations(range(len(input_sets)), 2)\n iteration_results = []\n\n path_cost = 0\n path = []\n\n for iteration in range(len(input_sets) - 1):\n\n # Iterate over all pairs on first step, only previously found pairs on subsequent steps\n for positions in comb_iter:\n\n # Always initially ignore outer products\n if input_sets[positions[0]].isdisjoint(input_sets[positions[1]]):\n continue\n\n result = _parse_possible_contraction(positions, input_sets, output_set, idx_dict, memory_limit, path_cost,\n naive_cost)\n if result is not None:\n iteration_results.append(result)\n\n # If we do not have a inner contraction, rescan pairs including outer products\n if len(iteration_results) == 0:\n\n # Then check the outer products\n for positions in itertools.combinations(range(len(input_sets)), 2):\n result = _parse_possible_contraction(positions, input_sets, output_set, idx_dict, memory_limit,\n path_cost, naive_cost)\n if result is not None:\n iteration_results.append(result)\n\n # If we still did not find any remaining contractions, default back to einsum like behavior\n if len(iteration_results) == 0:\n path.append(tuple(range(len(input_sets))))\n break\n\n # Sort based on first index\n best = min(iteration_results, key=lambda x: x[0])\n\n # Now propagate as many unused contractions as possible to next iteration\n 
iteration_results = _update_other_results(iteration_results, best)\n\n # Next iteration only compute contractions with the new tensor\n # All other contractions have been accounted for\n input_sets = best[2]\n new_tensor_pos = len(input_sets) - 1\n comb_iter = ((i, new_tensor_pos) for i in range(new_tensor_pos))\n\n # Update path and total cost\n path.append(best[1])\n path_cost += best[0][1]\n\n return path", "title": "" }, { "docid": "19d72a83a94292a55394e0cd39c59f3b", "score": "0.59030193", "text": "def costfunction(A):\n global M\n global eps\n global dim\n #global dis\n #return np.sum(A)\n m=int(len(A)/dim)\n X=A.reshape(m,dim)\n# dis=cosine_similarity(X)\n# [email protected]\n cost=0\n\n for i in range(0, m):\n for j in range(i+1, m):\n #print(\"Xi==\",X[i])\n diff=np.square( X[j][0]-X[i][0]) + np.square( X[j][1]-X[i][1])\n if diff<eps:\n d=0\n else:\n d= np.sqrt(diff)\n #print(\"diss\",d)\n if abs(d - M[i][j]) > eps:\n cost = cost+ np.square(d - M[i][j])\n #dis[j][i]=dis[i][j]\n #dis = euclidean_distances(X)\n #cost1=np.square(dis-M)\n #cost=np.sum(cost1)\n return cost", "title": "" }, { "docid": "93bdda06825f1d71dd8245cacbf81ecf", "score": "0.5897924", "text": "def a_star_search(problem):\n return best_first_search(problem,\n lambda n: n.path_cost + problem.heuristic(n))", "title": "" }, { "docid": "624957506cf2cd7eb9eb7e1f90ed6229", "score": "0.5897896", "text": "def my_algorithm():\n\n data_delete = random.sample(range(0, 200), 50)\n data_delete.sort()\n # Delete data\n for index in reversed(data_delete):\n del arr_data[index]\n\n subset = set() # saves parent set\n best_accuracy = 0.0\n best_subset = set() # saves best set\n for i in range(1, len(arr_data[0])):\n print('Level: '+ str(i))\n accuracy = 0.0\n best_set = set() # saves current best temp set\n for j in range(1, len(arr_data[0])):\n temp_set = subset.copy() # saves set to test\n temp_set.add(j)\n if subset == temp_set:\n continue\n calc_accuracy = nearest_neighbor_loocv(temp_set)\n if calc_accuracy > accuracy:\n accuracy = calc_accuracy\n best_set = temp_set.copy()\n subset = best_set.copy()\n if accuracy > best_accuracy:\n best_accuracy = accuracy\n best_subset = subset.copy()\n\n print('Finished search!! 
The best feature subset is ' + str(best_subset) + ', which has an accuracy of '\n + str(best_accuracy * 100) + '%')", "title": "" }, { "docid": "fc01af2d3a7cda371f5665b16118d870", "score": "0.58817554", "text": "def main():\r\n arr = [7, 3, 2, 1, 4, 6, 8, 9, 10, 11, 0]\r\n # time complexity is O(n^2)\r\n find_pairs(arr, 11)\r\n\r\n # another approach\r\n print(\"Another approach using python dictionary\")\r\n find_pairs_optimal_approach(arr, 11)\r\n\r\n print(\"Solving it using Binary Search\")\r\n # Another approach\r\n find_pairs_ii(arr, 11)", "title": "" }, { "docid": "4d73f3705baed242e488b842cd7cab2c", "score": "0.58798856", "text": "def weighted_global_efficiency(matrix):\n n_nodes = len(matrix)\n min_distances = weighted_shortest_path(matrix)\n\n sum_vector = np.empty(n_nodes)\n for i in range(n_nodes):\n # calculate the inner sum\n sum_vector[i] = (1/(n_nodes-1)) * np.sum([1 / min_distances[i, j] for j in range(n_nodes) if j != i])\n\n return (1/n_nodes) * np.sum(sum_vector)", "title": "" }, { "docid": "f4f5f279d580a40d73f4d2f7532a66cd", "score": "0.5879796", "text": "def aStarSearch(problem, heuristic=nullHeuristic):\n \"*** YOUR CODE HERE ***\"\n b = util.PriorityQueue()\n n = problem.getStartState()\n b.push(n, 0)\n vis = {}\n p = {}\n vis[n] = 0\n p[n] = []\n while not b.isEmpty():\n n = b.pop()\n if problem.goalTest(n):\n return p[n]\n for action in problem.getActions(n):\n c_node = problem.getResult(n, action)\n cost_c_node = problem.getCostOfActions(p[n] + [action]) + heuristic(c_node, problem)\n if not c_node in vis or vis[c_node] > cost_c_node:\n p[c_node] = p[n] + [action]\n vis[c_node] = cost_c_node\n b.push(c_node, cost_c_node)\n\n util.raiseNotDefined()", "title": "" }, { "docid": "1e80005e47de2d43539b9c771b910a75", "score": "0.58792466", "text": "def aStarSearch(problem, heuristic=nullHeuristic):\n \"*** YOUR CODE HERE ***\"\n # Here we implement the aStarSearch by first creating a priority queue, 2 lists and 2 dictionaries.\n # The priority queue is the fringe that contains the still unexpanded nodes (that can be visited) ordered by the\n # least estimated total cost of path through a specific node to goal(=f).\n # The list closed contains the nodes that have been visited.\n # The list actions contains the results.\n # The dictionary path_cost contains the cost of the path.\n # The dictionary dict contains all the child nodes with the parent node as a reference.\n # It checks the same conditions as UCS. 
The only difference is that in the else-statement it calculates the\n # the current (up to now) cost path (= g) to calculate the cost of f (= g + h(given heuristic)) and then pushes f and the node on\n # the fringe if the node has successors.\n # It also puts the new value of cost in the path_cost list to update the list.\n startState = (problem.getStartState(), 0, 0)\n fringe = util.PriorityQueue()\n closed = []\n dict = {}\n path_cost = {}\n path_cost[startState] = startState[2]\n actions = []\n fringe.push(startState, 0)\n\n\n while not fringe.isEmpty():\n node = fringe.pop()\n\n if node[0] not in closed:\n\n if problem.isGoalState(node[0]):\n current = node\n while (current != startState):\n actions.append(current[1])\n current = dict[current]\n actions.reverse()\n return actions\n\n else:\n closed.append(node[0])\n succ = problem.getSuccessors(node[0])\n if not succ:\n continue\n for s in succ:\n cost = path_cost[node] + s[2]\n fcost = cost + heuristic(s[0], problem)\n fringe.push(s, fcost)\n if node in dict.keys() and s in dict.values():\n continue\n dict[s] = node\n path_cost[s] = cost\n util.raiseNotDefined()", "title": "" }, { "docid": "cbbcd4c68e092e3d1b3e4c1c08b70e12", "score": "0.58749205", "text": "def attack(self, data, target):\n B, K = data.shape[:2]\n data = data.float().cuda().detach()\n data = data.transpose(1, 2).contiguous() # [B, 3, K]\n ori_data = data.clone().detach() # [B, 3, K]\n ori_data.requires_grad = False\n target = target.long().cuda().detach() # [B]\n label_val = target.detach().cpu().numpy() # [B]\n\n # weight factor for budget regularization\n lower_bound = np.zeros((B,))\n upper_bound = np.ones((B,)) * self.max_weight\n current_weight = np.ones((B,)) * self.init_weight\n\n # record best results in binary search\n o_bestdist = np.array([1e10] * B)\n o_bestscore = np.array([-1] * B)\n o_bestattack = np.zeros((B, 3, self.num_add * self.cl_num_p))\n\n # init clusters on vulnerable regions!\n # clusters is np.array of shape [B, num_add, cl_num_p, 3]\n clusters = self._init_centers(ori_data, target)\n clusters = torch.from_numpy(clusters).float().cuda()\n clusters = clusters.view(B, self.num_add * self.cl_num_p, 3)\n # to [B, 3, self.num_add * self.cl_num_p]\n clusters = clusters.transpose(1, 2).contiguous()\n\n # perform binary search\n for binary_step in range(self.binary_step):\n # init with critical points!\n adv_data = clusters + torch.randn(\n (B, 3, self.num_add * self.cl_num_p)).cuda() * 1e-7\n adv_data.requires_grad_() # [B, 3, num_add * cl_num_p]\n bestdist = np.array([1e10] * B)\n bestscore = np.array([-1] * B)\n opt = optim.Adam([adv_data], lr=self.attack_lr, weight_decay=0.)\n\n adv_loss = torch.tensor(0.).cuda()\n dist_loss = torch.tensor(0.).cuda()\n\n total_time = 0.\n forward_time = 0.\n backward_time = 0.\n update_time = 0.\n\n for iteration in range(self.num_iter):\n t1 = time.time()\n\n # forward passing\n # concat added clusters with real pc!\n cat_data = torch.cat([ori_data, adv_data], dim=-1)\n logits = self.model(cat_data) # [B, num_classes]\n if isinstance(logits, tuple): # PointNet\n logits = logits[0]\n\n t2 = time.time()\n forward_time += t2 - t1\n\n # print\n pred = torch.argmax(logits, dim=-1) # [B]\n success_num = (pred == target).sum().item()\n if iteration % (self.num_iter // 5) == 0:\n print('Step {}, iteration {}, success {}/{}\\n'\n 'adv_loss: {:.4f}, dist_loss: {:.4f}'.\n format(binary_step, iteration, success_num, B,\n adv_loss.item(), dist_loss.item()))\n\n # record values!\n dist_val = self.dist_func(\n 
adv_data.transpose(1, 2).contiguous(),\n ori_data.transpose(1, 2).contiguous(),\n batch_avg=False).detach().cpu().numpy() # [B]\n pred_val = pred.detach().cpu().numpy() # [B]\n input_val = adv_data.detach().cpu().numpy() # [B, 3, K]\n\n # update binary search\n for e, (dist, pred, label, ii) in \\\n enumerate(zip(dist_val, pred_val, label_val, input_val)):\n if dist < bestdist[e] and pred == label:\n bestdist[e] = dist\n bestscore[e] = pred\n if dist < o_bestdist[e] and pred == label:\n o_bestdist[e] = dist\n o_bestscore[e] = pred\n o_bestattack[e] = ii\n\n t3 = time.time()\n update_time += t3 - t2\n\n # compute loss and backward\n adv_loss = self.adv_func(logits, target).mean()\n dist_loss = self.dist_func(\n adv_data.transpose(1, 2).contiguous(),\n ori_data.transpose(1, 2).contiguous(),\n weights=torch.from_numpy(current_weight)).mean()\n loss = adv_loss + dist_loss\n opt.zero_grad()\n loss.backward()\n opt.step()\n\n t4 = time.time()\n backward_time += t4 - t3\n total_time += t4 - t1\n\n if iteration % 100 == 0:\n print('total: {:.2f}, for: {:.2f}, '\n 'back: {:.2f}, update: {:.2f}'.\n format(total_time, forward_time,\n backward_time, update_time))\n total_time = 0.\n forward_time = 0.\n backward_time = 0.\n update_time = 0.\n torch.cuda.empty_cache()\n\n # adjust weight factor\n for e, label in enumerate(label_val):\n if bestscore[e] == label and bestscore[e] != -1 and bestdist[e] <= o_bestdist[e]:\n # success\n lower_bound[e] = max(lower_bound[e], current_weight[e])\n current_weight[e] = (lower_bound[e] + upper_bound[e]) / 2.\n else:\n # failure\n upper_bound[e] = min(upper_bound[e], current_weight[e])\n current_weight[e] = (lower_bound[e] + upper_bound[e]) / 2.\n\n torch.cuda.empty_cache()\n\n # end of CW attack\n # fail to attack some examples\n # just assign them with last time attack data\n fail_idx = (lower_bound == 0.)\n o_bestattack[fail_idx] = input_val[fail_idx] # [B, 3, num]\n # return final results\n success_num = (lower_bound > 0.).sum()\n print('Successfully attack {}/{}'.format(success_num, B))\n\n # concatenate add and ori data\n ori_data = ori_data.detach().cpu().numpy() # [B, 3, K]\n o_bestattack = np.concatenate([ori_data, o_bestattack], axis=-1)\n return o_bestdist, o_bestattack.transpose((0, 2, 1)), success_num", "title": "" }, { "docid": "3cdfaa015885c0a24e88a3858e65eabe", "score": "0.5866124", "text": "def a_star_search(problem, heuristic):\n pq = PriorityQueue()\n\n init_cost, init_node = problem.initial_state()\n\n # put start state on the priority queue\n pq.put((init_cost, init_node))\n\n parent_map = {}\n\n explored_nodes = set()\n\n while not pq.empty():\n # pop node off of the priority queue\n cur_node_cost, cur_node = pq.get()\n print 'cur_node_cost, cur_node', cur_node_cost, cur_node\n\n # check if this is the goal node\n if problem.goal_test(cur_node):\n return path_from_start(cur_node, parent_map)\n\n # if not, acquire the list of actions for that state.\n actions = problem.actions(cur_node) # [(2, node1), (4, node2)]\n\n parent_heuristic_cost = heuristic(cur_node)\n print 'parent_heuristic_cost', parent_heuristic_cost\n\n for action in actions:\n transition_cost, child_node = action ####\n print 'transition_cost, child_node', transition_cost, child_node\n\n # don't explore nodes more than once\n if child_node in explored_nodes:\n continue\n\n child_heuristic_cost = heuristic(child_node)\n\n # create a map from the child to the current node\n parent_map[child_node] = cur_node\n current_cost = cur_node_cost - parent_heuristic_cost + transition_cost + 
child_heuristic_cost\n\n pq.put((current_cost, child_node))\n\n # put the current node on the list of explored, once we have\n # explored the available actions for this node\n explored_nodes.add(cur_node)\n\n # goal not found\n return None\n\n\n # f(n) = g(n) + h(n) = \"backwards cost\" + \"forward cost\"\n # suppose \"parent\" of n is m.. transitioncost = cost of going from m to n\n # path taken to get to n = m1 -> m2 -> m3 ... -> x -> m\n # g(n) = tc(m1, m2) + tc(m2, m3) + .... + tc(x, m)\n #\n # suppose m is the node we visited immediately before n.. \"m parent of n\"\n # f(n) = .... f(m) ...\n # f(n) = g(n) + h(n)\n # f(n) = g(m)[start state to m] + transition(m -> n)[m to n] + h(n)[heuristic from n to goal]\n #\n # f(m) = g(m) + h(m) << by definition\n # f(m) - h(m) = g(m) + h(m) - h(m)\n # f(m) - h(m) = g(m) + 0\n # g(m) = f(m) - h(m) << by algebra from previous line\n #\n # line 59 and line 64:\n # f(n) = g(m) + tc(m -> n) + h(n) << line 59\n # f(n) = (f(m) - h(m))[g(m)] + tc(m -> n) + h(n) << using expression of g(m) from line 64\n\n # f(n) are the priorities in the PQ\n\n # h(m) =\n # tc(m -> n) =\n # h(n) =\n # f(m) = \"priority of m in the PQ\"", "title": "" }, { "docid": "4fb4b35486663dbf1284461aff50554e", "score": "0.58585584", "text": "def astar_search(self, graph, start = (start_x, start_y),\n goal = (goal_x, goal_y)):\n \n print(\"\\n__________________ A-Star (A*) SEARCH STARTED __________________\\n\") \n #* START THE TIMER\n start_time = timeit.default_timer()\n start_process_time = process_time()\n # SET THE START AND GOAL VALUES\n self.start = start\n print(f'The Start Node is located at: {self.start}')\n self.goal = goal\n print(f'The Goal Node is located at: {self.goal}')\n # IMPORT THE QUEUE TO PUT THE NODES\n self.frontier = world.createWorld.PriorityQueue()\n #* Put the nodes on the Frontier with cost 0\n self.frontier.put(vec_to_int(self.start), 0)\n #* Starts the Path Dictionary\n self.path = {}\n #* Starts the Cost Dictionary\n self.cost = {}\n # THE START IS NONE SINCE IS WERE WE ARE\n self.path[vec_to_int(self.start)] = None\n self.cost[vec_to_int(self.start)] = 0\n #? Init the While Interactions Variable\n self.while_interactions = 0\n while not self.frontier.empty():\n #? Add 1 interaction for every loop\n self.while_interactions += 1\n #* The next one will be the one with lowest cost\n self.current = self.frontier.get()\n #* If the goal is reached break\n if self.current == self.goal:\n break\n #* Find the neighbors of the current node\n #? Init the For Interactions Variable\n self.for_interactions = 0\n for next in graph.find_neighbors(vec(self.current)):\n #? Add 1 interaction for every loop\n self.for_interactions += 1\n next = vec_to_int(next)\n #* The cost is the atual cost plus the cost to move to the next node\n self.next_cost = self.cost[self.current] + graph.cost(self.current, next)\n #* If not in the cost or have a lower cost\n if next not in self.cost or self.next_cost < self.cost[next]:\n #* Update the values\n self.cost[next] = self.next_cost\n #? 
Instead of the Dijkstra, the priority will be the heuristic function\n self.priority = self.next_cost + manhattan_distance(self.goal, vec(next))\n #* Put in the priority\n self.frontier.put(next, self.priority)\n #* Put in the path vector\n self.path[next] = vec(self.current) - vec(next)\n #* Stop the Default Timer (Wall Timer)\n stop_time = timeit.default_timer()\n #* Stop the Process Timer (Wall Timer)\n stop_process_time = process_time()\n # PRINT ALL THE VISITED NODES\n print(f\"\\nThe A* Search Path Available Nodes Movement are:\\n{self.path}\")\n print(f\"\\nThe A* Search Path have: {len(self.path)} Available Nodes\")\n print(f\"\\nThe A* Search Path 'While Loop' Interactions was: {self.while_interactions}\")\n print(f\"\\nThe A* Search Path 'For Loop' Interactions was: {self.for_interactions}\")\n print(\"\\nThe A* Search Path 'Wall time' was \", stop_time - start_time, 'sec')\n print(\"\\nThe A* Search Path 'Process Time' was \",\n stop_process_time - start_process_time, 'sec\\n')\n \n return self.path", "title": "" }, { "docid": "fa6075174e787b36e2bc81c46a0997f3", "score": "0.58314157", "text": "def execute_search(self):\n for i in range(1, self.t_bounds[1]):\n self.t = i\n counter = 0\n for p in self.swarm:\n p.update_inertia(self.gbinary)\n p.update_velocity(self.gbest, self.abest)\n p.update_position()\n p.update_binary_position()\n self.var_by_time['velocities'][i, counter] = p.v.mean()\n f, f_score = self.eval_fitness(p.b)\n if f > p.p_fitness:\n p.pbest = p.x.copy()\n p.pbinary = p.b.copy()\n p.p_fitness = f\n p.p_score = f_score\n if f > self.g_fitness:\n # -1 because the counter should be 0 during the\n # next comparison to updated positions. The other\n # 2 resets (shuffle_gbest and init) happen after\n # the \"self.gbest_counter += 1\" line. 
This one\n # happens before.\n self.gbest_counter = -1\n self.gbest = p.x.copy()\n self.gbinary = p.b.copy()\n self.g_fitness = f\n self.g_score = f_score\n counter += 1\n if self.g_fitness > self.a_fitness:\n self.abest = self.gbest.copy()\n self.abinary = self.gbinary.copy()\n self.a_fitness = self.g_fitness\n self.a_score = self.g_score\n self.gbest_counter += 1\n if self.gbest_counter >= 3:\n self.shuffle_gbest()\n self.var_by_time['num_features'][i] = np.count_nonzero(self.abinary)\n self.var_by_time['g_fitness'][i] = self.g_fitness\n self.var_by_time['g_score'][i] = self.g_score\n self.var_by_time['a_fitness'][i] = self.a_fitness\n self.var_by_time['a_score'][i] = self.a_score", "title": "" }, { "docid": "9d1074580491823d73dbc67ec8debd54", "score": "0.5829396", "text": "def dynamic_programming(graph, start, goal):\n # TODO IF COSTS ARE ALREADY CALCULATED THEN RETURN THEM\n distances = np.full((graph.shape[0],1), fill_value=float(\"Inf\"))\n predcs = np.zeros((graph.shape[0],1))\n distances[start] = 0\n for i in range(len(graph)): # performing 18 times\n for row in range(len(graph)-1):\n successors = np.nonzero(graph[row,:])[0]\n for node in successors:#get_successors(row, ad_matrix):\n #breakpoint()\n if distances[row] != float(\"Inf\") and distances[row] + graph[row, node] < distances[node]:\n distances[node] = distances[row] + graph[row,node]\n predcs[node] = row\n path = [goal]\n while goal != start:\n #breakpoint()\n van = predcs[int(goal)][0]\n path.append(int(van))\n goal = van\n path = np.array(path)\n return path[::-1]\n #return deque()", "title": "" }, { "docid": "a1b4d475095bacc91d34466ac24330d7", "score": "0.58270264", "text": "def naiveSearch(self):\n n = len(self.graph)\n minvc = n # minimum vertex cover\n for i in list(itertools.product(*[\"01\"] *n)):\n if vertexcover.validityCheck(ins, i):\n counter = 0\n for j in i:\n if j == \"1\":\n counter += 1\n minvc = min(counter, minvc)", "title": "" }, { "docid": "1dfdbf2be8fc97152c4562b3b09f9de4", "score": "0.5821439", "text": "def SCO(S, N, ς, w, B, MaxTry=5, T=10):\n time_start = timeit.default_timer()\n \n Y_SET = {0: [np.random.uniform(B[0], B[1]) for _ in range(N)]}\n X_SET = {}\n X_best = {}\n V = {}\n t = 0\n N_elite = int(np.ceil(N * ς))\n \n R = np.zeros(N_elite) \n σ = {} \n I_SET = np.arange(N_elite)\n \n B_i = np.concatenate((np.ones(N - N_elite), np.zeros(2*N_elite - N)))\n \n best = (None, -np.inf)\n while True:\n print(f\"ITERATION {t}...\")\n S_X = np.array([S(X) for X in Y_SET[t]])\n idx = np.argsort(S_X)[::-1][:N_elite]\n S_X = S_X[idx]\n X = [Y_SET[t][i] for i in idx]\n X_SET[t+1] = X\n V[t+1] = S_X[0]\n X_best[t+1] = X[0].copy()\n \n np.random.shuffle(B_i) \n \n for i in range(N_elite):\n R[i] = int(np.floor(N / N_elite) + B_i[i]) # random splitting factor\n Y = X[i].copy()\n Y_dash = Y.copy()\n \n for j in range(int(R[i])):\n I = np.random.choice(I_SET[I_SET != i])\n σ[i] = w * np.abs(X[i] - X[I])\n μ = np.random.permutation(2)\n \n for Try in range(MaxTry): # optimising the threshold\n Z = np.random.normal()\n Y_dash[μ[0]] = max(0, min(100, Y[μ[0]] + σ[i][μ[0]] * Z))\n if S(Y_dash) > S(Y):\n Y = Y_dash.copy()\n break\n \n for Try in range(MaxTry): # optimising the quantile\n Z = np.random.normal()\n Y_dash[μ[1]] = Y[μ[1]] + σ[i][μ[1]] * Z\n if S(Y_dash) > S(Y):\n Y = Y_dash.copy()\n break\n \n if Y_SET.get(t+1) == None:\n Y_SET[t+1] = []\n Y_SET[t+1].append(Y.copy())\n \n t = t + 1\n if V[t] > best[1]:\n best = (X_best[t], V[t])\n print(f\"Best value: {best[1]}\")\n print(f\"Best threshold: 
{best[0][0]}\")\n print(f\"Best quantile: {best[0][1]}\")\n \n if t == T:\n break\n \n time_stop = timeit.default_timer()\n print(f\"\\nTerminated after {time_stop - time_start} seconds.\")\n \n return (X_best, V)", "title": "" }, { "docid": "3289f2d5658297d958c8558634be87ef", "score": "0.5811212", "text": "def global_search(self, X, i):\n for j in range(self.d):\n self.V[i][j] = self.V[i][j] + (self.solutions[i][j] - self.best[j]) * self.Q[i]\n X[i][j] = np.clip(self.solutions[i][j] + self.V[i][j],\n self.lower_bound, self.upper_bound)", "title": "" }, { "docid": "6aafd62aab9a1aaf104e8c01f66c8006", "score": "0.58087957", "text": "def ctr(graph,budget,removable,hidden):\n\n\tscore_removable={i:0 for i in removable}\n\n\thiddens_per_node={i:0 for i in G}\n\tfor (u_h,v_h) in hidden:\n\t\thiddens_per_node[u_h]+=1\n\t\thiddens_per_node[v_h]+=1\n\t\tCN_uh_vh=nx.common_neighbors(G,u_h,v_h)\n\t\tfor w in CN_uh_vh:\n\t\t\ta=min(u_h,w)\n\t\t\tb=max(u_h,w)\n\t\t\tif (a,b) in score_removable:\n\t\t\t\tscore_removable[(a,b)]+=1\n\t\t\ta=min(v_h,w)\n\t\t\tb=max(v_h,w)\n\t\t\tif (a,b) in score_removable:\n\t\t\t\tscore_removable[(a,b)]+=1\n\tfor (u,v) in removable:\n\t\tif score_removable[(u,v)]==0:\n\t\t\tscore_removable.pop((u,v),None)\n\tremoved=[]\n\n\tfor i in range(budget):\n\t\tmax_score_nodes=None\n\t\tmax_score=0\n\t\tfor (u,v) in score_removable:\n\t\t\tif max_score<score_removable[(u,v)]:\n\t\t\t\tmax_score=score_removable[(u,v)]\n\t\t\t\tmax_score_nodes=(u,v)\n\t\tif max_score_nodes is None:\n\t\t\tprint(\"ERR!!! All scores are 0\")\n\t\t\texit(1)\n\t\tremoved.append(max_score_nodes)\n\t\tu,v=max_score_nodes\n\t\tfor (u_h,v_h) in hidden:\n\t\t\tif u==u_h:\n\t\t\t\tminn=min(v,v_h)\n\t\t\t\tmaxn=max(v,v_h)\n\t\t\telif v==v_h:\n\t\t\t\tminn=min(u,u_h)\n\t\t\t\tmaxn=max(u_h,u)\n\t\t\tif (minn,maxn) in score_removable:\n\t\t\t\tscore_removable[(minn,maxn)]-=1\n\t\t\tif score_removable[(minn,maxn)]==0:\n\t\t\t\tscore_removable.pop((minn,maxn),None)\n\treturn removed", "title": "" }, { "docid": "e039c21ab95e7bb3bbe7fc9ff08047cb", "score": "0.5808127", "text": "def bidirectionalMMsearch(problem, heuristic=nullHeuristic):\n #start point of the forward search\n curr_state_fwd = problem.getStartState()\n # start point of the backward search\n curr_state_bck = problem.getGoalState()\n # Lists to store the actions followed for the forward and backward searches\n path_fwd = []\n path_bck = []\n # dictonaries for states and their g values in the forward and backward direction\n g_fwd = {curr_state_fwd: 0}\n g_bck = {curr_state_bck: 0}\n # open and closed lists in the forward and backward directions\n open_fwd = [(curr_state_fwd, path_fwd)]\n open_bck = [(curr_state_bck, path_bck)]\n closed_fwd = []\n closed_bck = []\n # U = Cost of the cheapest solution found so far\n U = np.inf\n\n def search_dir(U, open1, open2, g1, g2, closed, dir):\n \"Search in the direction dir\"\n n, path = min_p_g(C, open1, g1, dir)\n open1.remove((n, path))\n closed.append((n, path))\n successor_list = problem.getSuccessors(n)\n for (c, next_direction, additional_cost) in successor_list:\n if found(open1, c) or found(closed, c):\n if g1[c] <= g1[n] + additional_cost:\n continue\n\n open1 = delete(open1, c)\n\n g1[c] = g1[n] + additional_cost\n open1.append((c, path + [next_direction]))\n #visited_states.add(c)\n if found(open2, c):\n U = min(U, g1[c] + g2[c])\n\n return U, open1, closed, g1\n\n def delete(open1, n):\n \"\"\"Delete state n from Open list open1\"\"\"\n for (c, path) in open1:\n if c == n:\n open1.remove((c, path))\n 
return open1\n\n def found(open1, n):\n \"\"\"Check if the state n is on the Open list open1\"\"\"\n for (c, path) in open1:\n if c == n:\n return True\n return False\n\n def choose_min_n(open1, g, dir):\n \"\"\"Function to find the minimum values of f and g\n for the states in the open list in the current direction\"\"\"\n prmin, prmin_F = np.inf, np.inf\n for (n, path) in open1:\n f = g[n] + heuristic(n, problem, dir)\n pr = max(f, 2 * g[n])\n prmin = min(prmin, pr)\n prmin_F = min(prmin_F, f)\n\n return prmin, prmin_F, min(g.values())\n\n def min_p_g(prmin, open1, g, dir):\n \"\"\"find prmin and gmin in open list\"\"\"\n m = np.inf\n node = problem.goal\n final_path = []\n for (n, path) in open1:\n pr = max(g[n] + heuristic(n, problem, dir), 2 * g[n])\n if pr == prmin:\n if g[n] < m:\n m = g[n]\n node = n\n final_path = path\n\n return node, final_path\n\n def getPath(open_fwd, open_bck):\n \"\"\"Get the optimal forward and backward path\"\"\"\n for (nf, path_fwd) in open_fwd:\n for (nb, path_bck) in open_bck:\n if(nf == nb):\n return path_fwd, path_bck\n #If no nodes are found to be common\n print('No common node found #SR')\n\n\n def opposite(path):\n \"\"\"Reverse the directions in the given path. This is used for the path from\n the goal node to the start node\"\"\"\n reversed_path = []\n for i in path:\n # Convert NORTH to SOUTH\n if i == 'North':\n reversed_path.append('South')\n # Convert SOUTH to NORTH\n elif i == 'South':\n reversed_path.append('North')\n # Convert EAST to WEST\n elif i == 'East':\n reversed_path.append('West')\n # Convert WEST to EAST\n else:\n reversed_path.append('East')\n #print('\\n Path_bck = {0}'.format(j))\n return reversed_path\n\n #while the open lists are not empty\n while open_fwd and open_bck:\n prmin_F, fmin_fwd, gmin_fwd = choose_min_n(open_fwd, g_fwd, 0)\n prmin_b, fmin_bck, gmin_bck = choose_min_n(open_bck, g_bck, 1)\n C = min(prmin_F, prmin_b)\n\n if U <= max(C, fmin_fwd, fmin_bck, gmin_fwd + gmin_bck + 1):\n \"\"\"The condition that indicates that the optimal solution has been found.\n The cost of the cheapest edge in this problem is 1\"\"\"\n \"\"\"\n totalOpenNodes = len(open_fwd) + len(open_bck) + 1\n totalClosedNodes = len(closed_fwd) + len(closed_bck)\n print('\\nTotal nodes expanded = {0}'.format(totalOpenNodes + totalClosedNodes))\n print(' (open nodes = {0} and closed nodes = {1})'.format(totalOpenNodes, totalClosedNodes))\n \"\"\"\n print('\\nPath length = {0}'.format(U))\n path_fwd, path_bck = getPath(open_fwd, open_bck)\n #print('\\n path_bck = {0}'.format(path_bck))\n path_bck = reversed(path_bck)\n #print('\\n Path_fwd = {0}'.format(path_fwd))\n if path_bck:\n path_fwd= path_fwd + opposite(path_bck)\n problem.isGoalState(problem.getGoalState())\n return path_fwd\n\n if C == prmin_F:\n # Search in the forward direction\n U, open_fwd, closed_fwd, g_fwd = search_dir(U, open_fwd, open_bck, g_fwd, g_bck, closed_fwd, 0)\n else:\n # Search in the backward direction\n U, open_bck, closed_bck, g_bck = search_dir(U, open_bck, open_fwd, g_bck, g_fwd, closed_bck, 1)\n\n #Incase U never reaches the optimal value\n print('\\nPath length = infinity')\n return path_fwd", "title": "" }, { "docid": "f01b3fb749f1f7eb7043285ecb5b09f9", "score": "0.58042556", "text": "def compute_sub_problems(self, start_node):\n start_time = time.time()\n nb_subset = pow(2,self._nb_node)\n\n T=[0]*self._nb_node\n T_first=[0]*self._nb_node\n T=[[float(\"inf\")]*nb_subset for _ in range(0,self._nb_node)]\n\n P=[0]*self._nb_node\n P=[[None]*nb_subset for _ in 
range(0,self._nb_node)]\n\n T = {}\n T_first = {}\n for node in range(0,self._nb_node):\n P[node] = {}\n T[node] = {}\n T[node][(1 << node)] = self._input[node][start_node]\n\n\n #print(self.get_size(T_first))\n #process = psutil.Process(os.getpid())\n\n # compute cost for all the sets of node by increasing size.\n #A set of size N requires cost of sets of size N-1\n for k in range(2,self._nb_node):\n #print(k)\n #print(process.memory_info().rss)\n subset_list = self.generate_subset_by_size(self._nb_node, k, start_node)\n #print(\"Nb set=\",len(subset_list))\n\n for subset in subset_list:\n #self.print_node_subset(subset)\n for first_node in range(0,self._nb_node):\n\n if (first_node!=start_node) and ((subset>>first_node)&1):\n #print(\"first_node=\"+str(first_node))\n mask = subset^(1<<first_node)\n\n if mask:\n min_cost = float(\"inf\")\n min_cost_node = -1\n for next_node in range(0,self._nb_node):\n if next_node!=start_node and next_node!=first_node and ((subset>>next_node)&1):\n #print(\"Next_node=\",next_node,\" mask=\",mask, \" cost=\",T[next_node][mask], \" d=\",self._input[next_node][first_node])\n if (T[next_node][mask]+self._input[first_node][next_node])<min_cost:\n min_cost = T[next_node][mask]+self._input[first_node][next_node]\n min_cost_node = next_node\n\n if first_node not in T_first.keys():\n T_first[first_node] = {}\n T_first[first_node][subset] = min_cost\n P[first_node][subset] = min_cost_node\n\n #print(\"***\",self.get_size(T))\n self.clear_dictionnary_list(T)\n self.copy_dictionnary_list(T_first,T)\n self.clear_dictionnary_list(T_first)\n gc.collect()\n #print(process.memory_info().rss)\n\n complete_set=(1<<self._nb_node)-1\n mask=complete_set^(1<<start_node)\n min_cost = float(\"inf\")\n first_node = -1\n\n for node in range(0,self._nb_node):\n if node!=start_node:\n T[node][complete_set] = T[node][mask]+self._input[start_node][node]\n if min_cost > T[node][complete_set]:\n min_cost = T[node][complete_set]\n first_node = node\n\n P[start_node][complete_set] = first_node\n\n path=[start_node+1]\n first_node = start_node\n mask=complete_set\n\n while mask in P[first_node].keys():\n next_node = P[first_node][mask]\n mask=mask^(1<<first_node)\n path.append(next_node+1)\n first_node=next_node\n\n path.append(self._start_node+1)\n end_time = time.time()\n #print(\"Optimal Tour:\", path, \", Optimal Cost:\", int(min_cost), \", time taken:\", (end_time-start_time))\n return min_cost, path, (end_time-start_time)", "title": "" }, { "docid": "68719fb5dd2008ad9b6d62373ada77a2", "score": "0.5792094", "text": "def crossPathRelinking(best_sol, p1, cp1, p2, cp2, weights, distances):\n s = np.zeros(len(p1), dtype = np.int64)\n s[:] = p1[:]\n sinverse = np.argsort(s)\n current_cost = cp1\n best_cost = -1\n dist = numbaDistance(p1, p2)\n # If the solutions differ in a transposition, then the best solution is returned.\n if dist <= 2:\n if cp1 >= cp2:\n best_sol[:] = p1[:]\n return cp1, 0\n else:\n best_sol[:] = p2[:]\n return cp2, 0\n\n # Selects a random order for going from p1 to p2.\n order = np.arange(len(p1))\n np.random.shuffle(order)\n num_evaluations = 0\n for j in range(0, len(p1)-2):\n i = order[j]\n if s[i] != p2[i]:\n # Computes the new cost\n current_cost += applyTranspositionQAP(s, i, sinverse[p2[i]], weights, distances)\n # Computes the new distance\n dist -= 2 if s[i] == p2[sinverse[p2[i]]] else 1\n # Makes the exhange\n s[sinverse[p2[i]]] = s[i]\n sinverse[s[i]] = sinverse[p2[i]]\n s[i] = p2[i]\n sinverse[p2[i]] = i\n\n num_evaluations += 1\n # Updates the best 
solution found\n if best_cost < 0 or best_cost > current_cost:\n best_sol[:] = s[:]\n best_cost = current_cost\n\n if dist <= 2:\n break\n \n return best_cost, num_evaluations", "title": "" }, { "docid": "6043c839c3a013b04ee737c8154a23d9", "score": "0.5785395", "text": "def dijkstras_search(self, graph, start = (start_x, start_y),\n goal = (goal_x, goal_y)):\n \n print(\"\\n__________________ DIJKSTRA'S SEARCH STARTED __________________\\n\") \n #* START THE TIMER\n start_time = timeit.default_timer()\n start_process_time = process_time()\n # SET THE START AND GOAL VALUES\n self.start = start\n print(f'The Start Node is located at: {self.start}')\n self.goal = goal\n print(f'The Goal Node is located at: {self.goal}')\n # IMPORT THE QUEUE TO PUT THE NODES\n self.frontier = world.createWorld.PriorityQueue()\n #* Put the nodes on the Frontier with cost 0\n self.frontier.put(vec_to_int(self.start), 0)\n #* Starts the Path Dictionary\n self.path = {}\n #* Starts the Cost Dictionary\n self.cost = {}\n # THE START IS NONE SINCE IS WERE WE ARE\n self.path[vec_to_int(self.start)] = None\n self.cost[vec_to_int(self.start)] = 0\n #? Init the While Interactions Variable\n self.while_interactions = 0\n while not self.frontier.empty():\n #? Add 1 interaction for every loop\n self.while_interactions += 1\n #* The next one will be the one with lowest cost\n self.current = self.frontier.get()\n #* If the goal is reached break\n if self.current == self.goal:\n break\n #* Find the neighbors of the current node\n #? Init the For Interactions Variable\n self.for_interactions = 0\n for next in graph.find_neighbors(vec(self.current)):\n #? Add 1 interaction for every loop\n self.for_interactions += 1\n next = vec_to_int(next)\n #* The cost is the atual cost plus the cost to move to the next node\n self.next_cost = self.cost[self.current] + graph.cost(self.current, next)\n #* If not in the cost or have a lower cost\n if next not in self.cost or self.next_cost < self.cost[next]:\n #* Update the values\n self.cost[next] = self.next_cost\n self.priority = self.next_cost\n #* Put in the priority\n self.frontier.put(next, self.priority)\n #* Put in the path vector\n self.path[next] = vec(self.current) - vec(next)\n #* Stop the Default Timer (Wall Timer)\n stop_time = timeit.default_timer()\n #* Stop the Process Timer (Wall Timer)\n stop_process_time = process_time()\n # PRINT ALL THE VISITED NODES\n print(f\"\\nThe Dijkstra's Search Path Available Nodes Movement are:\\n{self.path}\")\n print(f\"\\nThe Dijkstra's Search Path have: {len(self.path)} Available Nodes\")\n print(f\"\\nThe Dijkstra's Search Path 'While Loop' Interactions was: {self.while_interactions}\")\n print(f\"\\nThe Dijkstra's Search Path 'For Loop' Interactions was: {self.for_interactions}\")\n print(\"\\nThe Dijkstra's Search Path 'Wall time' was \", stop_time - start_time, 'sec')\n print(\"\\nThe Dijkstra's Search Path 'Process Time' was \",\n stop_process_time - start_process_time, 'sec\\n')\n \n return self.path", "title": "" }, { "docid": "706c69f46c0669d4d2641210319e7adc", "score": "0.5784502", "text": "def evaluate(self, costargs):\n cost = 0\n grad = np.zeros(self.num_nodes * 2)\n\n nodes_to_ignore = (costargs['norms'] >= self.minimum_dist)\n mask = np.logical_or(self.mask, nodes_to_ignore)\n\n masked_norms = np.ma.masked_array(costargs['norms'], mask=mask)\n\n cost_matrix = self.weight * (self.minimum_dist - masked_norms) ** 2\n cost += np.sum(cost_matrix.filled(0))\n\n grad_common_term = self.weight * (masked_norms - self.minimum_dist) * (4 
/ masked_norms)\n grad_matrix = costargs['deltas'] * grad_common_term[:, :, None]\n\n grad += np.reshape(np.sum(grad_matrix.filled(0), axis=1), grad.shape)\n\n return (cost, grad)", "title": "" }, { "docid": "48983faf83dcac9d2a1a4147db426de5", "score": "0.5781993", "text": "def optimize_with_pygenetic(self):", "title": "" }, { "docid": "01867680ed4a60692358f9e102c6225d", "score": "0.5780956", "text": "def update_cost(self):\n\n self.cost = 0\n for i, centroid in enumerate(self.C):\n for j, sample in enumerate(self.X):\n self.cost += self.U[i, j] * self.dist(centroid, sample)\n\n self.loss_tracker.append(self.cost)", "title": "" }, { "docid": "a24d0de45dc04acfabd16e3c88f497c0", "score": "0.5766455", "text": "def find_best_fits_fill_whole():", "title": "" }, { "docid": "5716d71d1d57acb8e8abec70da8c2ec1", "score": "0.5764209", "text": "def _cx_cost2(clifford):\n U = clifford.tableau[:, :-1]\n r00 = _rank2(U[0, 0], U[0, 2], U[2, 0], U[2, 2])\n r01 = _rank2(U[0, 1], U[0, 3], U[2, 1], U[2, 3])\n if r00 == 2:\n return r01\n return r01 + 1 - r00", "title": "" }, { "docid": "1d029fc7e626de54662a7695fe6ba277", "score": "0.5762014", "text": "def exhaustive_search(data_set, target):\n shortest_dist = 0#Distance to closest point\n for i in xrange(len(data_set)):#len: gives # of rows\n point = data_set[i,:]#Gives Each separate row\n current = metric(target, point)#Distance to current point\n if i == 0 or current < shortest_dist:\n closest = point\n shortest_dist = current\n return closest, shortest_dist", "title": "" }, { "docid": "2255112dda9dc0d0b6c4a527f83f778c", "score": "0.575917", "text": "def _cx_cost3(clifford):\n # pylint: disable=too-many-return-statements,too-many-boolean-expressions\n U = clifford.tableau[:, :-1]\n n = 3\n # create information transfer matrices R1, R2\n R1 = np.zeros((n, n), dtype=int)\n R2 = np.zeros((n, n), dtype=int)\n for q1 in range(n):\n for q2 in range(n):\n R2[q1, q2] = _rank2(U[q1, q2], U[q1, q2 + n], U[q1 + n, q2], U[q1 + n, q2 + n])\n mask = np.zeros(2 * n, dtype=int)\n mask[[q2, q2 + n]] = 1\n isLocX = np.array_equal(U[q1, :] & mask, U[q1, :])\n isLocZ = np.array_equal(U[q1 + n, :] & mask, U[q1 + n, :])\n isLocY = np.array_equal((U[q1, :] ^ U[q1 + n, :]) & mask, (U[q1, :] ^ U[q1 + n, :]))\n R1[q1, q2] = 1 * (isLocX or isLocZ or isLocY) + 1 * (isLocX and isLocZ and isLocY)\n\n diag1 = np.sort(np.diag(R1)).tolist()\n diag2 = np.sort(np.diag(R2)).tolist()\n\n nz1 = np.count_nonzero(R1)\n nz2 = np.count_nonzero(R2)\n\n if diag1 == [2, 2, 2]:\n return 0\n\n if diag1 == [1, 1, 2]:\n return 1\n\n if (\n diag1 == [0, 1, 1]\n or (diag1 == [1, 1, 1] and nz2 < 9)\n or (diag1 == [0, 0, 2] and diag2 == [1, 1, 2])\n ):\n return 2\n\n if (\n (diag1 == [1, 1, 1] and nz2 == 9)\n or (\n diag1 == [0, 0, 1]\n and (nz1 == 1 or diag2 == [2, 2, 2] or (diag2 == [1, 1, 2] and nz2 < 9))\n )\n or (diag1 == [0, 0, 2] and diag2 == [0, 0, 2])\n or (diag2 == [1, 2, 2] and nz1 == 0)\n ):\n return 3\n\n if diag2 == [0, 0, 1] or (\n diag1 == [0, 0, 0]\n and (\n (diag2 == [1, 1, 1] and nz2 == 9 and nz1 == 3)\n or (diag2 == [0, 1, 1] and nz2 == 8 and nz1 == 2)\n )\n ):\n return 5\n\n if nz1 == 3 and nz2 == 3:\n return 6\n\n return 4", "title": "" }, { "docid": "80777142b66e5184537685748d1ab72f", "score": "0.5741653", "text": "def waStarSearch(problem, heuristic=nullHeuristic):\n \"*** YOUR CODE HERE FOR TASK 2 ***\"\n '''\n Weighted A* (To solve Breaking Ties)\n\n h = W * heuristic\n W = (1 + p)\n\n As W -> Inf, wA* behaves like greedy best-first search (fewer nodes are expanded, but the path is not necessarily optimal)\n As W -> 0, wA* behaves like uniform 
cost search (Dijkstra algorithm) (the path is guaranteed optimal, but more nodes are expanded)\n\n Choose the factor p such that p < (minimum cost of a single step) / (expected maximum path length)\n If you do not want your path to exceed 1000 steps, you can set p = 1 / 1000\n With this extra term, A* expands fewer nodes than before while still guaranteeing an optimal path\n\n This code sets W = 2\n (W = 1.002 almost always finds the shortest path but expands more nodes and takes longer; W = 2 does not always find the shortest path but is faster)\n\n '''\n if problem.name == 'GraphSearch':\n prior_que = util.PriorityQueue()\n prior_que.push((problem.getStartState(), []), 0)\n visited, actions = [], []\n\n while prior_que:\n cur, actions = prior_que.pop()\n if problem.isGoalState(cur):\n break\n if cur not in visited:\n visited.append(cur)\n nextnode = problem.getSuccessors(cur)\n for successor, action, cost in nextnode:\n tempActions = actions + [action]\n nextCost = problem.getCostOfActions(tempActions) + 2*heuristic(successor, problem)\n if successor not in visited:\n prior_que.push((successor, tempActions), nextCost)\n return actions\n\n if problem.name == 'PositionSearchProblem' or problem.name == 'AnyFoodSearchProblem':\n prior_que = util.PriorityQueue()\n prior_que.push(problem.getStartState(), 0)\n path, cost = {}, {}\n path[problem.getStartState()] = None\n cost[problem.getStartState()] = 0\n\n goalpos = (-1, -1)\n while not prior_que.isEmpty():\n cur = prior_que.pop()\n if problem.isGoalState(cur):\n goalpos = cur\n break\n for nextnode in problem.getSuccessors(cur):\n new_cost = cost[cur] + nextnode[2]\n if nextnode[0] not in cost or new_cost < cost[nextnode[0]]:\n cost[nextnode[0]] = new_cost\n path[nextnode[0]] = cur\n # weight = 2\n prior_que.push(nextnode[0], new_cost+2*heuristic(cur,problem))\n if goalpos == (-1, -1):\n print(\"CAN NOT FIND GOAL\")\n return\n\n def pos_2_dir(successor, predecessor):\n if predecessor[1] - successor[1] == 0:\n if predecessor[0] - successor[0] == 1:\n return 'West'\n else:\n return 'East'\n elif predecessor[1] - successor[1] == 1:\n return 'South'\n else:\n return 'North'\n \n def inverse_path(path, goalpos):\n res = []\n while True:\n rootpos = path[goalpos]\n if rootpos == None:\n break\n res.append(pos_2_dir(goalpos, rootpos))\n goalpos = rootpos\n return res\n\n return inverse_path(path, goalpos)[::-1]\n\n elif problem.name == 'CornersProblem':\n def pos_2_dir(successor, predecessor):\n if predecessor[1] - successor[1] == 0:\n if predecessor[0] - successor[0] == 1:\n return 'West'\n else:\n return 'East'\n elif predecessor[1] - successor[1] == 1:\n return 'South'\n else:\n return 'North'\n \n def get_inverse_dir(path, start):\n res = []\n while True:\n rootpos = path[start]\n if rootpos == None:\n break\n res.append(pos_2_dir(start, rootpos))\n start = rootpos\n\n return res[::-1]\n\n prior_que = util.PriorityQueue()\n prior_que.push(problem.getStartState(), 0)\n path, cost = {}, {}\n path[problem.getStartState()] = None\n cost[problem.getStartState()] = 0\n final_path = []\n\n while not prior_que.isEmpty():\n cur = prior_que.pop()\n if problem.isGoalState(cur):\n if len(problem.corners_list) == 1:\n final_path.extend(get_inverse_dir(path, cur))\n return final_path\n else:\n final_path.extend(get_inverse_dir(path, cur))\n problem.corners_list.remove(cur)\n while not prior_que.isEmpty():\n prior_que.pop()\n prior_que.push(cur, 0)\n path, cost = {}, {}\n path[cur] = None\n cost[cur] = 0\n\n for nextnode in problem.getSuccessors(cur):\n new_cost = cost[cur] + nextnode[2]\n if nextnode[0] not in cost or new_cost < cost[nextnode[0]]:\n cost[nextnode[0]] = new_cost\n path[nextnode[0]] = cur\n prior_que.push(nextnode[0], new_cost+2*heuristic(cur,problem))\n\n elif problem.name == 'FoodSearchProblem':\n # import time\n # tstart 
= time.time()\n # subtotal = 0\n prior_que = util.PriorityQueue()\n prior_que.push(problem.getStartState(), 0)\n path, cost = {}, {}\n path[problem.getStartState()] = None\n cost[problem.getStartState()] = 0\n\n goalpos = (-1, -1)\n while not prior_que.isEmpty():\n cur = prior_que.pop()\n if problem.isGoalState(cur):\n goalpos = cur\n break\n for nextnode in problem.getSuccessors(cur):\n new_cost = cost[cur] + nextnode[2]\n # !!! item in cost : ((x,y), foodgrid), the (x,y) can be same however not the foodgrid\n if nextnode[0] not in cost or new_cost < cost[nextnode[0]]:\n cost[nextnode[0]] = new_cost\n path[nextnode[0]] = cur\n # substart = time.time()\n prior_que.push(nextnode[0], new_cost+2*heuristic(cur,problem))\n # subtotal += time.time() - substart\n if goalpos == (-1, -1):\n print(\"CAN NOT FIND GOAL\")\n return\n\n def pos_2_dir(successor, predecessor):\n if predecessor[0][1] - successor[0][1] == 0:\n if predecessor[0][0] - successor[0][0] == 1:\n return 'West'\n else:\n return 'East'\n elif predecessor[0][1] - successor[0][1] == 1:\n return 'South'\n else:\n return 'North'\n \n def inverse_path(path, goalpos):\n res = []\n while True:\n rootpos = path[goalpos]\n if rootpos == None:\n break\n res.append(pos_2_dir(goalpos, rootpos))\n goalpos = rootpos\n return res\n\n # print('total time {}, heuristic time {}, percent {}'.format(time.time()-tstart,subtotal,subtotal/(time.time()-tstart)))\n return inverse_path(path, goalpos)[::-1]\n\n elif problem.name == 'CapsuleSearchProblem':\n\n def pos_2_dir(successor, predecessor):\n if predecessor[0][1] - successor[0][1] == 0:\n if predecessor[0][0] - successor[0][0] == 1:\n return 'West'\n else:\n return 'East'\n elif predecessor[0][1] - successor[0][1] == 1:\n return 'South'\n else:\n return 'North'\n \n def inverse_path(path, goalpos):\n res = []\n while True:\n rootpos = path[goalpos]\n if rootpos == None:\n break\n res.append(pos_2_dir(goalpos, rootpos))\n goalpos = rootpos\n return res\n\n # import time\n # tstart = time.time()\n # subtotal = 0\n prior_que = util.PriorityQueue()\n prior_que.push(problem.getStartState(), 0)\n path, cost = {}, {}\n path[problem.getStartState()] = None\n cost[problem.getStartState()] = 0\n\n goalpos = (-1, -1)\n while not prior_que.isEmpty():\n cur = prior_que.pop()\n if problem.isCapsule(cur):\n problem.capsulesEaten = True\n goalpos = cur\n break\n for nextnode in problem.getSuccessors(cur):\n new_cost = cost[cur] + nextnode[2]\n # !!! item in cost : ((x,y), foodgrid), the (x,y) can be same however not the foodgrid\n if nextnode[0] not in cost or new_cost < cost[nextnode[0]]:\n cost[nextnode[0]] = new_cost\n path[nextnode[0]] = cur\n # substart = time.time()\n prior_que.push(nextnode[0], new_cost+2*heuristic(cur,problem))\n # subtotal += time.time() - substart\n if goalpos == (-1, -1):\n print(\"CAN NOT FIND CAPSULE\")\n return\n res = inverse_path(path, goalpos)[::-1]\n\n while not prior_que.isEmpty():\n prior_que.pop()\n prior_que.push(cur, 0)\n path, cost = {}, {}\n path[goalpos] = None\n cost[goalpos] = 0\n\n goalpos = (-1, -1)\n while not prior_que.isEmpty():\n cur = prior_que.pop()\n if problem.isGoalState(cur):\n goalpos = cur\n break\n for nextnode in problem.getSuccessors(cur):\n new_cost = cost[cur] + nextnode[2]\n # !!! 
item in cost : ((x,y), foodgrid), the (x,y) can be same however not the foodgrid\n if nextnode[0] not in cost or new_cost < cost[nextnode[0]]:\n cost[nextnode[0]] = new_cost\n path[nextnode[0]] = cur\n # substart = time.time()\n prior_que.push(nextnode[0], new_cost+2*heuristic(cur,problem))\n # subtotal += time.time() - substart\n if goalpos == (-1, -1):\n print(\"CAN NOT FIND GOAL\")\n return\n\n res.extend(inverse_path(path, goalpos)[::-1])\n # print('total time {}, heuristic time {}, percent {}'.format(time.time()-tstart,subtotal,subtotal/(time.time()-tstart)))\n return res\n\n util.raiseNotDefined()", "title": "" }, { "docid": "174dbcc2a2678103c6c68a6caceccd15", "score": "0.57376575", "text": "def fitness(solution):\n cur_fit = 0\n for i in range(N):\n cur_fit += dist(solution[i % N], solution[(i + 1) % N])\n return cur_fit", "title": "" }, { "docid": "6bd3e7f9015de624bd9173712cbfc2e6", "score": "0.5729106", "text": "def Search(S,R,L,r,E,fold,p_tup,individual_bankrupt): #searches squares within a given radius r to do R&D on. This R&D effort is given by E and if successful states are changed from 1 to 2\r\n m=int(S.shape[0])\r\n n=int(S.shape[1])\r\n p_win=[]\r\n win_col=[]\r\n order=range(0,n) #gives an array of n column values\r\n rand.shuffle(order) #randomizes the column order for the R&D search\r\n \r\n for j in order:\r\n BPF_y=j\r\n BPF_x=fold[j]\r\n if BPF_x>=0 and individual_bankrupt[j]==0: #ensures that there is a BPF point around with R&D can be conducted and the column has a budget\r\n x_val,y_val,num_sites=Search_Index(m,n,BPF_x,BPF_y,r,L)\r\n if num_sites>0:\r\n one_index=RD(x_val,y_val,num_sites,E[j],S,R,L)\r\n x_val=one_index[0]\r\n y_val=one_index[1]\r\n for v in range(len(one_index[0])):\r\n c=x_val[v]\r\n d=y_val[v]\r\n count=0\r\n for x in range(max(0,c-1),min(m,c+2)): #searches up and down\r\n if S[x,d]==2:\r\n S[c,d]=2\r\n L[c,d]=S[c,d]\r\n if (c,d) in p_tup and ((c,d) not in p_win):\r\n p_win.append((c,d))\r\n win_col.append(j)\r\n count=1\r\n for y in range(max(0,d-1),min(d+2,n)): #searches left and right\r\n if S[c,y]==2:\r\n S[c,d]=2\r\n L[c,d]=S[c,d]\r\n if (c,d) in p_tup and ((c,d) not in p_win):\r\n p_win.append((c,d))\r\n win_col.append(j)\r\n count=1\r\n if count==1:\r\n twos_i=[c]\r\n twos_j=[d]\r\n Y=twocheck(S,L,twos_i,twos_j,p_tup,p_win,win_col,j) #searches like a chain for further changes to state 2\r\n S=Y[0]\r\n p_tup=Y[1]\r\n p_win=Y[2]\r\n win_col=Y[3]\r\n\r\n return p_win,win_col #array of tuples corresponding to the 'prizes' discovered during this round of Search and win_col is an array of the columns that were doing search when the prizes were found\r", "title": "" }, { "docid": "34c6a03168ac0f08d4e435f3cb4d5243", "score": "0.5728394", "text": "def cost_algorithm(m: int) -> int:\n return 10 * m", "title": "" }, { "docid": "ea0aa8b00aa345ce13a1a711f491ae3b", "score": "0.5721499", "text": "def heuristic(k):\n return k * k", "title": "" }, { "docid": "097763553329e20c3e0b37bf035b9c40", "score": "0.5703237", "text": "def greedy_cost_solve(log_adj_in: np.ndarray):\n tol = 1e-6 # tolerance for float comparison\n N = log_adj_in.shape[0]\n log_adj = log_adj_in.copy().reshape(N, N)\n orders = np.zeros([2, 0], dtype=int)\n costs = None\n\n for _ in range(N - 1):\n # compute tensor dims and costs\n N = log_adj.shape[0]\n dims = np.sum(log_adj, axis=0).reshape(N)\n comb_dims = np.add.outer(dims, dims)\n single_cost = comb_dims - log_adj\n\n # penalize trivial contractions and self-contractions\n triv_conts = (log_adj < tol)\n trimmed_costs = single_cost + 
np.max(single_cost.flatten()) * triv_conts\n trimmed_costs = trimmed_costs + np.max(trimmed_costs.flatten()) * np.eye(N)\n\n # find best contraction\n tensors_to_contract = np.divmod(np.argmin(trimmed_costs), N)\n i = max(tensors_to_contract)\n j = min(tensors_to_contract)\n\n # build new log adjacency\n log_adj[j, j] = log_adj[j, j] - 2 * log_adj[j, i]\n log_adj[j, :] = log_adj[j, :] + log_adj[i, :]\n log_adj[:, j] = log_adj[:, j] + log_adj[:, i]\n log_adj = np.delete(log_adj, i, axis=0)\n log_adj = np.delete(log_adj, i, axis=1)\n\n # build new orders\n orders = np.hstack((orders, np.asarray(tensors_to_contract).reshape(2, 1)))\n\n # tally the cost\n if costs is None:\n costs = single_cost[i, j]\n else:\n costs = costs + np.log10(1 + 10**(single_cost[i, j] - costs))\n\n return orders, costs", "title": "" }, { "docid": "6a6b4e479334dfe55942ba9755bf7a6e", "score": "0.57021546", "text": "def cost_raw(self, elem):\n return 0.5 * sp.sparse.linalg.norm(elem.evaluate(self.sigma_set) - self.target_matrix) ** 2", "title": "" }, { "docid": "e3545bea40436739796f2c236e970ff6", "score": "0.5699456", "text": "def aStarSearch(problem, heuristic=nullHeuristic):\n \"*** YOUR CODE HERE FOR TASK 3 ***\"\n initial_node = (problem.getStartState(),[],0)\n initial_state, initial_action, initial_cost = initial_node\n opened = util.PriorityQueue()\n closed = set()\n initial_heuristic = heuristic(initial_state, problem)\n opened.push(initial_node, initial_heuristic)\n while not opened.isEmpty():\n node = opened.pop()\n state, actions, cost = node\n if state not in closed:\n closed.add(state)\n if problem.isGoalState(state):\n return actions\n for successor in problem.getSuccessors(state):\n succ_state, succ_action, succ_cost = successor\n new_node = succ_state, actions + [succ_action], cost + succ_cost\n succ_heuristic = heuristic(succ_state, problem)\n f = (cost + succ_cost) + succ_heuristic\n opened.push(new_node, f)\n return 0\n util.raiseNotDefined()", "title": "" }, { "docid": "4654894c53e888b74bdc7348737f7ed3", "score": "0.56991667", "text": "def compute_d_pess():\n # u - v <= d(si, sj)\n A = [1, -1]\n bounds = [(-1, 1), (-1, 1)]\n\n d = np.zeros((src_state_space, 1, tgt_state_space, action_space))\n for s1_pos, s1_state in src_env.state2idx.items():\n a = src_agent.get_best_action(s1_state, src_possible_actions)\n next_state, reward_a, done, next_possible_states = src_env.step(a)\n src_env.position = s1_pos\n for s2_pos, s2_state in tgt_env.state2idx.items():\n tgt_env.position = s2_pos\n for b in range(action_space):\n p1 = -np.sum(src_env.tp_matrix[s1_state,a])\n p2 = np.sum(tgt_env.tp_matrix[s2_state,b])\n c = [p1, p2]\n # print (c)\n next_state, reward_b, done, next_possible_states = tgt_env.step(b)\n d[s1_state,0,s2_state,b] = math.fabs(reward_a - reward_b) + wasserstein_distance(src_env.tp_matrix[s1_state,a], tgt_env.tp_matrix[s2_state,b])\n b = [d[s1_state,0,s2_state,b]]\n # res = linprog(c, A_ub=A, b_ub=b, bounds=bounds, options={\"disp\": True})\n # print (res.fun)\n tgt_env.position = s2_pos\n return d", "title": "" }, { "docid": "66095001d9218db0bdf66bf00e7b0ed1", "score": "0.56950873", "text": "def estimated_cost_to_goal(self, node):", "title": "" }, { "docid": "af83d5b821d23c2d7e86c78f8a5d1787", "score": "0.56891334", "text": "def start_search(self,max_iter,initial_solution = \"random\",\n movement = \"swap\", tabu_time = 8, CE_iter =100):\n\n self.tabu_time = int(self.graph_size/ tabu_time)\n\n if initial_solution == \"random\":\n self.best_route = [0] + list(np.random.permutation(\n [x for x 
in range(1,self.graph_size)]))\n        elif initial_solution == \"greedy\":\n            self.best_route = self.greedy_solution()\n        elif initial_solution == \"natural\":\n            self.best_route = list(np.arange(self.graph_size))\n        else:\n            raise ValueError(\"Incorrect value\")\n\n        self.best_distance = self.tsp.compute_distance(self.best_route)\n\n        if movement == \"swap\":\n            self.movement = self.swap\n        elif movement == \"insert\":\n            self.movement = self.insert\n        elif movement == \"invert\":\n            self.movement = self.invert\n        else:\n            raise ValueError(\"Incorrect value\")\n\n        self.current_route = self.best_route[:]\n        self.current_distance = self.best_distance\n\n\n        iter_without_improvement = 0\n\n        for _ in range(max_iter):\n\n            next_distance = -1\n            next_route = []\n            new_tabu = []\n\n            for i in range(1,self.graph_size):\n                for j in range(i+1,self.graph_size):\n                    in_tabu = False\n                    # if (i == j) or (i == 0) or (j==0):\n                    #     continue\n                    self.neighbour_route = self.movement(i,j)\n                    self.neighbour_distance = self.tsp.compute_distance(\n                        self.neighbour_route)\n\n                    for tabu in self.tabu_list:\n                        if (i,j) == tabu[0]:\n                            in_tabu = True\n                            break\n\n                    # Aspiration check\n                    if in_tabu == True and self.neighbour_distance>=self.best_distance:\n                        continue\n\n                    if((next_distance == -1) or (self.neighbour_distance < next_distance)):\n                        next_route = self.neighbour_route[:]\n                        next_distance = self.neighbour_distance\n                        next_tabu = [(i,j),self.tabu_time]\n\n            if next_distance<self.best_distance:\n                self.best_distance = next_distance\n                self.best_route = next_route[:]\n            else:\n                iter_without_improvement +=1\n\n\n            for tabu in self.tabu_list:\n                tabu[1] -= 1\n                if tabu[1] == 0:\n                    self.tabu_list.remove(tabu)\n\n            self.tabu_list.append(next_tabu)\n            # print(next_tabu)\n            # print(len(self.tabu_list))\n\n\n            # Critical event\n            if iter_without_improvement<CE_iter:\n                self.current_route = next_route[:]\n                self.current_distance = next_distance\n            else:\n                self.current_route = [0] + list(np.random.permutation([x for x in range(1,self.graph_size)]))\n                self.current_distance = self.tsp.compute_distance(self.current_route)\n                if self.current_distance < self.best_distance:\n                    self.best_route = self.current_route[:]\n                    self.best_distance = self.current_distance\n\n                iter_without_improvement = 0\n\n        return self.best_distance, self.best_route", "title": "" }, { "docid": "31fe1fdfd801b929bc4c6693074cb9d2", "score": "0.56834686", "text": "def greedy(self, src, goal, est_cost):\n        node_heap = [(est_cost[src], SearchNode(src))]\n        current_node = heapq.heappop(node_heap)\n        while current_node[1].name is not goal:\n            for n in self.forward_nodes(current_node[1].name):\n                heapq.heappush(node_heap, (est_cost[src], SearchNode(n.name, n.distance, current_node[1])))\n            current_node = heapq.heappop(node_heap)\n        return current_node[1]", "title": "" }, { "docid": "37702ff4d47e7b0f1adf3984b61d1f1a", "score": "0.5681939", "text": "def astar_search(problem, h=None):\n    h = memoize(h or problem.h, 'h')\n    return best_first_graph_search(problem, lambda n: n.path_cost + h(n))", "title": "" }, { "docid": "28c1c647d55bd127c13a9d4618b09b38", "score": "0.5661458", "text": "def a_star_search(starting_node, goal_node):\n    visited_nodes_in_order = []\n    # open list\n    open_lists = []\n    # closed list\n    close_lists = []\n\n    # add the start node to the closed list\n    close_lists.append(starting_node)\n    
visited_nodes_in_order.append(starting_node.ID)\n    min_node = starting_node\n    now_id = min_node.ID\n\n    for node in min_node.connected_nodes:\n        open_lists.append(node)\n\n    # start searching\n    while goal_node!=now_id:\n        # pick the lowest-cost node from the open list\n        min = 1000.0\n        temp = None\n        for n in open_lists:\n            if (n[0]+n[1].heuristic_cost)<min:\n                min = n[0]+n[1].heuristic_cost\n                temp = n\n\n        if temp!=None:\n            close_lists.append(temp)\n            open_lists.remove(temp)\n            visited_nodes_in_order.append(temp[1].ID)\n            now_id = temp[1].ID\n\n            for node in temp[1].connected_nodes:\n                open_lists.append(node)\n\n\n    return visited_nodes_in_order", "title": "" }, { "docid": "15accbf4740a2676eca7df22fe6a77ec", "score": "0.5654353", "text": "def vns(sol, k_max, t_max):\n    start_time = time.time() \n    current_best_sol = sol\n    while t_max > time.time() - start_time:\n        new_sol = shaking(current_best_sol, k_max)\n        new_sol = local_search_2opt(new_sol)\n        if new_sol.g < current_best_sol.g:\n            current_best_sol = new_sol\n    \n    return current_best_sol", "title": "" }, { "docid": "60a657606e8d5e0afb1c3c5c736415e3", "score": "0.5642195", "text": "def a_star(self, src, goal, est_cost):\n        node_heap = [(est_cost[src], SearchNode(src))]\n        current_node = heapq.heappop(node_heap)\n        while current_node[1].name is not goal:\n            for n in self.forward_nodes(current_node[1].name):\n                heapq.heappush(node_heap,\n                               (current_node[1].distance + n.distance + est_cost[n.name],\n                                SearchNode(n.name, n.distance, current_node[1])))\n            current_node = heapq.heappop(node_heap)\n        return current_node[1]", "title": "" }, { "docid": "9b397366b5a810b91499c65937436bed", "score": "0.5636455", "text": "def test_big_optimal_algorithm():\n    pass", "title": "" }, { "docid": "84a25bdf45169315b3a28f11ed1eb5ae", "score": "0.5633725", "text": "def timed_swarm_search(self, swarm_size: int, run_time: int):\n        end_time = time.time() + run_time\n        gbest = self.pallet_problem.generate_random_solution()\n        gbest_cost = self.pallet_problem.evaluate_cost(gbest)\n        self.particles = [None for x in range(swarm_size)]\n\n        for i in range(swarm_size):\n            self.particles[i] = Particle(self.pallet_problem, self.pallet_problem.generate_random_solution())\n\n        while time.time() < end_time:\n            for j in range(len(self.particles)):\n                pbest = self.particles[j].update_particle(gbest)\n                pbest_cost = self.pallet_problem.evaluate_cost(pbest)\n\n                if pbest_cost < gbest_cost:\n                    gbest = pbest\n                    gbest_cost = pbest_cost\n\n        #print(f\"Best found by swarm search: {self.gbest} costing {self.gbest_cost}\")\n        return [gbest, gbest_cost]", "title": "" }, { "docid": "4ef54ba545754f548385804ecbbdccf0", "score": "0.56312263", "text": "def dp_select(values, weights, capacity):\n    \n    # convert params to n,K notation as used in equations in dynamic programming notes\n    n = len(values)\n    K = capacity\n    \n    # calculate table of optimal value by j,k (j items in 0..n, k is all capacities in 0..K)\n    # see 9:00 - 12:30 here: https://www.coursera.org/learn/discrete-optimization/lecture/wFFdN/knapsack-4-dynamic-programming\n    values_table = np.zeros((K+1,n+1), dtype=np.uint32)\n    \n    print(\"building DP optimal-value table for n,K: \", n, \",\", K)\n    print(\"Tracking Progress. 
j(n) = ....\\n-------------------\")\n for j in range(1, n+1):\n if j % 20 == 0:\n print(j)\n \n item_weight = weights[j-1]\n item_value = values[j-1]\n for k in range(1, K+1):\n if item_weight > k:\n values_table[k,j] = values_table[k, j-1]\n else:\n values_table[k,j] = max(values_table[k, j-1], item_value + values_table[k-item_weight, j-1])\n optimal_value = values_table[-1, -1]\n #print(f\"optimal value is {optimal_value}. Now proceeding to derive final item-set\")\n\n # from this table of optimal values, we now need to derive final item-set for optimal solution\n # logic of code below explained 12:30 - 14:00 at https://www.coursera.org/learn/discrete-optimization/lecture/wFFdN/knapsack-4-dynamic-programming\n taken = [0] * len(values)\n k = K # in keeping w/ eqs, K is total capacity but k is k'th row as we move through j,k table\n for j in range(n, 0, -1):\n if values_table[k,j] != values_table[k,j-1]:\n taken[j-1] = 1\n k = k - weights[j-1]\n \n return optimal_value, taken", "title": "" }, { "docid": "2483bdb4ffe0482425086a6ba765ee6a", "score": "0.5629168", "text": "def search(nbs, minx, maxx, miny, maxy, minz, maxz):\n steps = 11\n step_size_x = (maxx - minx // steps)\n step_size_y = (maxy - miny // steps)\n step_size_z = (maxz - minz // steps)\n\n best_num = 0\n lowest_sum = float('inf')\n while step_size_x > 1 and step_size_y > 1 and step_size_z > 1:\n best_low_x = minx\n best_low_y = miny\n best_low_z = minz\n\n for low_x in range(minx, maxx + 1, step_size_x):\n x = low_x + (step_size_x // 2)\n # x = random.randrange(low_x, low_x + step_size_x)\n\n for low_y in range(miny, maxy + 1, step_size_y):\n y = low_y + (step_size_y // 2)\n # y = random.randrange(low_y, low_y + step_size_y)\n\n for low_z in range(minz, maxz + 1, step_size_z):\n z = low_z + (step_size_z // 2)\n # z = random.randrange(low_z, low_z + step_size_z)\n\n pos = x, y, z\n # print(pos, best_num)\n num = num_in_range(pos, nbs)\n if num > best_num or (\n num == best_num and sum((x, y, z)) < lowest_sum):\n if sum((x, y, z)) < lowest_sum:\n lowest_sum = min(lowest_sum, sum((x, y, z)))\n best_num = num\n best_low_x = low_x\n best_low_y = low_y\n best_low_z = low_z\n print(best_num, lowest_sum, (x, y, z))\n\n minz = best_low_z\n maxz = minz + step_size_z\n\n miny = best_low_y\n maxy = miny + step_size_y\n\n minx = best_low_x\n maxx = minx + step_size_x\n\n step_size_x //= steps\n step_size_y //= steps\n step_size_z //= steps\n\n # now minx, miny, minz are a coordinate that gives the highest discovered\n # num_in_range, need to walk towards the origin to find the coordinate in\n # this \"best_num cloud\" that has the lowest manhattan dist to origin\n\n # then walk towards origin until at the edge of best_num cloud\n x = minx\n y = miny\n z = minz\n\n best_x = best_y = best_z = float('inf')\n\n while abs(x) < abs(best_x) or abs(y) < abs(best_y) or abs(z) < abs(best_z):\n best_x, best_y, best_z = x, y, z\n\n step_size_x = abs(x - 0) // steps\n step_size_x = step_size_x if x > 0 else -(step_size_x)\n while abs(step_size_x) >= 1:\n if num_in_range((x - step_size_x, y, z), nbs) >= best_num:\n x -= step_size_x\n else:\n step_size_x //= steps\n\n step_size_y = abs(y - 0) // steps\n step_size_y = step_size_y if y > 0 else -(step_size_y)\n while abs(step_size_y) >= 1:\n if num_in_range((x, y - step_size_y, z), nbs) >= best_num:\n y -= step_size_y\n else:\n step_size_y //= steps\n\n step_size_z = abs(z - 0) // steps\n step_size_z = step_size_z if z > 0 else -(step_size_z)\n while abs(step_size_z) >= 1:\n if num_in_range((x, y, z - 
step_size_z), nbs) >= best_num:\n z -= step_size_z\n else:\n step_size_z //= steps\n\n # Best found so far manually:\n # (Pdb) num_in_range((18488882, 11656388, 15564375), nbs)\n # 917\n # >>> sum((18488882, 11656388, 15564375))\n # 45709645\n return (x, y, z), best_num", "title": "" }, { "docid": "e526d88faa0cf9b47768ec9e2e0b8bf0", "score": "0.5627138", "text": "def cost_function(data, alpha):\n # Quantize the data into bins of size 2 and recast them to ints.\n quantized_data = (numpy.floor(data / age_bin_size)\n * age_bin_size).astype(int)\n\n # Try every non-redundant (i.e. even values only) and save the weighted\n # variances for comparison.\n all_weighted_variances = []\n for threshold in quantized_data:\n left_indices = (quantized_data <= threshold).nonzero()\n left_data = quantized_data.take(left_indices)\n weight_left = left_data.size/quantized_data.size\n variance_left = left_data.var()\n\n right_indices = (quantized_data > threshold).nonzero()\n right_data = quantized_data.take(right_indices)\n if right_data.size == 0:\n weight_right = 0\n variance_right = 0\n else:\n weight_right = right_data.size/quantized_data.size\n variance_right = right_data.var()\n\n # The value is the weighted variance of each side plus the\n # regularization factor. Norm-factor is constant at 100, but alpha\n # is a provided argument for the function.\n all_weighted_variances.append(weight_left*variance_left +\n weight_right*variance_right +\n numpy.abs(left_data.size -\n right_data.size)\n / norm_factor * alpha)\n\n # Iterate through the data to find the minimum and print out any duplicates\n # found and ignored during the process.\n min_variance = all_weighted_variances[0]\n min_index = 0\n for index in range(1, len(all_weighted_variances)):\n if all_weighted_variances[index] < min_variance:\n min_index = index\n min_variance = all_weighted_variances[index]\n\n print(\"Threshold(s) for minimum variance: {0}\".format(quantized_data[min_index]))\n print(\"Minimum variance = {0}\".format(min_variance))", "title": "" }, { "docid": "7ad066181852f5e6faa9072182d9ee58", "score": "0.56219256", "text": "def linearSearch(arr, target):\n for index, element in enumerate(arr, start=0):\n if element.calculatePriceSizeRatio() == target:\n return index\n return -1", "title": "" }, { "docid": "56f5b4c1d94f0d5891125e707aae79e6", "score": "0.5621441", "text": "def aStarSearch(problem, heuristic=nullHeuristic):\n frontier = util.PriorityQueue()\n\n explored = []\n\n startState = problem.getStartState()\n start = (startState, [], 0)\n\n frontier.push(start, 0)\n\n while (not frontier.isEmpty()):\n # cost + heauristic \n current_state, actions, current_cost = frontier.pop()\n\n # put popped node into explored list\n current_node = (current_state, current_cost)\n\n # put explored node into explored list\n current_node = (current_state, current_cost)\n\n explored.append((current_state, current_cost))\n\n # check if reached goal\n if (problem.goalTest(current_state)):\n return actions\n else:\n # loop for recovering each possible actions\n for i in problem.getActions(current_state):\n # find the next action to take\n new_actions = actions + [i]\n\n # find cost given available actions\n new_cost = current_cost + problem.getCostOfActions(new_actions)\n\n # get the next state given the current state and action retrieved\n new_state = problem.getResult(current_state, i)\n\n # new node is the next state found per the next action and total cost\n newNode = (new_state, new_actions, new_cost)\n\n # check if this node has already been 
explored\n already_found = False\n for i in explored:\n explored_state, explored_cost = i\n # if the state has been explored as well as cost being >= than explored, skip\n if (new_state == explored_state and new_cost >= explored_cost):\n already_found = True\n # if current node hasn't been explored, push to frontier and mark as explored\n if (not already_found):\n # push to frontier\n frontier.push(newNode, new_cost + heuristic(new_state, problem))\n # update explored list\n explored.append((current_state, new_cost))\n\n return actions", "title": "" }, { "docid": "af9ab93864660984c0375184b46fed8c", "score": "0.56197476", "text": "def aStarSearch(problem, heuristic=nullHeuristic):\n \"\"\"Call heuristic(s,problem) to get h(s) value.\"\"\"\n \"*** YOUR CODE HERE ***\"\n if problem.isGoalState(problem.getStartState()):\n return [] \n\n \"\"\"As we need LIFO for DFS\"\"\"\n from util import PriorityQueue\n \"\"\" Initiliazing state of the problem\"\"\"\n Frontier = PriorityQueue() \n \"\"\"Pushing valid start state (this time we also have to consider priorities \"\"\" \n Frontier.push(problem.getStartState(),0)\n \"\"\"Initializing empty explored set \"\"\"\n statePath=[] \n stateVisited = [] \n tempPath=[] \n \"\"\" Choosing a leaf node to remove it from Frontier\"\"\" \n xy = Frontier.pop() \n \"\"\"Initilaizing one more priority Queue for the path to our current \"\"\" \n pathCurrent=PriorityQueue() \n \"\"\"Running the loop until we are not in goal state \"\"\" \n while not problem.isGoalState(xy):\n \"\"\"Not a previosly visited node \"\"\"\n if xy not in stateVisited:\n stateVisited.append(xy)\n \"\"\"Getting Successors \"\"\"\n successorPath = problem.getSuccessors(xy)\n \"\"\"This is where a major change occurs comapred to other two searches, so to clearly illustrate i used them individually instead of 'paths' \"\"\"\n for coordinate,direction,cost in successorPath:\n newPath = statePath + [direction]\n \"\"\" Getting cost of path with state in hand\"\"\"\n costOfPath = problem.getCostOfActions(newPath) + heuristic(coordinate,problem)\n \"\"\"\n print(costOfPath)\"\"\"\n if coordinate not in stateVisited:\n \"\"\"\n print(direction)\n print(coordinate)\n \"\"\"\n Frontier.push(coordinate,costOfPath)\n pathCurrent.push(newPath,costOfPath)\n xy = Frontier.pop()\n statePath = pathCurrent.pop() \n return statePath", "title": "" } ]
120ccbba0c7350d61e4993ccac089151
patch existing users with email addresses
[ { "docid": "00004123bb44d268e53fe6b6e48adb09", "score": "0.580055", "text": "def patch_emails(csv_filename, verbose=False):\n patched = 0\n skipped = 0\n\n with open(csv_filename, 'r') as f:\n reader = csv.reader(f, delimiter=',')\n for row in reader:\n if len(row) > 1:\n username = row[0]\n email = row[1]\n if validate_email(email):\n try:\n found_user = User.objects.get(username=username)\n found_user.email = email\n found_user.is_active = True\n found_user.save()\n if verbose:\n print 'Patched %s: %s' % (username, email)\n patched += 1\n except ObjectDoesNotExist:\n skipped += 1\n if verbose:\n print 'Could not find username %s' % username\n else:\n skipped += 1\n if verbose:\n print 'Invalid email %s: %s' % (username, email)\n else:\n print 'Require username and email %s' % row\n skipped += 1\n return patched, skipped", "title": "" } ]
[ { "docid": "9bbdb970850cba99d90ec9e6e70e1cb3", "score": "0.7227075", "text": "def update_user_email(user_id, new_email):\n\n user = get_user_by_id(user_id)\n user.email = new_email\n db.session.commit()", "title": "" }, { "docid": "6a64ab15c225a824a0d60d9aabaf60df", "score": "0.70727867", "text": "def change_email(self, username, user_id, new_email):\n try:\n if type(new_email) == str and '@' in new_email and '.' in new_email:\n self.coll.update({'username': username,\n 'user_id': user_id},\n {'$set': {'email': new_email}})\n else:\n return 'This email does not supported by system'\n except Exception:\n return 'This email does not exist'", "title": "" }, { "docid": "bfa042b7b7182411ac869aa102ee587e", "score": "0.70673764", "text": "def forwards(apps, schema_editor):\n User = apps.get_model(\"auth\", \"User\")\n try:\n user = User.objects.get(username=USERNAME, email=OLD_EMAIL)\n except User.DoesNotExist:\n # Fake email doesn't need to updated if it doesn't exist\n return\n\n user.email = NEW_EMAIL\n user.save()", "title": "" }, { "docid": "82d3d175e90abab3b97083581ca580de", "score": "0.7002051", "text": "def patch_users(external_id, users):\n external_id = str(external_id)\n patch = current_app.config.get('PATCH_USERS', {})\n if external_id in patch:\n print(\"Patching company: {}\".format(external_id))\n users.extend(patch[external_id])\n return users", "title": "" }, { "docid": "f7781d0193fbb990ea6db8f316964653", "score": "0.6984003", "text": "def test_update_user_email_conflict(self):\n self.update_user(user_id=self.users[1]['id'],\n user_email=self.users[0]['email'], assert_status=409)", "title": "" }, { "docid": "e7610a7f920f1e7bdd84070cd2a869cd", "score": "0.68166906", "text": "def editEmail(username, oldEmail, newEmail):\n update_sql = \"UPDATE email SET email = %s WHERE username = %s AND email = %s\"\n exec_commit(update_sql, [newEmail, username, oldEmail])", "title": "" }, { "docid": "c84ceb85c373a701e7c2ac56df9233f5", "score": "0.6804232", "text": "def update_user(self, user, claims):\n up = UserProfile.objects.get(user=user)\n up.email_addr = claims.get('email')\n up.save()\n return user", "title": "" }, { "docid": "78d9755ddce48c4ba093a2f1567c868e", "score": "0.676197", "text": "def test_edit_email(self):\n # register two users, login them and get their login tokens\n reg_responses = [\n self.register_user(self.user_details1),\n self.register_user(self.user_details2)\n ]\n login_responses = [\n self.login_user(self.login_details1),\n self.login_user(self.login_details2)\n ]\n login_tokens = [\n self.get_token_from_response(response)\n for response in login_responses\n ]\n\n # try changing one of the user email to that of the other user\n self.kwargs[\"data\"] = json.dumps({\n \"firstname\": \"Jonah\",\n \"lastname\": \"Pat\",\n \"email\": self.user_details2[\"email\"]\n })\n self.kwargs[\"headers\"] = {\"x-access-token\": login_tokens[0]}\n\n response = self.test_client().put(\"/yummy/api/v1.0/users/\",\n **self.kwargs)\n data = response.data.decode()\n self.assertEqual(response.status_code, 400)\n self.assertIn(\n f\"The email \\'{self.user_details2['email']}\\' is already in use\",\n data)", "title": "" }, { "docid": "0a26200c597dc5a6569f9993e0524fe8", "score": "0.6727941", "text": "def test_partially_update_user_using_patch(self):\n pass", "title": "" }, { "docid": "34ee12a68aae0bb2163825a46f5c69b3", "score": "0.66578877", "text": "def update_email(self, new_email):\n if self.user.email == new_email:\n return\n\n meta = self.get_meta()\n if 'old_emails' not in meta:\n 
meta['old_emails'] = []\n meta['old_emails'].append([self.user.email, datetime.now(UTC).isoformat()])\n self.set_meta(meta)\n self.save()\n\n self.user.email = new_email\n self.user.save()", "title": "" }, { "docid": "9d17fe749fb9f6a93391526c3e8c5178", "score": "0.6653716", "text": "def test_email_field(self):\n\n emails = ['[email protected]', '[email protected]',\n '[email protected]', '[email protected]',\n '[email protected]', '[email protected]']\n people = Person.objects.order_by('pk').all()\n for idx, person in enumerate(people):\n person.email = emails[idx]\n Person.objects.bulk_update(people)\n\n people = Person.objects.order_by('pk').all()\n for idx, person in enumerate(people):\n self.assertEqual(person.email, emails[idx])", "title": "" }, { "docid": "c2f2d7c7222dc7d60b2ed6d663505f80", "score": "0.65925676", "text": "def put(self, multiple=None, _check_permissions=True, _base_permissions=True, formIdx = None, **kwargs):\n if self.copy_email:\n kwargs['username'] = kwargs['email']\n super(UserResource,self).put(multiple=multiple, _check_permissions=_check_permissions,_base_permissions=_base_permissions,formIdx=formIdx, **kwargs)", "title": "" }, { "docid": "917c140566f7a629fcd3ca7f934483c6", "score": "0.65678006", "text": "def update_user_email(request):\n if all(x in request.POST for x in ['email']):\n email = request.POST['email']\n else:\n return JsonResponse({\n 'state': 'fail', \n 'code': 1,\n 'msg': 'Email parameter is required.'\n }) \n\n if request.user.login_with_oauth == True:\n return JsonResponse({\n 'state': 'fail', \n 'code': 2,\n 'msg': 'OAuth logged in user cannot change email.'\n }) \n\n if Users.objects.filter(email=email).count() > 0:\n return JsonResponse({\n 'state': 'fail', \n 'code': 3,\n 'msg': 'Email overlapped.', \n }) \n else:\n user = request.user\n user.email = email\n user.email_verified = False\n user.save()\n \n return JsonResponse({\n 'state': 'success', \n 'code': 1,\n 'msg': 'Succeed to change email.'\n })", "title": "" }, { "docid": "648ce600573588d047e4ce9ea52b722e", "score": "0.6494308", "text": "def test_pre_created_user_is_matched_by_email(self):\n user = UserFactory.create(oauth_user_id=None, email='[email protected]')\n user_info = dict(\n id='12345', email='[email protected]', given_name='Rob',\n family_name='Charlwood', name='Rob Charlwood')\n backend = OauthenticationBackend()\n with mock.patch(\n 'accounts.backends.get_user_info', return_value=user_info):\n result = backend.authenticate(oauth_credentials=MockCredentials())\n\n # It should have used the existing user but updated it with the new\n # profile values\n self.assertEqual(result, user)\n\n # Now check that\n # (1) the returned user has been updated with the latest profile info\n # (2) User object in the DB has been updated with latest profile info\n user.refresh_from_db()\n for user_obj in result, user:\n self._check_user_info_values_match_user_fields(user_info, result)\n self._check_user_info_values_match_user_fields(user_info, user)", "title": "" }, { "docid": "6d37391a086019e947df2573b0e4d6d3", "score": "0.64906955", "text": "def test_change_email(self):\n u1 = User.objects.get(username=\"U1\")\n u1.changeEmail(\"[email protected]\")\n self.assertEqual(u1.email, \"[email protected]\")", "title": "" }, { "docid": "b36cc1cbf1eda6f48801c2654d097533", "score": "0.64733773", "text": "def set_email_by_username(sender, **kwargs):\n user = kwargs['instance']\n if not user.email:\n user.email = '{0}@{1}'.format(user.username, settings.DEVILRY_DEFAULT_EMAIL_SUFFIX)", "title": "" }, 
{ "docid": "74778e644499872775592a5b02f9a941", "score": "0.64569783", "text": "def put(self, request, user_pk, format=None):\n user = self.get_user(user_pk)\n try:\n email_address = user.emailaddress_set.get(\n email=request.data.get(\"email\"), verified=True\n )\n email_address.set_as_primary()\n except ObjectDoesNotExist:\n raise Http404\n serializer = self.serializer_class(\n instance=email_address, data=request.data, context={\"request\": request}\n )\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)", "title": "" }, { "docid": "f202b4a3b486cf513d5335e5504f4a5d", "score": "0.6453652", "text": "def backwards(apps, schema_editor):\n User = apps.get_model(\"auth\", \"User\")\n try:\n user = User.objects.get(username=USERNAME, email=NEW_EMAIL)\n except User.DoesNotExist:\n # Fake email doesn't need to reverted if it wasn't changed by migration\n return\n\n user.email = OLD_EMAIL\n user.save()", "title": "" }, { "docid": "71086dedff69569c13a0256ef4f38be6", "score": "0.6380836", "text": "def replace_email_with_user(cls, user, relationship_attr):\n assert cls.allow_emails\n updated = set()\n query = (cls.query\n .filter(cls.email.in_(user.all_emails))\n .options(noload('user'), noload('local_group'), joinedload(relationship_attr).load_only('id')))\n for entry in query:\n parent = getattr(entry, relationship_attr)\n existing = (cls.query\n .with_parent(parent, 'acl_entries')\n .options(noload('user'), noload('local_group'))\n .filter_by(principal=user)\n .first())\n if existing is None:\n entry.principal = user\n else:\n existing.merge_privs(entry)\n parent.acl_entries.remove(entry)\n updated.add(parent)\n db.session.flush()\n return updated", "title": "" }, { "docid": "ceb8ff483b4f46acd2fc7d51ad124241", "score": "0.63790405", "text": "def modify_user_by_mail(self, user_email, **kwargs):\n return self._modify_user({'mail': user_email}, **kwargs)", "title": "" }, { "docid": "ef494da29ecd15316a5bdeb9d946e408", "score": "0.63635164", "text": "def update_user(user, identity):\n if user.fxa_id != identity['uid'] or user.email != identity['email']:\n log.info(\n 'Updating user info from FxA for {pk}. 
Old {old_email} {old_uid} '\n 'New {new_email} {new_uid}'.format(\n pk=user.pk,\n old_email=user.email,\n old_uid=user.fxa_id,\n new_email=identity['email'],\n new_uid=identity['uid'],\n )\n )\n user.update(fxa_id=identity['uid'], email=identity['email'])\n if user.auth_id is None:\n # If the user didn't have an auth id (old user account created before\n # we added the field), generate one for them.\n user.update(auth_id=UserProfile._meta.get_field('auth_id').default())", "title": "" }, { "docid": "2f8b3ed2bea1996ea5196feb6f0699a4", "score": "0.6361763", "text": "def update_email(token):\n try:\n token_data = decode_token(token)\n email = token_data['email']\n username = token_data['username']\n except Exception:\n raise MyException('please click the link to verify your email', status_code=404)\n user = User.query.filter(User.username == username).first()\n if user is None:\n raise MyException('invalid user', status_code=404)\n user.email = email\n db.session.commit()\n user.email_status = True\n return {\"updated_email\": email}", "title": "" }, { "docid": "7115bbb765ec03879bf707ce4a6073d6", "score": "0.62921304", "text": "def save_formset(self, request, form, formset, change):\n super(AccountsUserAdmin, self).save_formset(request, form, formset, change)\n if not change and formset.model == EmailAddress:\n # we are adding a new user and this is the EmailAddress formset\n # make sure the email entered on the user is also added as the primary email\n if formset.instance.email:\n EmailAddress.objects.add_email(formset.instance, formset.instance.email, make_primary=True)", "title": "" }, { "docid": "8ce0e5e827bb3acb327dac3bcc1ee7d8", "score": "0.62826526", "text": "def modify_email(self, username, newEmail):\n\t\t# Booleanos que indican si el tipo es el correcto.\n usernameIsStr = type(username) == str\n newEmailIsStr = type(newFullname) == str \n \n if ( usernameIsStr and newEmailIsStr ):\n # Booleanos que indican si cumplen con los limites.\n usernameLenValid = 1 <= len(username) <= 16\n newEmailLenValid = 1 <= len(newFullname) <= 30\n \n if ( usernameLenValid and newEmailLenValid ):\n query = self.find_email(newEmail)\n \n if (query != []) :\t\n db.session.query(User).filter(User.username==username).\\\n update({'email':(newEmail)})\n db.session.commit()\n return( True )\n\n return( False )", "title": "" }, { "docid": "d20e554eb2a5ec423a59f8864372cb1d", "score": "0.62596756", "text": "def user_profile_setemail():\n return dumps(stub.user_profile_setemail(request.form.get('token'),\n request.form.get('email')))", "title": "" }, { "docid": "e9a198d2ae389c3df794555b8c5e1ff2", "score": "0.6243277", "text": "def test_change_user(self):\n\t\tuser_object1 = User.objects.create(email='test_user1')\n\t\tuser_object2 = User.objects.create(email='test_user2')\n\t\tuser = US.objects.get(username='admin')\n\t\tdata = {'email': 'changed_email'}\n\t\tclient = APIClient()\n\t\tclient.force_authenticate(user=user)\n\t\tresponse1 = client.put('/db/users/1/', data, format='json')\n\t\tresponse2 = client.put('/db/users/2/', data, format='json')\n\t\tself.assertEqual(response1.status_code, status.HTTP_200_OK)\n\t\tself.assertEqual(response2.status_code, status.HTTP_400_BAD_REQUEST)\n\t\tself.assertEqual(User.objects.get(pk=1).email, 'changed_email')", "title": "" }, { "docid": "6afd82995b60fa4c500fcacb1431dc42", "score": "0.6242978", "text": "def _update_or_create_model(self, token, new_email):\n UserEmailUpdate.objects.update_or_create(\n defaults={'new_email': new_email, 'token': token},\n 
user=self.request.user\n        )", "title": "" }, { "docid": "a3e8e0a335b3e11541cc9293b512fe3f", "score": "0.6236676", "text": "def sync_users(users):\n    # Get the column names\n    column = users.get('column')\n    rows = users.get('rows', [])\n    added = dict()\n    # Check if this user already exists.\n    for row in rows:\n\n        # Map column names to row.\n        row = {col: val for col, val in zip(column, row)}\n        # Skip if user exists.\n        user = User.objects.filter(email=row['email']).first()\n        if user:\n            added[row['user_id']] = user\n            continue\n\n        # Create the user\n        username = f\"{row['name'].replace(' ', '-')}-{row['user_id']}\"\n        user = User.objects.create(username=username, email=row['email'],\n                                   password=row['password'], is_active=row['is_active'],\n                                   is_staff=row['is_staff'], is_superuser=row['is_admin'])\n        text = util.strip_tags(row['info'])\n        # Update the Profile\n        Profile.objects.filter(user=user).update(digest_prefs=row['digest_prefs'],\n                                                 watched_tags=row['watched_tags'],\n                                                 twitter=row['twitter_id'],\n                                                 uid=row['user_id'], name=row['name'],\n                                                 message_prefs=row['message_prefs'],\n                                                 role=row['type'], last_login=row['last_login'],\n                                                 html=row['info'], date_joined=row['date_joined'],\n                                                 location=row['location'], website=row['website'],\n                                                 scholar=row['scholar'], text=text,\n                                                 score=row['score'], my_tags=row['my_tags'],\n                                                 new_messages=row['new_messages'])\n        added[row['user_id']] = user\n\n    logger.info(f\"Updated {len(rows)} users.\")\n    return added", "title": "" }, { "docid": "b3e9a0de01f10bb2e5c1794619091bbe", "score": "0.6233225", "text": "def update_all_users():\n    for user in User.query.all():\n        add_or_update_user(user.name)", "title": "" }, { "docid": "e6070ea3b03a1d07b9a2bc1e2cc05877", "score": "0.6229521", "text": "def put_users_into(users, data, defaults):\n    data['recipients'] = [user.to_data(defaults = True, include_internals = True) for user in users]\n    \n    return data", "title": "" }, { "docid": "c29cecd202946f3c61a3fc8e9cb510e4", "score": "0.6184728", "text": "def update(self, instance, validated_data):\n        password = validated_data.pop('password', None)\n        email = Util.normalize_email(validated_data.pop('email', None))\n        validated_data['email'] = email\n        user = super().update(instance, validated_data)\n\n        if password:\n            user.set_password(password)\n            user.save()\n\n        return user", "title": "" }, { "docid": "8b2e6442f71445169ce4a1058b630969", "score": "0.617946", "text": "def competitor_post_save_sync_to_user_handler(sender, instance, **kwargs):\n\n    instance.user.first_name = instance.first_name\n    instance.user.last_name = instance.last_name\n    instance.user.email = instance.email\n\n    instance.user.save()", "title": "" }, { "docid": "9f9e8d232aa47d12db1bdf0b482f94d3", "score": "0.6174696", "text": "def patch(current_user, self, email):\n        user = self.db.find_user_by_email(email)\n        if user is None:\n            return nonexistent_user()\n\n        if current_user['isadmin'] is not True:\n            return jsonify({\n                \"status\": 403,\n                \"message\": \"Only an admin can change the status of a user\"\n            })\n\n        user_status_updated = self.db.edit_user_status(email)\n        if user_status_updated is True:\n            success_message = {\n                \"email\": email,\n                \"message\": \"User status has been updated\"\n            
}\n            return jsonify({\n                \"status\": 200,\n                \"data\": success_message\n            })", "title": "" }, { "docid": "ec41f21e51d5c93f48a9f147412788d0", "score": "0.61695564", "text": "def test_user_set_email_update_gravatar_url(self):\n        gravatar_url = self.user.gravatar_url\n\n        self.user.email = '[email protected]'\n        self.user.save()\n\n        self.assertFalse(gravatar_url == self.user.gravatar_url)", "title": "" }, { "docid": "4f94a500a8ed6da951b6c9bd29b0387a", "score": "0.613072", "text": "def test_update_user(self):\n        pass", "title": "" }, { "docid": "cda299675e32ea02bd212c5c8f3b0d2f", "score": "0.6117981", "text": "def test_update_user(self):\n        user = User.objects.create_user(**user_data)\n        self.assertIsInstance(user, User)\n\n        new_first_name = \"Mark\"\n        user.first_name = new_first_name\n        user.save()\n\n        self.assertEqual(user.__str__(), user_data['email'])\n        self.assertEqual(user.first_name, new_first_name)", "title": "" }, { "docid": "2a475f84d9c1f7f9bf91c7c88bcf49cf", "score": "0.6112522", "text": "async def mutate(\n            self, info, id: int, email: str = None, name: str = None):\n        _ = await security.get_current_user(info)\n        user = await User.filter(id=id).first()\n\n        if not user:\n            raise Exception(\"Invalid ID\")\n        if email:\n            user.email = email\n        if name:\n            user.name = name\n        await user.save()\n        return UpdateUser(user=user)", "title": "" }, { "docid": "e906e1b4e0025bd46ef2aeb3834dacf2", "score": "0.60989964", "text": "def patch(self, args, user):\n        with api.commit_or_abort(\n            db.session,\n            default_error_message=\"Failed to update user details.\"\n        ):\n            #parameters.PatchUserDetailsParameters.perform_patch(args, user)\n            db.session.merge(user)\n        return user", "title": "" }, { "docid": "87b6ecc93a4c37d342df6c24e5fe2cfd", "score": "0.60975105", "text": "def test_upper_case_email(self):\n        updated_user_spec = deepcopy(self.user_specs[0])\n        updated_user_spec[\"email\"] = '[email protected]'\n\n        bulk_upload_async(\n            self.domain.name,\n            list([updated_user_spec]),\n            list([]),\n            list([])\n        )\n        self.assertEqual(self.user.email, updated_user_spec['email'].lower())", "title": "" }, { "docid": "8b325f3677182c09157cf1df31af540c", "score": "0.6089628", "text": "def test_new_user_email_normalized(self):\n        sample_emails = [\n            [\"[email protected]\", \"[email protected]\"],\n            [\"[email protected]\", \"[email protected]\"],\n            [\"[email protected]\", \"[email protected]\"],\n            [\"[email protected]\", \"[email protected]\"],\n        ]\n        for email, expected in sample_emails:\n            user = get_user_model().objects.create_user(email, \"sample123\")\n            self.assertEqual(user.email, expected)", "title": "" }, { "docid": "8417ac3096351f44e9bdf76707befaa8", "score": "0.60864794", "text": "def update_user(self, request):\n        stored_backend = auth.load_backend(\n            request.session.get(auth.BACKEND_SESSION_KEY, '')\n        )\n        if isinstance(stored_backend, RemoteUserBackend):\n            user = request.user\n            email = request.META.get(\"ADFS_EMAIL\", None)\n            if email is not None:\n                user.email = email\n            firstname = request.META.get(\"ADFS_FIRSTNAME\", None)\n            if firstname is not None:\n                user.first_name = firstname\n            lastname = request.META.get(\"ADFS_LASTNAME\", None)\n            if lastname is not None:\n                user.last_name = lastname\n            self.update_user_groups(request)\n            # Add user in the base group.\n            
self.add_group_if_nonexistent(settings.BASE_GROUP)\n base_group = Group.objects.get(name=settings.BASE_GROUP)\n if base_group not in user.groups.all():\n user.groups.add(base_group)\n # Save user object\n user.save()", "title": "" }, { "docid": "399a2473e4177ce4ef772b06888855e1", "score": "0.6080066", "text": "def create_email_change(self, user, new_email, send_email=True):\n if not user.email:\n user.email = new_email\n user.save()\n return\n # Generate a salted SHA1 hash to use as a key.\n salt = sha1(str(random.random())).hexdigest()[:5]\n action_key = sha1(salt+user.email).hexdigest()\n\n # And finally create the record.\n user.email_new = new_email\n user.save()\n record, created = self.get_or_create(user=user,\n type='E',\n defaults={'action_key': action_key})\n\n if send_email:\n current_domain = Site.objects.get_current().domain\n subject = \"Change your email address at %s\" % current_domain\n message_template = loader.get_template('accounts/password_reset.txt')\n message_context = Context({'site_url': '%s://%s' % (settings.SITE_PROTOCOL, current_domain),\n 'action_key': record.action_key,\n 'expiration_days': settings.ACTION_RECORD_DAYS,\n 'user': user})\n message = message_template.render(message_context)\n user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL)\n return record", "title": "" }, { "docid": "8413a5d738524036e06a2d3e96b09d6f", "score": "0.60650396", "text": "def test_conflicting_user_with_same_email_is_not_reused(self):\n user = UserFactory.create(oauth_user_id='9999', email='[email protected]')\n user_info = dict(\n id='12345', email='[email protected]', given_name='Rob',\n family_name='Charlwood', name='Rob Charlwood')\n backend = OauthenticationBackend()\n with mock.patch(\n 'accounts.backends.get_user_info', return_value=user_info):\n result = backend.authenticate(oauth_credentials=MockCredentials())\n\n # It should have created a new user\n self.assertNotEqual(result, user)\n self.assertEqual(get_user_model().objects.count(), 2)\n\n # The existing user should now have a blank 'email' field\n user.refresh_from_db()\n self.assertEqual(user.email, '')\n\n # We should get a new user object with correct email & oauth_user_id\n self._check_user_info_values_match_user_fields(user_info, result)\n\n # And because it created a new user via oauth, it shouldn't have a\n # usable password\n self._check_has_unusable_password(result)", "title": "" }, { "docid": "bd44c2d799b35c27f0202ee577d3bbf6", "score": "0.60633296", "text": "def save(self, *args, **kwargs):\n if self.instance.pk is None:\n site = Site.objects.get(pk=settings.SITE_ID)\n self.instance.user = invitation_backend().invite_by_email(\n self.cleaned_data['email'],\n **{'first_name': self.cleaned_data['first_name'],\n 'last_name': self.cleaned_data['last_name'],\n 'organization': self.cleaned_data['organization'],\n 'domain': site})\n self.instance.user.first_name = self.cleaned_data['first_name']\n self.instance.user.last_name = self.cleaned_data['last_name']\n self.instance.user.email = self.cleaned_data['email']\n self.instance.user.save()\n return super(AccountUserForm, self).save(*args, **kwargs)", "title": "" }, { "docid": "be8d1b5e264297aa12fa31b61c7e179c", "score": "0.6044186", "text": "def updateUsers(self, name, ip):\n self.users.append((name, ip))", "title": "" }, { "docid": "ea384b0b79dfbcd8348220ed177d73c8", "score": "0.6041603", "text": "def test_get_user_addr_additional_no_default(self):\n app_settings.set(\n 'projectroles',\n 'user_email_additional',\n '{};{}'.format(USER_ADD_EMAIL, 
USER_ADD_EMAIL2),\n user=self.user,\n )\n self.user.email = ''\n self.assertEqual(\n get_user_addr(self.user), [USER_ADD_EMAIL, USER_ADD_EMAIL2]\n )", "title": "" }, { "docid": "b51fe309023ecf70d43f74916b3b4149", "score": "0.604104", "text": "def test_success_update_profile(self, user_index):\n\n self.client.put(\n '/api/v1/users/{}/'.format(self.user.id),\n self.form_data, format='json'\n )\n user = User.objects.get(id=self.user.id)\n self.assertEqual(user.email, self.form_data['email'])", "title": "" }, { "docid": "99d82175b7144a9381636d1ed93f5b57", "score": "0.6031948", "text": "def test_update_user_information(self):\n\n form = ProfileEditForm(data=self.params, instance=self.user)\n form.submit()\n self.user.refresh_from_db()\n self.assertEqual(self.params['email'], self.user.email)", "title": "" }, { "docid": "9920d3e0af4d146c1f8caecf9a700474", "score": "0.6018317", "text": "def test_update_profile_data(self):\n\n new_email = '[email protected]'\n data = json.dumps({'user': {'email': new_email}})\n\n request = self.factory.patch('/core/api/profile/',\n data=data,\n content_type='application/json')\n\n force_authenticate(request, user=self.admin_user, token=self.admin_user.auth_token)\n response = self.view(request, pk=self.test_profile.id)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertTrue('user' in response.data)\n self.assertEqual(response.data['user']['email'], new_email)", "title": "" }, { "docid": "8274ed96ad5c3f7efab90304d02ec617", "score": "0.6017798", "text": "def test_email_change_in_user_profile(self, browser, liveserver):\n self.api_helper.drop_all_data(liveserver)\n self.api_helper.load_base_test_data(liveserver)\n browser.get(liveserver + reverse(\"productdb:home\"))\n\n # login\n browser.find_element_by_id(\"navbar_login\").click()\n self.wait_for_text_to_be_displayed_in_body_tag(browser, \"Please enter your credentials below.\")\n\n homepage_message = \"This database contains information about network equipment like routers and switches \" \\\n \"from multiple vendors.\"\n self.login_user(browser, \"testuserprofilemail\", self.API_PASSWORD, homepage_message)\n\n # view the edit settings page\n browser.find_element_by_id(\"navbar_loggedin\").click()\n browser.find_element_by_id(\"navbar_loggedin_user_profile\").click()\n assert \"[email protected]\" in browser.find_element_by_id(\"id_email\").get_attribute('value')\n\n # change email\n new_email = \"[email protected]\"\n browser.find_element_by_id(\"id_email\").clear()\n browser.find_element_by_id(\"id_email\").send_keys(new_email)\n browser.find_element_by_id(\"submit\").click()\n self.wait_for_text_to_be_displayed_in_body_tag(browser, homepage_message)\n\n # verify redirect to homepage\n assert \"User Profile successful updated\" in browser.find_element_by_tag_name(\"body\").text, \\\n \"Should view a message that the user profile was saved\"\n\n # verify new value in email address\n browser.find_element_by_id(\"navbar_loggedin\").click()\n browser.find_element_by_id(\"navbar_loggedin_user_profile\").click()\n self.wait_for_text_to_be_displayed_in_body_tag(browser, \"Edit User Profile\")\n\n assert new_email in browser.find_element_by_id(\"id_email\").get_attribute('value'), \\\n \"Show view the correct email address of the user (%s)\" % new_email\n\n # end session\n self.logout_user(browser)", "title": "" }, { "docid": "b54d28cd3382c769cf3a69f2c606a6c8", "score": "0.60090065", "text": "def test_user_register_with_duplicate_email(self):", "title": "" }, { "docid": 
"1436390e94c784ccd8c6b6f43a6237cb", "score": "0.6007156", "text": "def test_update_user_after_authentication(self):\n payload = {\n 'email': '[email protected]',\n 'password': 'replacetestpassword'\n }\n res = self.client.patch(ME_URL, payload)\n\n self.user.refresh_from_db()\n self.assertEquals(res.status_code, status.HTTP_200_OK)\n self.assertEquals(self.user.email, payload['email'])\n self.assertTrue(self.user.check_password(payload['password']))", "title": "" }, { "docid": "ba61ccd4f71cabd2e565434c15610de7", "score": "0.59977", "text": "def update_user(user, data):\n user.first_name = data.get('first_name', user.first_name)\n user.last_name = data.get('last_name', user.last_name)\n user.email = data.get('email', user.email)\n user.is_staff = data.get('is_staff', user.is_staff)\n user.is_superuser = data.get('is_superuser', user.is_superuser)\n password = data.get('password', None)\n\n if password:\n user.set_password(password)\n\n user.save()\n\n return user", "title": "" }, { "docid": "69330c98f1ebf8bc225a47d73f05fb7d", "score": "0.59908485", "text": "def user_updated(user):\n if User.objects.filter(moodle_id=user['id']).exists():\n kwargs = {'first_name': user['firstname'], 'last_name': user['lastname'], 'username': user['username'],\n 'email': user['email'], 'version_time': timezone.now()}\n User.objects.filter(moodle_id=user['id']).update(**kwargs)", "title": "" }, { "docid": "21fc7f85b86e5b03000af29b359194b1", "score": "0.597796", "text": "def test_account_update(self):\n test_email = \"[email protected]\"\n acc = Account.objects.create(username=test_email, email=test_email)\n self.client.force_authenticate(acc)\n data = {\"email\": \"[email protected]\"}\n url = reverse(\"account-detail\", args=[self.user.id])\n response = self.client.patch(url, data=data)\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n url = reverse(\"account-profile\")\n response = self.client.patch(url, data=data)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n acc.refresh_from_db()\n self.assertEqual(acc.email, data[\"email\"])", "title": "" }, { "docid": "0950da0d8965aa3476dcfaf7ffa00fbc", "score": "0.5949215", "text": "def test_get_user_addr_additional(self):\n app_settings.set(\n 'projectroles',\n 'user_email_additional',\n '{};{}'.format(USER_ADD_EMAIL, USER_ADD_EMAIL2),\n user=self.user,\n )\n self.assertEqual(\n get_user_addr(self.user),\n [self.user.email, USER_ADD_EMAIL, USER_ADD_EMAIL2],\n )", "title": "" }, { "docid": "34c6fe762a2ba151f2b87558bc0d84c4", "score": "0.59469897", "text": "def update_profile(request):\n name = request.POST[\"name\"]\n request.user.first_name = name[:name.rindex(\" \")]\n request.user.last_name = name[name.rindex(\" \") + 1:]\n request.user.__dict__[models.User.get_email_field_name()] = request.POST[\"email\"]\n request.user.admin.description = request.POST[\"description\"]\n request.user.admin.save()\n request.user.save()\n return json_response(True)", "title": "" }, { "docid": "f82113077a175c9fe4ca628b594cb247", "score": "0.59445953", "text": "def edit_custom_fields_remove_duplicate_emails(sender, instance, using, **kwargs):\n if not instance._state.adding: # check if object exists\n old_email = User.objects.get(pk=instance.pk).email\n if instance.email != old_email:\n for field in instance.site.organization.custom_fields.all():\n field.values.pop(old_email, None)\n field.save()", "title": "" }, { "docid": "190e29456b7444d124f3936ea11cf4a5", "score": "0.5938", "text": "def test_get_user_addr_invalid(self):\n self.user.email = 
INVALID_EMAIL\n self.assertEqual(get_user_addr(self.user), [])", "title": "" }, { "docid": "bbd99c5a89cbae9e1a89181bf9202013", "score": "0.5928249", "text": "def test_create_user_email_normalize(self):\n email = \"[email protected]\"\n\n user = get_user_model().objects.create_user(\n email=email,\n password='no se ocupa en el test'\n )\n\n self.assertEqual(user.email, email.lower())\n\n email2 = \"[email protected]\"\n user2 = get_user_model().objects.create_user(\n email=email2,\n password='no se ocupa en el test'\n )\n\n self.assertEqual(user2.email, email2)", "title": "" }, { "docid": "71f7f41c9c2207a4c20b3379e045ec07", "score": "0.5927182", "text": "def test_get_email_user_first_last_name(self):\n self.user_owner.first_name = 'Owner'\n self.user_owner.last_name = 'User'\n self.assertEqual(\n get_email_user(self.user_owner),\n 'Owner User ([email protected])',\n )", "title": "" }, { "docid": "01b9ebeb0f83b424f9083fc3ce01ee20", "score": "0.5926291", "text": "def test_create_user(cidc_api, clean_db, monkeypatch):\n user_id, other_user_id = setup_users(cidc_api, monkeypatch)\n with cidc_api.app_context():\n dup_email = Users.find_by_id(other_user_id).email\n\n client = cidc_api.test_client()\n\n dup_user_json = {\"email\": dup_email}\n new_user_json = {\"email\": \"[email protected]\"}\n\n # Registered users who aren't admins can't create arbitrary users\n res = client.post(\"users\", json=new_user_json)\n assert res.status_code == 401\n\n # Users who are admins can create arbitrary users\n make_admin(user_id, cidc_api)\n res = client.post(\"users\", json=new_user_json)\n assert res.status_code == 201\n\n # Even admins can't create users with duplicate emails\n res = client.post(\"users\", json=dup_user_json)\n assert res.status_code == 400", "title": "" }, { "docid": "ba0feaca39bb64cce02cb6a36d51ceec", "score": "0.5920965", "text": "def update(email, passwordHash, firstName, lastName, role):\n update_dict = \\\n {\n 'updatedOn': datetime.datetime.utcnow(),\n 'firstName': firstName,\n 'lastName': lastName,\n 'role': role\n }\n if passwordHash != None:\n update_dict['passwordHash'] = passwordHash\n\n doc = MongoUtil.user_collection().find_one_and_update(\n {\n '_id': email\n },\n {\n '$set': update_dict\n },\n projection=_user_fields,\n return_document=pymongo.ReturnDocument.AFTER\n )\n return doc", "title": "" }, { "docid": "185a1203062d006a4244baf4219fd06b", "score": "0.5917508", "text": "def updateUser(self, email, hasCorona=False):\n query_params = {\n \":email\": email,\n \":haCorona\": hasCorona,\n \":lastUpdated\": datetime.datetime.now()\n }\n\n self._db.update_item(\n TableName='person',\n Key = convertToDynamoItem(\n {\n \"email\": email\n }\n ),\n UpdateExpression=\"SET hasCoronoa = :hasCorona, lastUpdated = :lastUpdated\",\n ExpressionAttributeValues=convertToDynamoItem(query_params)\n )", "title": "" }, { "docid": "abe0194bde81355c7028b25a3a78d870", "score": "0.5911192", "text": "def populate_users():\n log.info('populate_users')\n users = get_all_users().body['members']\n\n for u in users:\n if not u['deleted']:\n s_id = u['id']\n s_name = u['name']\n is_admin = u['is_admin']\n is_owner = u['is_owner']\n # Create our custom unique user ID's (slackID + slackUserName)\n _id = s_name + s_id\n db_update_row(User(id=_id,\n slack_id=s_id,\n slack_name=s_name,\n is_admin=is_admin,\n is_owner=is_owner))", "title": "" }, { "docid": "480e29d7d9e961c366ac93557df975bc", "score": "0.5907169", "text": "def update_user(email, password, first_name, last_name, skill, course):\n from 
PEC.user.models import User\n user = User.query.filter_by(email=email).first()\n if user is not None:\n if password:\n user.set_password(password)\n if first_name:\n user.update(first_name=first_name, commit=False)\n if last_name:\n user.update(last_name=last_name, commit=False)\n if skill:\n print(skill)\n user.skills.clear()\n user.add_skill(*skill)\n if course:\n user.courses.clear()\n user.add_course(*course)\n user.save()\n else:\n click.echo('User does not exist')", "title": "" }, { "docid": "22c8b6b7aaa0f9212f33328d12beb5bf", "score": "0.5903375", "text": "def test_new_user_email_normalize(self):\n email = '[email protected]'\n user = get_user_model().objects.create_user(email, 'test232323')\n\n self.assertEqual(user.email, email.lower())", "title": "" }, { "docid": "ff647a9f9cf18a998b921a0c3f646d4e", "score": "0.59017557", "text": "def test_user_email_normalize(self):\n email = '[email protected]'\n user = get_user_model().objects.create_user(email, 'test123')\n\n self.assertEqual(user.email, email.lower())", "title": "" }, { "docid": "2f5c3d8198bb866ea0e4ef77e0b1c989", "score": "0.5891535", "text": "def make_account_changes(user, newaffiliation, newpassword, newphone_number):\n if newaffiliation != 'No Change':\n user.affiliation = newaffiliation\n \n #if new password field wasn't empty then new password is equal to confirm password(validator)- only update password in this case\n if newpassword != '':\n user.password = newpassword\n\n #can just do this even if they do not make any changes because default value in phone number field\n user.phone_number = newphone_number\n db.session.commit()", "title": "" }, { "docid": "52322846bc554eba8c38baa9580657c3", "score": "0.5888782", "text": "def addEmail(username, email):\n insert_sql = \"INSERT INTO email (email, username) VALUES (%s, %s)\"\n exec_commit(insert_sql, [email, username])", "title": "" }, { "docid": "57e1c8ee481581d0171fd6eb403ab8eb", "score": "0.58870715", "text": "def _updateUser(username,firstName,lastName,email,height,yearOfGradution,userProfilePhoto,deactivate,institution):\r\n\r\n user = PongUser.objects.get(username=username)\r\n user.setHeight(height)\r\n user.setGraduationYear(yearOfGradution)\r\n if userProfilePhoto:\r\n user.setPhoto(userProfilePhoto)\r\n else:\r\n user.setPhoto(None)\r\n user.setFirstName(firstName)\r\n user.setLastName(lastName)\r\n user.setEmail(email)\r\n user.setIsActive(not deactivate)\r\n user.setInstitution(institution)\r\n user.setHasUpdatedProfile(True)\r\n user.save()\r\n return", "title": "" }, { "docid": "04d70ebf0486f3b108542e109140bf6f", "score": "0.58865005", "text": "def test_edit_user(self):\n res_login = self.__login(\"[email protected]\")\n my_user = {\n 'email': \"[email protected]\",\n 'first': \"john\",\n 'last': \"doe\",\n 'minitial': \"f\",\n 'roles': [\"admin\", \"technician\"],\n 'isEdit': False,\n 'usrId': 3,\n }\n sres = self.client.post(\"/api/save_user\", data=my_user)\n existing_user = UserEntity.query.filter_by(email=my_user['email'])\n if existing_user.count() is 1:\n edited_user = my_user\n edited_user['first'] = 'bill'\n response = self.client.post(\"/api/edit_user\", data=edited_user)\n self.assertEqual(response._status_code, 200)\n #see if changed\n after_edit_user = UserEntity.query.filter_by(email=my_user['email'])\n self.assertEqual(after_edit_user.one().first, 'bill')\n else:\n self.fail('user not existing')\n print('edit user test')", "title": "" }, { "docid": "39fe10f587fec12eccd132a8c58162cd", "score": "0.5884948", "text": "def update_user(connection, 
user):\n    sql = ''' UPDATE Users \n              SET Username = ? , \n                  Email = ? , \n                  Password = ? \n              WHERE uID = ?'''\n    cursor = connection.cursor()\n    cursor.execute(sql,user)\n    connection.commit()", "title": "" }, { "docid": "49fffaf0c77742793dd3f7fffff28115", "score": "0.58738405", "text": "def test_update_cloud_user(self):\n        pass", "title": "" }, { "docid": "6f3e5b46b3a89486323f16e4bcf2eb6b", "score": "0.5862944", "text": "def users_profile_update(self):\n        email_query = request.args.get('email')\n        if not email_query:\n            self.logger.debug(messages.MISSING_FIELDS_ERROR)\n            return messages.ERROR_JSON % messages.MISSING_FIELDS_ERROR, 400\n        if email_query != auth.current_user().get_email() and not auth.current_user().is_admin():\n            self.logger.debug(messages.USER_NOT_AUTHORIZED_ERROR)\n            return messages.ERROR_JSON % messages.USER_NOT_AUTHORIZED_ERROR, 403\n        content = request.form\n        try:\n            user = self.database.search_user(email_query)\n        except UserNotFoundError:\n            self.logger.debug(messages.USER_NOT_FOUND_MESSAGE % email_query)\n            return messages.ERROR_JSON % (messages.USER_NOT_FOUND_MESSAGE % email_query), 404\n        password = SecuredPassword.from_raw_password(content[\"password\"]) if \"password\" in content else None\n        fullname = content[\"fullname\"] if \"fullname\" in content else None\n        phone_number = content[\"phone_number\"] if \"phone_number\" in content else None\n        photo = Photo()\n        if 'photo' in request.files:\n            photo = Photo.from_bytes(request.files['photo'].stream)\n        self.database.update_user(user, password=password, fullname=fullname,\n                                  phone_number=phone_number, photo=photo)\n        return messages.SUCCESS_JSON, 200", "title": "" }, { "docid": "f61b599326c289bf5b1f320fefe5623f", "score": "0.585963", "text": "def test_get_email_user_no_email(self):\n        self.user_owner.email = ''\n        self.assertEqual(get_email_user(self.user_owner), 'owner')", "title": "" }, { "docid": "6dd3062e22b58cd5cdbd2db99215484a", "score": "0.5859077", "text": "def save(self, **kwargs):\n        user = super().save(commit=False)\n        if not user.pk:\n            user.is_active = False\n            send_mail = True\n        else:\n            send_mail = False\n        # After knowing to send email or not we need to save\n        # the user to the database.\n        user.save()\n        # To save any many-to-many relations we call save_m2m()\n        self.save_m2m()\n        # create or update the user profile\n        Profile.objects.update_or_create(\n            user=user,\n            defaults={\n                'name': self.cleaned_data['name'],\n                'slug': slugify(self.cleaned_data['name'])\n            }\n        )\n        if send_mail:\n            self.send_mail(user, **kwargs)\n        return user", "title": "" }, { "docid": "95b561f1a15b92a6d24cf648853200c6", "score": "0.5857558", "text": "def add_user(self, email):\n        # POST https://api.backupify.com/gapps/v1/domains/{DomainName}/users -d 'user[email]={UserEmail}'\n        url = '%s/users' % (self.base_url)\n        data = {\n            'user[email]': email,\n        }\n        return requests.post(url, json=data, headers=self.headers)", "title": "" }, { "docid": "698daee23bb2e8a428f878215fe3cec1", "score": "0.58418614", "text": "def update_user_email(request):\n    if not request.POST['new_usermail']:\n        msg = 'The email cannot be empty.'\n        messages.add_message(request, messages.ERROR, msg)\n        return render(request, 'profile/profile.html')\n    if UserProfile.validate_mail(request.POST['new_usermail'], request.POST['new_usermail_confirmation']):\n        UserProfile.change_mail(request.POST['new_usermail'], request.user)\n        msg = 'The email has been changed.'\n        messages.add_message(request, messages.SUCCESS, msg)\n        return render(request, 'profile/profile.html')\n    else:\n        msg = 'The email is invalid or there is a problem with it.'\n        
messages.add_message(request, messages.ERROR, msg)\n return render(request, 'profile/profile.html')\n\n return redirect('home_user')", "title": "" }, { "docid": "be24813dc7d989201a0328fe134d8827", "score": "0.58403623", "text": "def test_create_user_email(self):\n new_user = UserProfile.objects.create_user(**self.user_data)\n self.assertEqual(mail.outbox[0].to, [new_user.email])\n self.assertEqual(len(mail.outbox), 1)", "title": "" }, { "docid": "e660a190fd092708a44fba98980582cf", "score": "0.58249104", "text": "def register_users(cls, new_users):\n\t\tcls._users = new_users", "title": "" }, { "docid": "bb4ecb250a9457e51799234fe5cf7e07", "score": "0.58203703", "text": "def save(self, domain_override=None,\n subject_template_name=None,\n email_template_name=None,\n use_https=False, token_generator=default_token_generator,\n from_email=None, request=None, html_email_template_name=None,\n extra_email_context=None):\n logger.info(self.cleaned_data)\n username = self.cleaned_data[\"username\"]\n email = self.cleaned_data[\"email\"]\n logger.info('{0} {1}'.format(username, email))\n for user in self.get_users(email=email, username=username):\n if not domain_override:\n current_site = get_current_site(request)\n site_name = current_site.name\n domain = current_site.domain\n else:\n site_name = domain = domain_override\n context = {\n 'email': email,\n 'domain': domain,\n 'site_name': site_name,\n 'uid': urlsafe_base64_encode(force_bytes(user.pk)),\n 'user': user,\n 'token': token_generator.make_token(user),\n 'protocol': 'https' if use_https else 'http',\n }\n if extra_email_context is not None:\n context.update(extra_email_context)\n logger.info(user.email)\n logger.info('Sending password recovery email to {0} - {1}'.format(user.username, user.email))\n tasks.send_email.delay(\n recipients=[user.email],\n subject='TLKFAA: Password Recovery',\n context=context,\n text_template='email/password_reset_email.txt',\n html_template='email/password_reset_email.html',\n bcc=[settings.DEBUG_EMAIL]\n )", "title": "" }, { "docid": "43ac8a9ca01d2adf4de2a436384ae8c9", "score": "0.58190715", "text": "def save(self, domain_override=None,\n email_template_name='registration/password_reset_email.html',\n use_https=False, token_generator=default_token_generator, from_email=None, \n request=None):\n for user in self.users_cache:\n html_email_template_name = email_template_name\n text_email_template_name='password_reset_email.txt'\n text_email_body_template = loader.get_template(text_email_template_name)\n html_email_body_template = loader.get_template(html_email_template_name)\n \n context = Context({\n 'email': user.email,\n 'uid': int_to_base36(user.id),\n 'first_name': user.first_name,\n 'token': token_generator.make_token(user),\n })\n context.update(get_basic_email_context())\n\n subject = ''.join(render_to_string('email_subject.txt', {\n 'message': \"Password Reset\"\n }, context).splitlines())\n \n text_email_body = text_email_body_template.render(Context(context)) \n html_email_body = html_email_body_template.render(Context(context))\n\n send_email(subject, text_email_body, [user.email], html_email_body)", "title": "" }, { "docid": "6f6719256bcccd57a245f3ad036b0180", "score": "0.58178204", "text": "def test_get_email_user_name(self):\n self.user_owner.name = 'Owner User'\n self.assertEqual(\n get_email_user(self.user_owner),\n 'Owner User ([email protected])',\n )", "title": "" }, { "docid": "0c007cd5fd37972fbd110d92baacce49", "score": "0.5806575", "text": "def setup_user_data(self, step: Step):\n from 
allauth.account.models import EmailAddress\n from allauth.utils import get_user_model\n step.context.user_data = step.table[0]\n user = get_user_model()(username=step.table[0]['username'])\n user.set_password(step.table[0]['password'])\n user.save()\n EmailAddress.objects.create(\n user=user, email=step.table[0]['email'],\n primary=True, verified=True)", "title": "" }, { "docid": "dac2e3f6a5baf253669814c490da1cca", "score": "0.5801011", "text": "def put(self):\n parser = reqparse.RequestParser(bundle_errors=True)\n parser.add_argument('firstname', required=True)\n parser.add_argument('lastname', required=True)\n parser.add_argument('email', required=True)\n args = parser.parse_args()\n\n g.user.firstname = args['firstname']\n g.user.lastname = args['lastname']\n g.user.email = args['email']\n db.session.commit()\n\n return g.user.serialize, 200", "title": "" }, { "docid": "f153bfda56275004a681d76ea68e7090", "score": "0.5783606", "text": "def test_update_user_profile(self):\n payload = {'name': 'new name', 'password': 'newdjango1234'}\n\n res = self.client.patch(ME_URL, payload)\n\n # refresh_from_db helper fn to update user with latest value from db\n self.user.refresh_from_db()\n\n self.assertEqual(self.user.name, payload['name'])\n self.assertTrue(self.user.check_password(payload['password']))\n self.assertEqual(res.status_code, status.HTTP_200_OK)", "title": "" }, { "docid": "4542f4187910301f240abbe2e97d1dbb", "score": "0.5782478", "text": "def update_email(self, employee_id, email):\n return self.client.put(\n 'people/{employee_id}/email'.format(\n employee_id=employee_id\n ),\n json_body={\n 'email': email\n }\n )", "title": "" }, { "docid": "82286e9d548041972ff3ee6b94d12c61", "score": "0.5780309", "text": "async def send_adds(self, adds):\n json_data = { 'users': { 'user': adds } }\n params = { 'update_existing': 1,\n 'email_conflict_resolution': 2 }\n await self.session.post('users', json=json_data, params=params)", "title": "" }, { "docid": "3688829d5fa52717b6a92b00c45723f1", "score": "0.57801926", "text": "def edit_user(self, REQUEST):\n user_id = REQUEST.form['id']\n agent = self._get_ldap_agent(bind=True)\n errors = _session_pop(REQUEST, SESSION_FORM_ERRORS, {})\n user = agent.user_info(user_id)\n # message\n form_data = _session_pop(REQUEST, SESSION_FORM_DATA, None)\n if form_data is None:\n form_data = user\n\n try:\n orgs = agent.all_organisations()\n except ldap.SIZELIMIT_EXCEEDED:\n secondary_agent = self._get_ldap_agent(secondary=True)\n orgs = secondary_agent.all_organisations()\n orgs = [{'id': k, 'text': v['name'], 'text_native': v['name_native'],\n 'ldap': True} for k, v in orgs.items()]\n user_orgs = list(agent.user_organisations(user_id))\n if not user_orgs:\n org = form_data.get('organisation')\n if org:\n orgs.append(\n {'id': org, 'text': org, 'text_native': '', 'ldap': False})\n else:\n org = user_orgs[0]\n org_id = agent._org_id(org)\n form_data['organisation'] = org_id\n orgs.sort(lambda x, y: cmp(x['text'], y['text']))\n schema = user_info_edit_schema.clone()\n\n skip_email_validation_node = colander.SchemaNode(\n colander.Boolean(),\n title='',\n name='skip_email_validation',\n description='Skip extended email validation',\n widget=deform.widget.CheckboxWidget(),\n )\n\n # add the \"skip email validation\" field if email fails validation\n email = form_data.get('email')\n if email:\n email = email.strip()\n validity_status = validate_email(email, verify=False, verbose=True)\n if validity_status is not True:\n email_node = schema['email']\n pos = 
schema.children.index(email_node)\n schema.children.insert(pos + 1, skip_email_validation_node)\n\n # if the skip_email_validation field exists but is not activated,\n # add an extra validation to the form\n if not (form_data.get('edit-skip_email_validation') == 'on'):\n schema['email'].validator = colander.All(\n schema['email'].validator, check_valid_email)\n\n choices = [('', '-')]\n for org in orgs:\n if org['ldap']:\n if org['text_native']:\n label = u\"%s (%s, %s)\" % (org['text'], org['text_native'],\n org['id'])\n else:\n label = u\"%s (%s)\" % (org['text'], org['id'])\n else:\n label = org['text']\n choices.append((org['id'], label))\n widget = SelectWidget(values=choices)\n schema['organisation'].widget = widget\n\n # if 'disabled@' in form_data.get('email', ''):\n # user_dn = agent._user_dn(user_id)\n # form_data['email'] = \"disabled - %s\" % \\\n # agent.get_email_for_disabled_user_dn(user_dn)\n\n options = {'user': user,\n 'form_data': form_data,\n 'schema': schema,\n 'errors': errors,\n }\n self._set_breadcrumbs([(user_id, '#')])\n return self._render_template('zpt/users/edit.zpt', **options)", "title": "" }, { "docid": "604fe0ea39a4e94b1b2deebfe6eb9f6e", "score": "0.57723206", "text": "def fix_old_user(out, obj, attrnames, alternative_user):\r\n for attrname in attrnames:\r\n try:\r\n user_id = getattr(obj, attrname).id\r\n except User.DoesNotExist, err:\r\n out.write(\"Old %s user doesn't exist. Use current user %r.\" % (attrname, alternative_user))\r\n setattr(obj, attrname, alternative_user)", "title": "" }, { "docid": "14e0c38e36382c22cce9738b3d444460", "score": "0.5756488", "text": "def put(self, request):\n serializer = UserEditingSerializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n user = request.user\n user.first_name = serializer.validated_data['first_name']\n user.last_name = serializer.validated_data['last_name']\n user.birthdate = serializer.validated_data['birthdate']\n user.gender = serializer.validated_data['gender']\n user.city = serializer.validated_data['city']\n user.address = serializer.validated_data.get('address', None)\n user.save()\n return Response(data=UserBaseSerializer(user).data, status=status.HTTP_200_OK)", "title": "" }, { "docid": "05f8b6e44fd8eed2afa262bb37df45f5", "score": "0.5756239", "text": "def save(self, domain_override=None,\n subject_template_name='registration/password_reset_subject.txt',\n email_template_name='registration/password_reset_email.html',\n extra_email_context=dict(),\n use_https=False, token_generator=default_token_generator,\n from_email=None, request=None, html_email_template_name=None):\n from django.core.mail import send_mail\n UserModel = get_user_model()\n email = self.cleaned_data[\"email\"]\n active_users = UserModel._default_manager.filter(\n email__iexact=email, is_active=True)\n\n\n for user in active_users:\n # Make sure that no email is sent to a user that actually has\n # a password marked as unusable\n if not user.has_usable_password():\n continue\n\n # as we also want \"non users\" auth users to reset their password, we always\n # fake the concrete usermodel object.\n a = ConcreteUserModel(\n username=user.username,\n email=user.email,\n last_login=user.last_login,\n id=user.id,\n is_active=user.is_active,\n password=user.password,\n )\n a.pk = user.id\n\n a.confirm_account(template=email_template_name, subject='Passwort zurücksetzen')", "title": "" }, { "docid": "d1005822cf012bb3fe391ed7d4ecce58", "score": "0.5751076", "text": "def test_new_user_email_normalized(self):\n email = 
'[email protected]'\n user = get_user_model().objects.create_user(email, 'afdc1234')\n\n self.assertEqual(user.email, email.lower())", "title": "" }, { "docid": "29055c6a815401d9268ef34b9c8f94b8", "score": "0.5749678", "text": "def save(self):\n payload = self.context['payload']\n user = User.objects.get(username=payload['user'])\n user.is_verified = True\n user.save()", "title": "" }, { "docid": "6b04d7e2534b10c719e906971420742d", "score": "0.5741804", "text": "def test_get_user_addr_additional_invalid(self):\n app_settings.set(\n 'projectroles',\n 'user_email_additional',\n '{};{}'.format(USER_ADD_EMAIL, INVALID_EMAIL),\n user=self.user,\n )\n self.assertEqual(\n get_user_addr(self.user),\n [self.user.email, USER_ADD_EMAIL],\n )", "title": "" } ]
3db79cc0b13f76a9ed8a25932cff0f21
GET the listing prefix (header) for the current heading node according to the current level and position
[ { "docid": "7841e7091f7749d4c3041758476861df", "score": "0.673707", "text": "def outline_prefix(self):\n # alternate the listing headers. recylce if needed\n list_type = (self.level - 1) % ListTypes.OPTIONS\n if list_type is ListTypes.NUMBER:\n return str(self.position + 1)\n elif list_type is ListTypes.ALPHA:\n return chr(ord('A') + self.position)\n elif list_type is ListTypes.ROMAN:\n return ListTypes.convertRoman(self.position + 1)\n elif list_type is ListTypes.LOWALPHA:\n return chr(ord('a') + self.position)\n elif list_type is ListTypes.STAR:\n return \"*\"\n else:\n return '+'", "title": "" } ]
[ { "docid": "55189f5bfe0380ae74651a5bf051da36", "score": "0.66593266", "text": "def _prefix_getter(self):\r\n if not self.children:\r\n return \"\"\r\n return self.children[0].prefix", "title": "" }, { "docid": "1c89337cb6bf2be4969b8108d04edffe", "score": "0.6187859", "text": "def get_prefix(self):\n\n return self.node[:self.kmer]", "title": "" }, { "docid": "62aab76290a1260f3049bd27987cba95", "score": "0.6114829", "text": "def prefix(self):\n return self.parsed.prefix", "title": "" }, { "docid": "6effe020a9b25051f0f596b9ff40366a", "score": "0.5925336", "text": "def get_prefix(self):\n return self._element.prefix", "title": "" }, { "docid": "02e401790738f12de0f2b7090a756251", "score": "0.5898389", "text": "def _get_prefix(self):\n return self.__prefix", "title": "" }, { "docid": "02e401790738f12de0f2b7090a756251", "score": "0.5898389", "text": "def _get_prefix(self):\n return self.__prefix", "title": "" }, { "docid": "02e401790738f12de0f2b7090a756251", "score": "0.5898389", "text": "def _get_prefix(self):\n return self.__prefix", "title": "" }, { "docid": "02e401790738f12de0f2b7090a756251", "score": "0.5898389", "text": "def _get_prefix(self):\n return self.__prefix", "title": "" }, { "docid": "4c76d6afa68b5440f7e4cf9d74dd8862", "score": "0.5801729", "text": "def _get_prefix(node):\n prefixes = set()\n\n for child in _get_inner_list_entries(node):\n prefix_node = node_seek.get_node_with_first_prefix(child)\n prefixes.add(prefix_node.prefix)\n\n counter = collections.Counter(prefixes)\n\n try:\n common = counter.most_common(1)[0]\n\n return common[0].strip(\"\\n\")\n except IndexError:\n return \"\"", "title": "" }, { "docid": "8f91bc941a61587254c0110f63041db3", "score": "0.5799939", "text": "def get_heading_level(heading):\n i = 0\n while( i < len(heading) and heading[i] == '#' ):\n i += 1\n\n return i", "title": "" }, { "docid": "512f29e999e38f1b3b3767dd0acc39da", "score": "0.57807744", "text": "def handle_toc_header(val: str) -> nodes.Element:\n para = addnodes.compact_paragraph(\"\", \"\", nodes.Text(val))\n item = nodes.list_item(\"\", para)\n item[\"classes\"].append(\"fs-1-2\")\n return item", "title": "" }, { "docid": "8c74aff83a59d7977689912e6c98f26f", "score": "0.57141215", "text": "def _adjust_prefix(nodes, index):\n try:\n marker = nodes[index + 1]\n except IndexError:\n return\n\n node = node_seek.get_node_with_first_prefix(marker)\n\n if isinstance(node, tree.EndMarker):\n node.prefix = \"\"\n\n return\n\n if hasattr(node, \"prefix\"):\n node.prefix = \"\\n\\n\"", "title": "" }, { "docid": "05a8fcc5620caa7c099513601260443a", "score": "0.5707547", "text": "def _prefix_getter(self):\r\n return self._prefix", "title": "" }, { "docid": "b6f72074f29cc632c035e2baa9a0e066", "score": "0.56790197", "text": "def getHeader():\n menuStart = 0\n for i,line in enumerate(self.menulist):\n if line == self.title:\n # print(i)\n menuStart = i\n break\n\n self.header = \"\\n\".join(self.menulist[:menuStart])\n self.title = self.menulist[menuStart]\n self.menulist = self.menulist[menuStart+1:]", "title": "" }, { "docid": "2ebbd38e44aceef19ceca765c3a1e067", "score": "0.56258893", "text": "def get_prefix(node):\n if '#' in node:\n name = node.split(\"#\")[-1]\n else:\n # there must be no # in the prefix e.g. 
schema.org/\n        name = node.split(\"/\")[-1]\n    return node[:-len(name)]", "title": "" }, { "docid": "b36f3d5302ba79c838c6ab07fa1b8ad4", "score": "0.56133264", "text": "def get_node_with_first_prefix(node):\n    if hasattr(node, \"prefix\"):\n        return node\n\n    if not hasattr(node, \"children\"):\n        return None\n\n    for child in iter_nested_children(node):\n        if hasattr(child, \"prefix\"):\n            return child\n\n    return None", "title": "" }, { "docid": "7352cda3f16aa86de0637f19c2013155", "score": "0.5597639", "text": "def prefix(self):\n        return self._prefix", "title": "" }, { "docid": "5508d65b1edc0dd5f2b2823c81bd4b7c", "score": "0.5569134", "text": "def prefix(self):  # noqa: D401\n        return self.reference.prefix", "title": "" }, { "docid": "33895e33ae1b7d396a52ac3c9bc30911", "score": "0.5515102", "text": "def __get_head_from_temp(self, num):\n        look_for = 'mi<mk<header-ope<' + num + '\\n'\n        found_head = 0\n        string_to_return = ''\n        line = 1\n        while line:\n            line = self.__read_from_head_obj.readline()\n            if found_head:\n                if line == 'mi<mk<header-clo\\n':\n                    return string_to_return\n                string_to_return = string_to_return + line\n                \n            else:\n                if line == look_for:\n                    found_head = 1", "title": "" }, { "docid": "ee7cbef300b9169d4694d1de4db6a801", "score": "0.55088943", "text": "def prefix(self) -> Optional[str]:\n        return pulumi.get(self, \"prefix\")", "title": "" }, { "docid": "49bbbf6d0962e874d360fafe361a707a", "score": "0.5449697", "text": "def prefix(self) -> pulumi.Output[Optional[str]]:\n        return pulumi.get(self, \"prefix\")", "title": "" }, { "docid": "f0662ee77eed310c45d5c0d7c72a4b32", "score": "0.5447478", "text": "def getPrefix(self):\n        return self.__prefix", "title": "" }, { "docid": "6f6c8d02c63685fa3026a3068ea70cbd", "score": "0.5428804", "text": "def LocateHeader(self):\n        try:\n            nth = self.getparent(Header)\n        except ValueError as msg:\n            nth = list(self.backtrace(fn=lambda x:x))[-1]\n        return nth", "title": "" }, { "docid": "31dce4c23b85e863d113f6b0ad1857eb", "score": "0.5418934", "text": "def prefix(self) -> str:\n        if self.latest_node == DocumentNode:\n            return \"\"\n        elif self.latest_node is None:\n            return self.newline_indent\n        elif self.latest_node == BlockNode:\n            return \"\\n\" + self.newline_indent\n        else:\n            return self.newline_indent", "title": "" }, { "docid": "db25a7e848f84872a60231980ed4f894", "score": "0.5415702", "text": "def getPrefix(self, **args):\n        prefix = self[args['mode']]['prefix'][args['formatType']]\n        return prefix", "title": "" }, { "docid": "5dd8d4a452d1cac8d28d95da10abe0f3", "score": "0.54142433", "text": "def _get_new_level(first_line, current_level):\n    if not first_line:\n        return current_level, ''\n\n    chunks = re.split(r'^(\\*|#+)', first_line)\n    if first_line[0] == '#':  # starts with a markdown title token: #\n        new_level = len(chunks[1])  # count token 
apparitions\n else: # starts with a list token: *\n new_level = current_level + 1 # it is the end of this path\n\n new_title = chunks[-1].strip()\n\n return new_level, new_title", "title": "" }, { "docid": "48ce64d6e2f19feefa804b7482db5d78", "score": "0.538527", "text": "def prefix(self) -> typing.Optional[str]:\n return self._values.get(\"prefix\")", "title": "" }, { "docid": "f15fdd4a0791f9ea4dbc6c0caf550055", "score": "0.5383657", "text": "def _get_filter_prefix_list_name(self):\n return self.__filter_prefix_list_name", "title": "" }, { "docid": "4fdf9913ed892ac7efed887645f5a2d6", "score": "0.5365455", "text": "def _get_hdr_start(self):\n return self.__hdr_start", "title": "" }, { "docid": "51980da8aa23f78d8e6d23fdd551a962", "score": "0.53566235", "text": "def parsed_prefix(self):\n return utils.prefixsplit(self.prefix)", "title": "" }, { "docid": "eeeb70435c4a3c226a4d0bde50292242", "score": "0.5354022", "text": "def header_level(self) -> int:\n if self.name in ('h1', 'h2', 'h3', 'h4', 'h5', 'h6'):\n return int(self.name[1:])\n return 0", "title": "" }, { "docid": "e922ea30910f0f099fbb76db423e6934", "score": "0.5345604", "text": "def prefix(self):\n if self.parent is None:\n return self._prefix\n return self.parent.prefix + self._prefix", "title": "" }, { "docid": "60bff54756a624fcad1cb667ca52af1e", "score": "0.53447425", "text": "def getNamePrefix(self):\n raise NotImplementedError", "title": "" }, { "docid": "b7107c75f1d283f3282d95a924d03627", "score": "0.5335786", "text": "def list_prefix(arg, opts, shell_opts):\n\n search_string = ''\n if type(arg) == list or type(arg) == tuple:\n search_string = ' '.join(arg)\n\n v = get_vrf(opts.get('vrf_rt'), default_var='default_list_vrf_rt', abort=True)\n\n if v.rt == 'all':\n vrf_text = 'any VRF'\n vrf_q = None\n else:\n vrf_text = vrf_format(v)\n vrf_q = {\n 'operator': 'equals',\n 'val1': 'vrf_rt',\n 'val2': v.rt\n }\n print(\"Searching for prefixes in %s...\" % vrf_text)\n\n col_def = {\n 'added': { 'title': 'Added' },\n 'alarm_priority': { 'title': 'Alarm Prio' },\n 'authoritative_source': { 'title': 'Auth source' },\n 'children': { 'title': 'Children' },\n 'comment': { 'title': 'Comment' },\n 'customer_id': { 'title': 'Customer ID' },\n 'description': { 'title': 'Description' },\n 'expires': { 'title': 'Expires' },\n 'free_addresses': { 'title': 'Free addresses' },\n 'monitor': { 'title': 'Monitor' },\n 'last_modified': { 'title': 'Last mod' },\n 'node': { 'title': 'Node' },\n 'order_id': { 'title': 'Order ID' },\n 'pool_name': { 'title': 'Pool name' },\n 'prefix': { 'title': 'Prefix' },\n 'status': { 'title': 'Status' },\n 'tags': { 'title': '#' },\n 'total_addresses': { 'title': 'Total addresses' },\n 'type': { 'title': '' },\n 'used_addresses': { 'title': 'Used addresses' },\n 'vlan': { 'title': 'VLAN' },\n 'vrf_rt': { 'title': 'VRF RT' },\n }\n # default columns\n columns = [ 'vrf_rt', 'prefix', 'type', 'tags', 'node', 'order_id', 'customer_id', 'description' ]\n\n # custom columns? 
prefer shell opts, then look in config file\n custom_columns = None\n if shell_opts.columns and len(shell_opts.columns) > 0:\n custom_columns = shell_opts.columns\n elif cfg.get('global', 'prefix_list_columns'):\n custom_columns = cfg.get('global', 'prefix_list_columns')\n\n # parse custom columns\n if custom_columns:\n # Clear out default columns, unless user whishes to append\n if custom_columns[0] != '+':\n columns = []\n\n # read in custom columns\n for col in list(csv.reader([custom_columns.lstrip('+') or ''], escapechar='\\\\'))[0]:\n col = col.strip()\n if col not in col_def:\n print(\"Invalid column:\", col, file=sys.stderr)\n sys.exit(1)\n columns.append(col)\n\n offset = 0\n # small initial limit for \"instant\" result\n limit = 50\n prefix_str = \"\"\n while True:\n res = Prefix.smart_search(search_string, { 'parents_depth': -1,\n 'include_neighbors': True, 'offset': offset, 'max_result': limit },\n vrf_q)\n\n if offset == 0: # first time in loop?\n if shell_opts.show_interpretation:\n print(\"Query interpretation:\")\n _parse_interp_prefix(res['interpretation'])\n\n if res['error']:\n print(\"Query failed: %s\" % res['error_message'])\n return\n\n if len(res['result']) == 0:\n print(\"No addresses matching '%s' found.\" % search_string)\n return\n\n # guess column width by looking at the initial result set\n for p in res['result']:\n for colname, col in col_def.items():\n val = getattr(p, colname, '')\n col['width'] = max(len(colname), col.get('width', 0),\n len(str(val)))\n\n # special handling of a few columns\n col_def['vrf_rt']['width'] = max(col_def['vrf_rt'].get('width', 8),\n len(str(p.vrf.rt)))\n col_def['prefix']['width'] = max(col_def['prefix'].get('width', 0)-12,\n p.indent * 2 + len(p.prefix)) + 12\n try:\n col_def['pool_name']['width'] = max(col_def['pool_name'].get('width', 8),\n len(str(p.pool.name)))\n except:\n pass\n # override certain column widths\n col_def['type']['width'] = 1\n col_def['tags']['width'] = 2\n\n col_header_data = {}\n # build prefix formatting string\n for colname, col in [(k, col_def[k]) for k in columns]:\n prefix_str += \"{%s:<%d} \" % (colname, col['width'])\n col_header_data[colname] = col['title']\n\n column_header = prefix_str.format(**col_header_data)\n print(column_header)\n print(\"\".join(\"=\" for i in range(len(column_header))))\n\n for p in res['result']:\n if p.display == False:\n continue\n\n col_data = {}\n try:\n for colname, col in col_def.items():\n col_data[colname] = str(getattr(p, colname, None))\n\n # overwrite some columns due to special handling\n col_data['tags'] = '-'\n if len(p.tags) > 0:\n col_data['tags'] = '#%d' % len(p.tags)\n\n try: \n col_data['pool_name'] = p.pool.name\n except:\n pass\n\n col_data['prefix'] = \"\".join(\" \" for i in range(p.indent)) + p.display_prefix\n col_data['type'] = p.type[0].upper()\n col_data['vrf_rt'] = p.vrf.rt or '-'\n\n print(prefix_str.format(**col_data))\n\n except UnicodeEncodeError as e:\n print(\"\\nCrazy encoding for prefix %s\\n\" % p.prefix, file=sys.stderr)\n\n if len(res['result']) < limit:\n break\n offset += limit\n\n # let consecutive limit be higher to tax the XML-RPC backend less\n limit = 200", "title": "" }, { "docid": "ca97d863a055c0c7f8e890082666ae32", "score": "0.53352", "text": "def prefix(self):\n return self.PREFIX", "title": "" }, { "docid": "508b5c1d34952a7768f8cb68fd46fb15", "score": "0.5296727", "text": "def head_one(self, text):\n add_level = '' + '#' * self.level\n return add_level + '# ' + text + \"\\n\"", "title": "" }, { "docid": 
"831d9b44709441b089de38d857e34463", "score": "0.527087", "text": "def prefixcurrvalidelft(self) :\n\t\ttry :\n\t\t\treturn self._prefixcurrvalidelft\n\t\texcept Exception as e:\n\t\t\traise e", "title": "" }, { "docid": "91a67672699cc8b87c9514116f88c22e", "score": "0.52666384", "text": "def get_node_prefix(x):\n if x == '':\n return x\n else:\n re.match(\"^(.*)\\[[^\\[\\]]+?\\]$\", x).group(1)", "title": "" }, { "docid": "ec8d70c20b3a45cd8eb62ad62380a6c7", "score": "0.52558964", "text": "def header_index(self, prop):\n header = \"\"\n for row in range(self.tblGeneric.rowCount()):\n header = self.tblGeneric.verticalHeaderItem(row).text()\n if header and prop.upper() in header.upper():\n return row\n return -999", "title": "" }, { "docid": "e710d293724e7fc5fb4fd47dadc76356", "score": "0.5251518", "text": "def get_parent_header(block_header, db):\n return db.get_block_header_by_hash(block_header.parent_hash)", "title": "" }, { "docid": "f4a72034afbf010b6c79554dd428402b", "score": "0.5246798", "text": "def getHead(self) -> \"SoNode *\":\n return _coin.SoLightPath_getHead(self)", "title": "" }, { "docid": "3ae155895fca7d056c66f257e0767fcb", "score": "0.52447", "text": "def get_route_prefix(self):\n pass", "title": "" }, { "docid": "b397bd1ea22bc7c53222dc78230b4d48", "score": "0.524221", "text": "def _get_prefixes(self):\n return self.__prefixes", "title": "" }, { "docid": "dcb8510dec22c242eed4c96f6907b06c", "score": "0.5237772", "text": "def get_head(self, ind):\r\n return self.idx_to_word_index[self.toks[ind].head.idx]", "title": "" }, { "docid": "89f3e4d6eb885d22912c36a0406d8166", "score": "0.5217026", "text": "def scrape_name(self, tr):\n # div.left_subheaders > strong:nth-child(1) \n text = tr.find('div', attrs={'class': 'left_subheaders'}).text\n match = re.match('^(Hon.)?\\s*([^\\s][^\\(]*)\\s*(\\([^\\)]+\\))?', text.strip(), flags=re.IGNORECASE)\n return (match.group(3), match.group(2)) if match else None", "title": "" }, { "docid": "845f0d62df0535e7ade0be14b6851a5b", "score": "0.5204398", "text": "def get_head(self):\n\n return self.pos[0]", "title": "" }, { "docid": "d52d9fb14d70a3426a02fc5dd037ec33", "score": "0.51953995", "text": "def _assign_titles(self):\n with open(self.filename, 'r') as myfile:\n for line in myfile:\n if line.startswith(self.header_start):\n return line.rstrip('\\n').split('\\t')", "title": "" }, { "docid": "1c398552c21e0f90f0a167e53d61815b", "score": "0.51407176", "text": "def PrintDirHeader(bucket_listing_ref):\n print '%s:' % bucket_listing_ref.url_string.encode(UTF8)", "title": "" }, { "docid": "f13d3626468a2db7478bad3b3b4d42b6", "score": "0.5131966", "text": "def head_position(self):\n return self.parts[0]", "title": "" }, { "docid": "e163a0ecda028ba1bcb1cc313685d2f6", "score": "0.5130318", "text": "def getHead(self) -> \"SoNode *\":\n return _coin.SoPath_getHead(self)", "title": "" }, { "docid": "376ff5e350a48a67524e9f8330499fc0", "score": "0.51174545", "text": "async def get_prefixes(self, ctx):\n\n await ctx.invoke(self.list_prefix)", "title": "" }, { "docid": "6f16bd1f63ad2cbb127cb2c7cd7104b1", "score": "0.5090976", "text": "def prefix(self):\n return self.alias() if self.alias() else self.table()", "title": "" }, { "docid": "7a86537cdafe28eaaf8e3a81e336423d", "score": "0.5089499", "text": "def heading(self):\n return self.heading", "title": "" }, { "docid": "fb4aa38c2e88a5b3bf58223041f7b474", "score": "0.50790983", "text": "def directSubheadings(topHeading, headings):\n topLevel = topHeading.level\n directSubLevel = topLevel + 1\n for heading in 
headings[topHeading.headingsIdx + 1 : ]:\n        if heading.level == directSubLevel:\n            yield heading\n        elif heading.level == topLevel:\n            break\n        else:\n            continue", "title": "" }, { "docid": "65f68a0db45099c3a8dffc781456371f", "score": "0.50622034", "text": "def get_heading(self):\n        return self.__heading", "title": "" }, { "docid": "7adbdecca8778e3e6b0f0f289cd29b2a", "score": "0.5059163", "text": "def test_get_user_group_headers_by_prefix(self):\n        pass", "title": "" }, { "docid": "f2d292e0d72be1768e5ab960f415aaf8", "score": "0.50573444", "text": "def __parse_header(self):\n        state = CurrentState(level=1)\n        self.__parse_level(state, self.head_parse_tbl, self.__undefined)\n        self.__check_msgs(_(\"HEAD (header)\"), state, None)", "title": "" }, { "docid": "f6e44d4e6031745552dff85466bbf512", "score": "0.50501186", "text": "def get_prefix(board, x, y, horizontal):\n    return ''.join(__pre_suff_helper(board, x, y, horizontal, True))", "title": "" }, { "docid": "4a12e5f4f491310124fea5fe1eac572d", "score": "0.50466394", "text": "def get_part_prefix(self) -> str:\n        return self._j_output_file_config.getPartPrefix()", "title": "" }, { "docid": "8eb64baa60ed5fcba7db6b9f4cf99d73", "score": "0.50417536", "text": "def getfirstmatchingheader(self, name):\r\n        name = name.lower() + ':'\r\n        n = len(name)\r\n        lst = []\r\n        hit = 0\r\n        for line in self.headers:\r\n            if hit:\r\n                if not line[:1].isspace():\r\n                    break\r\n            elif line[:n].lower() == name:\r\n                hit = 1\r\n            if hit:\r\n                lst.append(line)\r\n        return lst", "title": "" }, { "docid": "34092994576ef2a2967177b9e72f39aa", "score": "0.5034628", "text": "def getrawheader(self, name):\r\n\r\n        lst = self.getfirstmatchingheader(name)\r\n        if not lst:\r\n            return None\r\n        lst[0] = lst[0][len(name) + 1:]\r\n        return ''.join(lst)", "title": "" }, { "docid": "41aba447601ee3951c49f691eae1fcf0", "score": "0.5031038", "text": "def startswith(self,prefix,start,end):\n        \n        \n        return None", "title": "" }, { "docid": "41aba447601ee3951c49f691eae1fcf0", "score": "0.5030864", "text": "def startswith(self,prefix,start,end):\n        \n        \n        return None", "title": "" }, { "docid": "e5e00a7edf8eeece6818279a966dfda4", "score": "0.50168073", "text": "async def list_prefix(self, ctx):\n\n        _prefix = '\\n'.join(['myst pls ', 'myst '])\n\n        if await self.dbp[str(ctx.guild.id)].find().count() <= 0:\n            return await ctx.send(f'**My assigned prefixes for your server:**\\n\\n{_prefix}')\n\n        prefixes = [x['_id'] async for x in self.dbp[str(ctx.guild.id)].find()]\n\n        pages = 
SimplePaginator(title='Prefixes',\n ctx=ctx, bot=self.bot,\n colour=0x886aff,\n entries=prefixes,\n prepend='**•** - ',\n append='',\n footer='You may also mention me.')\n await pages.embed_creator()", "title": "" }, { "docid": "9e9b399ed4dee63e37041853f55d6760", "score": "0.5014019", "text": "def traverse_list_fwd(head):\n if head is None:\n return -1\n curr = head\n arr = []\n while curr.next != head:\n arr.append(curr.data)\n curr = curr.next\n arr.append(curr.data)\n return ' '.join(map(str, arr))", "title": "" }, { "docid": "8ba9afcfcda7807f89038f88ca7a8f52", "score": "0.500256", "text": "def findNodeInTree(self,p,headline):\n \n c = p.c\n for p in p.subtree_iter():\n h = headline.strip().lower()\n if p.headString().strip().lower() == h:\n return p.copy()\n return c.nullPosition()", "title": "" }, { "docid": "66e0ba8550036ef0761e19a1f0c1d0e0", "score": "0.49935704", "text": "def __handle_start_setext_heading_token(\n cls, output_html, next_token, transform_state\n ):\n _ = transform_state\n\n token_parts = [output_html]\n if output_html.endswith(\"</ol>\") or output_html.endswith(\"</ul>\"):\n token_parts.append(ParserHelper.newline_character)\n token_parts.extend(\n [\"<h\", \"1\" if next_token.heading_character == \"=\" else \"2\", \">\"]\n )\n return \"\".join(token_parts)", "title": "" }, { "docid": "b903cf6d1f3f23f6f08bd0b323d5cbb8", "score": "0.49772188", "text": "def _get_header(self):\n header = []\n for entry in self.archer:\n if entry.startswith('#'):\n header.append(entry)\n return header", "title": "" }, { "docid": "9f2018b71a90d59cea8b9dea8a22f93b", "score": "0.496918", "text": "def get_heading(line):\n head = re.split('[\\d\\W]', line.lower())\n # print head, \"head\"\n # head = split(line.lower())\n # parse [verse] number, if existing:\n number = \"\"\n numbers = re.split('\\D', line)\n for n in numbers:\n if n != \"\":\n number = n\n if 'order' in head:\n return 'order'\n if 'intro' in head:\n return 'intro'\n if 'verse' in head:\n return 'verse' # + str(number)\n if 'pre chorus' in line.lower():\n return 'pre chorus' # + str(number)\n if 'chorus' in head:\n return 'chorus'\n if 'bridge' in head:\n return 'bridge'\n if 'tag' in head:\n return 'tag'\n if 'refrain' in head:\n return 'refrain'\n if 'ending' in head:\n return 'ending'\n if 'outro' in head:\n return 'outro'\n if 'end' in head:\n return 'end'\n\n # if none of these, try single letters\n if 'v' in head:\n return 'verse' # + str(number)\n # if 'c' in head:\n # return 'chorus'\n # if 'b' in head:\n # return 'bridge'\n # if 'r' in head:\n # return 'refrain'\n return None", "title": "" }, { "docid": "9779ca7ffb83e4e24adfa1b67366035c", "score": "0.4967495", "text": "def prefix(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"prefix\")", "title": "" }, { "docid": "8d73fcb28bab97796991ee086df7d854", "score": "0.49646273", "text": "def prefix(delim, s):\r\n return s.split(delim)[0]", "title": "" }, { "docid": "904198bb7babf5c59dd8c17b2d285e91", "score": "0.49610326", "text": "def traverse_prefix(self, result=None):\n if result is None:\n result = []\n result.append(self._data)\n if self._left._data:\n self._left.traverse_infix(result)\n if self._right._data:\n self._right.traverse_infix(result)\n return result", "title": "" }, { "docid": "b5971e77688466a97b1804e8c7a11107", "score": "0.4957697", "text": "def handle_phrase_start(self, category, head_start_index=None):\n pass", "title": "" }, { "docid": "44d3d8a6bd2bf11cade2c60ca20c1db8", "score": "0.49429625", "text": "def getMarkerName(index):", "title": "" }, 
{ "docid": "3f3862e81109e0f3354702db597a780f", "score": "0.4939571", "text": "def __handle_start_atx_heading_token(cls, output_html, next_token, transform_state):\n previous_token = transform_state.actual_tokens[\n transform_state.actual_token_index - 1\n ]\n\n token_parts = [output_html]\n if output_html.endswith(\"</ol>\") or output_html.endswith(\"</ul>\"):\n token_parts.append(ParserHelper.newline_character)\n elif previous_token.is_paragraph_end and not transform_state.is_in_loose_list:\n token_parts.append(ParserHelper.newline_character)\n token_parts.extend([\"<h\", str(next_token.hash_count), \">\"])\n return \"\".join(token_parts)", "title": "" }, { "docid": "af121db2c04daf125dd21f6fc60b5fe8", "score": "0.4937059", "text": "def _header_row_num(self) -> int:\n return self.header_levels if self.fmt.header else 0", "title": "" }, { "docid": "183c55145d0939f728f6f677baa6c966", "score": "0.49355292", "text": "def get_cur_line(key_name, value_list, prefix):\n property_content = get_property_content(value_list, prefix)\n if not property_content:\n return None\n cur_line = key_name + ': ' + property_content\n return cur_line", "title": "" }, { "docid": "59d07c7780d4d5f6900ed3f5c228b5a2", "score": "0.49332675", "text": "def get_heading(self):\n head_title = {\n \"name\": \"\",\n \"rev_no\": \"\",\n \"doc_no\": \"\",\n }\n head_object = self.env[\"report.heading\"]\n head_ids = head_object.search([], order=\"id\")\n if head_ids:\n head_rec = head_ids[0]\n if head_rec:\n head_title[\"name\"] = head_rec.name or \"\"\n head_title[\"rev_no\"] = head_rec.revision_no or \"\"\n head_title[\"doc_no\"] = head_rec.document_no or \"\"\n head_title[\"image\"] = head_rec.image or \"\"\n return head_title", "title": "" }, { "docid": "62392cf363b14051ef0474dfebcc2973", "score": "0.49319178", "text": "def node_find( fdt, node_prefix ):\n try:\n node = fdt.path_offset( node_prefix )\n except:\n node = -1\n\n return node", "title": "" }, { "docid": "8448882b5450767b0423077ec4f23683", "score": "0.493019", "text": "def robots_header():\n return ['id', 'parent1', 'parent2', 'battery_level']", "title": "" }, { "docid": "0baf0d461b2beadcdabb1c103903c255", "score": "0.49280608", "text": "def StartPrefix(self):\n if self.force_auto_sync:\n self.get('StartPrefix')\n return self._StartPrefix", "title": "" }, { "docid": "41c57aaf2447f25cabc8b0569555f343", "score": "0.4924894", "text": "def _page_heading(self, url_path, log_in=False):\n self._get_plantshare(url_path, log_in=log_in)\n return self.css1('h1').text", "title": "" }, { "docid": "4c29252b38e50d88e53fa574fe98fb02", "score": "0.49207452", "text": "def getAllNodesNames(self, startingNode=None):\n if startingNode is not None:\n snode = self.grid.find(startingNode)\n returnNames = [node.name for node in snode.iter()]\n else:\n returnNames = self.mappingLevelName.values()\n\n return returnNames", "title": "" }, { "docid": "c49e49e09b3c3a685f61f7ab9f0a5526", "score": "0.4909107", "text": "def head_index(self) -> int:\n raise NotImplementedError()", "title": "" }, { "docid": "d24f88ffa7e56a76f6e46cf6bd87f849", "score": "0.4908285", "text": "def prefix_include(self) -> Path:\n return self.prefix / \"include\"", "title": "" }, { "docid": "c3a71f900a3bb88008dd53028c70f52c", "score": "0.49075162", "text": "def subtree(self, prefix):\n\n # Start here and then walk out the edges for 'prefix'\n node = self.root\n\n for char in prefix:\n if char not in node:\n return None\n node = node[char]\n\n return Lexicon(root=node)", "title": "" }, { "docid": 
"9b149ca77efab40cd70ff6072a2c12c5", "score": "0.49042958", "text": "def get_first_prefix(test_file):\n f = open(test_file)\n prefix = f.readline()\n f.close()\n return prefix", "title": "" }, { "docid": "ad29d3f3e88f6f75a86685183c58e8ce", "score": "0.48932666", "text": "def startsWith(self, prefix):\n n=len(prefix)\n if not n:\n return False\n curnode=self.root\n for i in xrange(n):\n pos=ord(prefix[i])-97\n if curnode.childlist[pos]:\n curnode=curnode.childlist[pos]\n else:\n return False\n \n return True", "title": "" } ]
0eeefe075868164149395a919de1d1f2
Interpolate the array to a new, larger size. Uses scipy.misc.imresize. The ranges are interpolated accordingly.
[ { "docid": "8d03d5f217a7ead5cb9052db7c919c2f", "score": "0.61982614", "text": "def resize(self, new_ywidth, new_xwidth, order=1):\n # Check if scipy is available\n try:\n from scipy.ndimage import zoom\n except ImportError:\n logging.error(\n 'Module scipy is not available. scipy.misc.imresize is used for interpolation.')\n return\n\n xfactor = float(new_xwidth) / self.xwidth\n yfactor = float(new_ywidth) / self.ywidth\n self._zdata = zoom(self._zdata, (yfactor, xfactor), order=order)", "title": "" } ]
[ { "docid": "649b12961f9f06319afd48fa09a8382c", "score": "0.6544772", "text": "def resize_mri(img, size, interpolation=0):\n zoom_factors = np.asarray(size) / np.asarray(img.shape)\n # img_arr = img_arr.resize((256,256,128))\n return sp.ndimage.zoom(img, zoom_factors, order=interpolation)", "title": "" }, { "docid": "edd2e406cd07462b5078784fa0477c4f", "score": "0.65342444", "text": "def resize(array, factor):\n if factor == 1:\n return array\n else:\n dim_factors = [factor for _ in array.shape[:-1]] + [1]\n return scipy.ndimage.interpolation.zoom(array, dim_factors, order=0)", "title": "" }, { "docid": "e3e5c6120e90418692a82cd2a73ecc40", "score": "0.64200795", "text": "def _interpolate_bins(bins, new_size: int):\n bins_as_vec = np.asarray(bins)\n new_bins_x = np.linspace(bins_as_vec.min(), bins_as_vec.max(), new_size)\n new_bins = _resize_vector_function(bins)(new_bins_x)\n return new_bins", "title": "" }, { "docid": "039542770e14ded30b98ca14f813f846", "score": "0.6364013", "text": "def imresize(image, size):\n m, n = image.shape\n X = linspace(0, m - 1, size[0])\n Y = linspace(0, n - 1, size[1])\n kx, ky = min([m - 1, 3]), min([n - 1, 3])\n interp = RectBivariateSpline(\n arange(m), arange(n), image, kx=kx, ky=ky)\n resized = interp(X, Y)\n return resized", "title": "" }, { "docid": "a9173cfaf1738b68e3b82eb26b6a953b", "score": "0.631432", "text": "def rescale_numpy_array(a, old_range, new_range, new_dtype, truncate=False):\n assert isinstance(old_range, (tuple, list)) and isinstance(new_range, (tuple, list))\n old_min, old_max = old_range\n if a.min() < old_min or a.max() > old_max:\n if truncate:\n a = np.clip(a, old_min, old_max)\n else:\n raise Exception('array values are outside range %s' % (old_range,))\n new_min, new_max = new_range\n old_delta = old_max - old_min\n new_delta = new_max - new_min\n if old_delta == 0:\n return ((a - old_min) + (new_min + new_max)/2).astype(new_dtype)\n else:\n return (new_min + (a - old_min)*new_delta/old_delta).astype(new_dtype)", "title": "" }, { "docid": "893d197a96efad773079a91e9d660345", "score": "0.62107444", "text": "def _interpolate_weights(\n w: np.ndarray,\n new_size: int,\n):\n if new_size == w.shape[0]:\n return w\n else:\n _w = w\n if max(w.shape) <= 3:\n _w = _resize_matrix_function(w)(np.linspace(0, 1, 4),\n np.linspace(0, 1, 4))\n _w = _resize_matrix_function(\n _w, kind='cubic')(np.linspace(0, 1, new_size),\n np.linspace(0, 1, new_size))\n for i in range(new_size):\n _w[i, i] = 1\n return _w", "title": "" }, { "docid": "87db2e4e5714217863a7f060e4786010", "score": "0.6202134", "text": "def _resize(self, new_cap):\n \n B = self.make_array(new_cap) # New bigger array \n self.operations+=self.n\n\n for k in range(self.n): # Reference all existing values \n B[k] = self.A[k] \n \n self.A = B # Call A the new bigger array \n self.capacity = new_cap # Reset the capacity ", "title": "" }, { "docid": "d15320e08629ab6b4209967eecb78db1", "score": "0.619176", "text": "def _resize(self, new_cap):\n\n B = self.make_array(new_cap) # New bigger array\n\n for k in range(self.n): # Reference all existing values\n B[k] = self.A[k]\n\n self.A = B # Call A the new bigger array\n self.capacity = new_cap # Reset the capacity", "title": "" }, { "docid": "4cf8d0ac5a678443fa8d99b00e007e58", "score": "0.61751384", "text": "def _resize(self, new_cap: int):\n\n big = self.make_array(new_cap) # New bigger array\n\n for i in range(self.n): # Reference all existing values\n big[i] = self.arr[i]\n\n self.arr = big # Call A the new bigger array\n self.capacity = 
new_cap # Reset the capacity", "title": "" }, { "docid": "67f64614c1149476b9c28f0c651819d2", "score": "0.61717224", "text": "def _resize(self, size_capacity):  # nonpublic utility\n        array_b = self._make_array(size_capacity)  # new (bigger) array\n        for k in range(self._n):  # for each existing value\n            array_b[k] = self._array_a[k]\n        self._array_a = array_b  # use the bigger array\n        self._capacity = size_capacity", "title": "" }, { "docid": "1177d4bb01dbbcf5461b752ddb6cff68", "score": "0.613292", "text": "def resize(image, size=IMAGE_SIZE, interp=cv2.INTER_LINEAR):\n\n    img = cv2.resize(image, size, interpolation=interp)\n\n    #if np.max(img) > 1 :\n    #    img = (img / 255.).astype(np.float32)\n    return img", "title": "" }, { "docid": "668fd6453e789c503ad2592e195c42a7", "score": "0.6130269", "text": "def resize(data=None, size=_Null, keep_ratio=_Null, interp=_Null, out=None, name=None, **kwargs):\n    return (0,)", "title": "" }, { "docid": "4ea9bac9985e34da9c2bce94abe94865", "score": "0.61135924", "text": "def resize_image(im_arr, size):\n    if size is not None and im_arr.shape[1] != size:\n        if im_arr.shape[1] < size:  # upsampling\n            im_arr = resize(im_arr, (size, size), interpolation=INTER_CUBIC)\n        else:  # downsampling\n            im_arr = resize(im_arr, (size, size), interpolation=INTER_AREA)\n    return im_arr", "title": "" }, { "docid": "0cf1491a45e31f375a74240416b65b14", "score": "0.6099532", "text": "def resize_img(i, s, interpolation=0):\n    i = misc.toimage(i)\n    return misc.fromimage(i.resize(s[::-1], interpolation))", "title": "" }, { "docid": "e8b070ebd582518ed2bf0f79eee07f29", "score": "0.6073765", "text": "def resample_image(arr, max_size=400):\n    dim = np.max(arr.shape[0:2])\n    if dim < max_size:\n        max_size = dim\n    x, y, _ = arr.shape\n    sx = int(np.ceil(x / max_size))\n    sy = int(np.ceil(y / max_size))\n    img = np.zeros((max_size, max_size, 3), dtype=arr.dtype)\n    arr = arr[0:-1:sx, 0:-1:sy, :]\n    xl = (max_size - arr.shape[0]) // 2\n    yl = (max_size - arr.shape[1]) // 2\n    img[xl:arr.shape[0]+xl, yl:arr.shape[1]+yl, :] = arr\n    return img", "title": "" }, { "docid": "5fbc180bee30a0eec29adb9a48fcc384", "score": "0.60612816", "text": "def _resize(self, c): # nonpublic utility\n    B = self._make_array(c) # new (bigger) array\n    for k in range(self._n): # for each existing value\n      B[k] = self._A[k]\n    self._A = B # use the bigger array\n    self._capacity = c", "title": "" }, { "docid": "d1b34835edc62b83bc5c71eb6d17f679", "score": "0.6052699", "text": "def _resize(self, c): # nonpublic utility\n    B = self._make_array(c) # new (bigger) array\n    for k in range(self._n): # for each existing value\n      B[k] = self._A[k]\n    self._A = B # use the bigger array\n    self._capacity = c", "title": "" }, { "docid": "3c8b374049aabb966768696bf512d1ea", "score": "0.6009257", "text": "def resample(images, spacing, new_spacing=(1, 1, 1)):\n    spacing = np.array(list(spacing))\n    new_spacing = np.array(list(new_spacing))\n\n    resize_factor = spacing / new_spacing\n    new_real_shape = images.shape * resize_factor\n    new_shape = np.round(new_real_shape)\n    real_resize_factor = new_shape / images.shape\n    new_spacing = spacing / real_resize_factor\n\n    images = zoom(images, 
real_resize_factor)\n    return images", "title": "" }, { "docid": "503c4982f97509a77495dc55c4cb5aa2", "score": "0.60055983", "text": "def expand_im(im,dest_size):\r\n    return cv2.resize(im, (dest_size[1],dest_size[0]), interpolation = cv2.INTER_LINEAR)", "title": "" }, { "docid": "9ec16159d71bfb903b4d020fe2d2f3a0", "score": "0.6005151", "text": "def interp1d(arr, size):\n\n    arr_interp = interp.interp1d(np.arange(arr.size), arr)\n    return arr_interp(np.linspace(0, arr.size - 1, size))", "title": "" }, { "docid": "682fd6e0e33cf8f909e76668e0878686", "score": "0.5966012", "text": "def resample(\n    image,\n    new_resolution,\n    old_resolution=1,\n    err_to_larger=True,\n    extrapolation_fill_value=None,\n    origin=\"center\",\n    method=\"linear\",\n    image_is_coords=False,\n    anti_aliasing=True,\n):\n\n    # Validate inputs and define ndim & old_shape based on image_is_coords.\n    image = _validate_ndarray(image)  # Breaks alias.\n    if image_is_coords:\n        ndim = image.ndim - 1\n        old_shape = image.shape[:-1]\n    else:\n        ndim = image.ndim\n        old_shape = image.shape\n    new_resolution = _validate_resolution(new_resolution, ndim)\n    old_resolution = _validate_resolution(old_resolution, ndim)\n\n    # Handle trivial case.\n    if np.array_equal(new_resolution, old_resolution):\n        return (\n            image  # Note: this is a copy of the input image and is not the same object.\n        )\n\n    # Compute new_coords and old_axes.\n    if err_to_larger:\n        new_shape = np.ceil(old_shape * old_resolution / new_resolution)\n    else:\n        new_shape = np.floor(old_shape * old_resolution / new_resolution)\n    new_coords = _compute_coords(new_shape, new_resolution, origin)\n    old_axes = _compute_axes(old_shape, old_resolution, origin)\n\n    # Apply anti-aliasing gaussian filter if downsampling.\n    if anti_aliasing:\n        if image_is_coords:\n            downsample_factors = np.insert(\n                old_shape / new_shape, image.ndim - 1, values=0, axis=0\n            )\n        else:\n            downsample_factors = old_shape / new_shape\n        anti_aliasing_sigma = np.maximum(0, (downsample_factors - 1) / 2)\n        gaussian_filter(\n            image, anti_aliasing_sigma, output=image, mode=\"nearest\"\n        )  # Mutates image.\n\n    # Interpolate image.\n    new_image = interpn(\n        points=old_axes,\n        values=image,\n        xi=new_coords,\n        bounds_error=False,\n        fill_value=extrapolation_fill_value,\n    )\n\n    return new_image", "title": "" }, { "docid": "3b6222e3a7401792e579396b295d5814", "score": "0.59144825", "text": "def __resize_image(input, width, height, interpolation):\n    return cv2.resize(input, (int(width), int(height)), 0, 0, interpolation)", "title": "" }, { "docid": "adbac7b583c810ff7cb75374abc8e974", "score": "0.59126306", "text": "def 
__resize_image(input, width, height, interpolation):\n return cv2.resize(input, ((int)(width), (int)(height)), 0, 0, interpolation)", "title": "" }, { "docid": "c4f437e660df11425ee37c063f2150f3", "score": "0.5893719", "text": "def ResampleDEM(self, newCellSize, method):\n # Create a blank copy with everything but the array\n newDEM = self.MetaCopy()\n\n self.log.debug(\"Resampling original data from {0}m to {1}m using {2} method\".format(self.cellWidth, newCellSize, method))\n arrayResampled = None\n\n XaxisOld, YaxisOld = np.ma.masked_array(np.mgrid[0:self.rows:self.cellWidth, 0:self.cols:abs(self.cellHeight)],\n (self.array.mask, self.array.mask))\n\n XaxisNew, YaxisNew = np.mgrid[0:self.rows:newCellSize, 0:self.cols:newCellSize]\n newMask = interpolate.griddata((XaxisOld.ravel(), YaxisOld.ravel()), self.array.mask.ravel(),\n (XaxisNew, YaxisNew), method='nearest', fill_value=np.nan)\n\n # Put us in the middle of the cell\n XaxisOld += abs(self.cellWidth) / 2\n YaxisOld += abs(self.cellHeight) / 2\n\n # Bilinear is a lot slower that the others and it's its own\n # method, written based on the\n # well known wikipedia article.\n if method == \"bilinear\":\n # Now we resample based on the method passed in here.\n fFactor = self.cellWidth / newCellSize\n newShape = (int(self.rows * fFactor), int(self.cols * fFactor))\n arrayResampled = bilinearResample(self.array, newShape)\n elif method == \"linear\" or method == \"cubic\" or method == \"nearest\":\n arrayResampled = interpolate.griddata((XaxisOld.ravel(), YaxisOld.ravel()), self.array.ravel(),\n (XaxisNew, YaxisNew), method=method, fill_value=np.nan)\n else:\n raise Exception(\"Resample Method: '{0}' not recognized\".format(method))\n\n # Set the new cell size and set the new array\n newDEM.cellWidth = newCellSize\n newDEM.cellHeight = -newCellSize\n newDEM.setArray(np.ma.masked_array(arrayResampled, newMask), False)\n self.log.debug(\"Successfully Resampled Raster\")\n return newDEM", "title": "" }, { "docid": "26708ef7168a90c016cd90682a526166", "score": "0.58239156", "text": "def resample(image, scan, new_spacing=None):\n if new_spacing is None:\n new_spacing = [1, 1, 1]\n\n spacing_list = [scan[0].SliceThickness]\n spacing_list.extend(scan[0].PixelSpacing)\n spacing = np.array(spacing_list, dtype=np.float32)\n\n # define variable resize factor using current spacing divided by new spacing given above\n resize_factor = spacing / new_spacing\n\n # dimension of the image array * resize factor\n new_real_shape = image.shape * resize_factor\n new_shape = np.round(new_real_shape)\n real_resize_factor = new_shape / image.shape\n\n # resample images with new resize factor\n image = scipy.ndimage.interpolation.zoom(image, real_resize_factor, mode='nearest')\n return image", "title": "" }, { "docid": "bca86a8129ad015321c1c3aa3e8ee0b4", "score": "0.5760109", "text": "def smart_resize(im, target_size=(256, 256)):\r\n if np.prod(im.shape[:2]) >= np.prod(target_size):\r\n interp_fn = cv2.INTER_AREA\r\n else:\r\n interp_fn = cv2.INTER_LINEAR\r\n return cv2.resize(im, (target_size[1], target_size[0]),\r\n interpolation=interp_fn)", "title": "" }, { "docid": "20124cff32221a3c367252c5aa3d4bc6", "score": "0.5750761", "text": "def resizeData(array, imageSize, FLAGpreserveIntValues = True):\n array = array.astype(float)\n arrayResize = (array - array.min()) / (array.max()-array.min()) # values need to be between 0 and 1 to use the resize function.\n arrayResize = resize(arrayResize,imageSize, preserve_range=True, mode = 'reflect')\n arrayResize = 
(arrayResize*(array.max()-array.min()))+array.min() # restore the original range of values.\n # If needed, restore the original set of integers values (used for masks and labels)\n if FLAGpreserveIntValues:\n arrayResize = np.around(arrayResize)\n #arrayResize[arrayResize < array.min()] = array.min()\n #arrayResize[arrayResize > array.max()] = array.max()\n rangeValues = np.unique(array)\n interLabelsTh = [ (x + rangeValues[i - 1])/2. for i, x in enumerate(rangeValues)][1:]\n LOWlimit = [arrayResize.min()-1] + interLabelsTh\n UPlimit = interLabelsTh + [arrayResize.max()+1]\n for label_i in range(0,len(rangeValues)):\n arrayResize[np.multiply(UPlimit[label_i] > arrayResize, arrayResize >= LOWlimit[label_i])] = rangeValues[label_i]\n return arrayResize", "title": "" }, { "docid": "734f9459068822ae991f0b3e69d93d92", "score": "0.57504344", "text": "def resize(self, new_cap):\n self.substitute_array = [0] * new_cap\n for iter in range(self.array_length): \n self.substitute_array[iter] = self.dynamic_array[iter] \n \n self.dynamic_array = self.substitute_array\n self.capacity = new_cap", "title": "" }, { "docid": "d3ed9264aac41522f9934288d28e56ba", "score": "0.5699504", "text": "def resize(self, *args):\n return _ida_pro.intvec_t_resize(self, *args)", "title": "" }, { "docid": "977cec748b46d3ae01bdb462e004b1fd", "score": "0.5694589", "text": "def resize(self, *args):\n return _asat_swig.range_vector_t_resize(self, *args)", "title": "" }, { "docid": "264ea48400f6d20446bfa357a795ac69", "score": "0.56929857", "text": "def resample_sitk_image(sitk_image, spacing=None, interpolator=None,\n fill_value=0):\n if isinstance(sitk_image, str):\n sitk_image = sitk.ReadImage(sitk_image)\n num_dim = sitk_image.GetDimension()\n if not interpolator:\n interpolator = 'linear'\n pixelid = sitk_image.GetPixelIDValue()\n\n if pixelid not in [1, 2, 4]:\n raise NotImplementedError(\n 'Set `interpolator` manually, '\n 'can only infer for 8-bit unsigned or 16, 32-bit signed integers')\n if pixelid == 1: # 8-bit unsigned int\n interpolator = 'nearest'\n\n orig_pixelid = sitk_image.GetPixelIDValue()\n orig_origin = sitk_image.GetOrigin()\n orig_direction = sitk_image.GetDirection()\n orig_spacing = np.array(sitk_image.GetSpacing())\n orig_size = np.array(sitk_image.GetSize(), dtype=np.int)\n\n if not spacing:\n min_spacing = orig_spacing.min()\n new_spacing = [min_spacing]*num_dim\n else:\n new_spacing = [float(s) if s else orig_spacing[idx] for idx, s in enumerate(spacing)]\n\n assert interpolator in _SITK_INTERPOLATOR_DICT.keys(),\\\n '`interpolator` should be one of {}'.format(_SITK_INTERPOLATOR_DICT.keys())\n\n sitk_interpolator = _SITK_INTERPOLATOR_DICT[interpolator]\n\n new_size = orig_size*(orig_spacing/new_spacing)\n new_size = np.ceil(new_size).astype(np.int) # Image dimensions are in integers\n # SimpleITK expects lists, not ndarrays\n new_size = [int(s) if spacing[idx] else int(orig_size[idx]) for idx, s in enumerate(new_size)]\n\n resample_filter = sitk.ResampleImageFilter()\n resampled_sitk_image = resample_filter.Execute(\n sitk_image,\n new_size,\n sitk.Transform(),\n sitk_interpolator,\n orig_origin,\n new_spacing,\n orig_direction,\n fill_value,\n orig_pixelid\n )\n\n return resampled_sitk_image, orig_spacing", "title": "" }, { "docid": "1d237560ac527e2f245ebade42e7c2da", "score": "0.56850004", "text": "def fixed_crop(src, x0, y0, w, h, size=None, interp=2):\n out = src[y0:y0 + h, x0:x0 + w, :]\n if size is not None and (w, h) != size:\n sizes = (h, w, size[1], size[0])\n out = cv2.resize(out, *size, 
interpolation=get_interp_method(interp, sizes))\n return out", "title": "" }, { "docid": "150446fc165b8bcc9febd3f658bb9a24", "score": "0.56847626", "text": "def stretch_data_range(self, new_max=255):\n\n data = self.data\n vmin_data = data.min()\n new_data = new_max*((data - vmin_data)/(data.max() - vmin_data))\n\n self.data = new_data", "title": "" }, { "docid": "1660a0a492c5ff713fce0c9f427bbf9f", "score": "0.5670642", "text": "def stretch(a):\n hist = histogram(a)\n im = arrayToImage(a)\n lut = []\n for b in range(0, len(hist), 256):\n # step size\n step = reduce(operator.add, hist[b:b+256]) / 255\n # create equalization lookup table\n n = 0\n for i in range(256):\n lut.append(n / step)\n n = n + hist[i+b]\n im = im.point(lut)\n return imageToArray(im)", "title": "" }, { "docid": "c520c03101a6d6e3abbe5e0bfe58668b", "score": "0.5654528", "text": "def scale_img(i, s, interpolation=0):\n i = misc.toimage(i)\n s = scale_tuple(i.size, s)\n return misc.fromimage(i.resize(s, interpolation))", "title": "" }, { "docid": "bee427c83cda9d08b50730af8af77585", "score": "0.56472", "text": "def resample(self, base, ingested, template):\n # Loads images\n template_im = nb.load(template)\n base_im = nb.load(base)\n # Aligns images\n target_im = nl.resample_img(base_im,\n target_affine=template_im.get_affine(),\n target_shape=template_im.get_data().shape,\n interpolation=\"nearest\")\n # Saves new image\n nb.save(target_im, ingested)\n pass", "title": "" }, { "docid": "cd0f62fb5ee78a3f5d0182ec76de9538", "score": "0.56386757", "text": "def imbatchresize(array, scale):\n assert array.shape[1] == 3\n assert len(array.shape) == 4\n imgs_sized = np.zeros((array.shape[0], array.shape[1], int(array.shape[2], int(array.shape[3]))))\n for i in range(len(array)):\n ii = imresize(array[i], scale)\n imgs_sized[i] = np.swapaxes(np.swapaxes(ii, 1, 2), 0, 1)", "title": "" }, { "docid": "49658fb52b201d19f00d9e17aea67e04", "score": "0.56322724", "text": "def _resize(self, c):\n newArr = self._make_array(c) # make new array\n for i in range(self._n): # copy elements to new array\n newArr[i] = self._A[i]\n self._A = newArr\n self._capacity = c", "title": "" }, { "docid": "d536ce5a7c381dbddcd39ed396fbfc68", "score": "0.563004", "text": "def min_resize(x, size, interpolation=cv2.INTER_LINEAR):\n w, h = map(float, x.shape[:2])\n if min([w, h]) != size:\n if w <= h:\n x = cv2.resize(x, (int(round((h/w)*size)), int(size)), interpolation=interpolation)\n else:\n x = cv2.resize(x, (int(size), int(round((w/h)*size))), interpolation=interpolation)\n return x", "title": "" }, { "docid": "ddae4be6b052e6f8bb72692297867a0d", "score": "0.56185263", "text": "def vtkImageResample(image, spacing, opt):\n reslicer = vtk.vtkImageReslice()\n reslicer.SetInputData(image)\n if opt=='linear':\n reslicer.SetInterpolationModeToLinear()\n elif opt=='NN':\n reslicer.SetInterpolationModeToNearestNeighbor()\n elif opt=='cubic':\n reslicer.SetInterpolationModeToCubic()\n else:\n raise ValueError(\"interpolation option not recognized\")\n\n #size = np.array(image.GetSpacing())*np.array(image.GetDimensions())\n #new_spacing = size/np.array(dims)\n\n reslicer.SetOutputSpacing(*spacing)\n reslicer.Update()\n\n return reslicer.GetOutput()", "title": "" }, { "docid": "b9b146f105bd73de6a6c4a7b66660610", "score": "0.5610217", "text": "def _resize_vector_function(v):\n _v = np.asarray(v)\n f = sp.interpolate.interp1d(\n np.linspace(_v.min(), _v.max(), len(v)), v, kind='cubic')\n return f", "title": "" }, { "docid": "33ff6711c2df90c1ff87ce2670759d8d", "score": 
"0.56035906", "text": "def _rescale(numpy_array):\n return numpy_array / 10000", "title": "" }, { "docid": "e4c2e15464be4f279c8fbe9df0a63460", "score": "0.56002975", "text": "def expand_image(image):\n n = image.shape[0]\n large_image = np.zeros((n + 20, n + 20))\n large_image[10:(n + 10), 10:(n + 10)] = image\n return large_image", "title": "" }, { "docid": "b1f364c65c3887db3d39c2b50a2cabf4", "score": "0.55889666", "text": "def resizeROIS(self, origSize, newSize):\n return self.mask.resizeROIS(origSize, newSize)", "title": "" }, { "docid": "4d918ec84bab643b1051b8bf4571f94b", "score": "0.558144", "text": "def interpolate(image):\n image_up = np.zeros((2*image.shape[0], 2*image.shape[1]))\n # Upsample\n image_up[::2, ::2] = image\n # Blur (we need to scale this up since the kernel has unit area)\n # (The length and width are both doubled, so the area is quadrupled)\n #return sig.convolve2d(image_up, 4*kernel, 'same')\n return ndimage.filters.convolve(image_up,4*kernel, mode='constant')", "title": "" }, { "docid": "453111962d085415db2f2b6ef0ad1d23", "score": "0.55734414", "text": "def resize_image(img_name):\n # new size definition\n width = 200\n height = 150\n new_size = width, height\n\n img = Image.open(img_name, 'r')\n resize = img.resize(new_size)\n array = np.array(resize)\n return array", "title": "" }, { "docid": "d88e9c2582fd45a79f4f40e723b57e41", "score": "0.5568455", "text": "def upsample(arr, xrate, yrate):\n ny, nx = arr.shape\n\n xi = np.linspace(0, arr.shape[1] - 1, round(nx * xrate)).reshape((1, -1))\n yi = np.linspace(0, arr.shape[0] - 1, round(ny * yrate)).reshape((-1, 1))\n return bilinear_interpolate(arr, xi, yi)", "title": "" }, { "docid": "22e1a92e04b3811bbc81720f9f8b1568", "score": "0.55596614", "text": "def _resize_arrays(self, new_size):\n\n self.times = np.resize(self.times,(new_size))\n self.pitch = np.resize(self.pitch,(new_size))\n self.roll = np.resize(self.roll,(new_size))\n self.heading = np.resize(self.heading,(new_size))\n self.heave = np.resize(self.heave,(new_size))", "title": "" }, { "docid": "3e4d26e699b16ea6f495dbb1ad928231", "score": "0.5551498", "text": "def _resize(input_data):\n rate = 1\n if input_data['img'].shape[0] > input_data['img'].shape[1]:\n if True: # input_data['img'].shape[1] < 512:\n rate = 512/input_data['img'].shape[1]\n seq = iaa.Sequential([\n iaa.Scale({'height': \"keep-aspect-ratio\", 'width': 512}, 'cubic')\n ])\n input_data['img'] = seq.augment_image(input_data['img'])\n else:\n if True: # input_data['img'].shape[0] < 512:\n rate = 512/input_data['img'].shape[0]\n seq = iaa.Sequential([\n iaa.Scale({'height': 512, 'width': \"keep-aspect-ratio\"}, 'cubic')\n ])\n input_data['img'] = seq.augment_image(input_data['img'])\n\n input_data['contour'] = [np.cast['int32'](contour*rate) for contour in input_data['contour']]\n input_data['center_point'] = [(np.cast['int32'](point[0] * rate),\n np.cast['int32'](point[1] * rate)) for point in input_data['center_point']]\n return input_data", "title": "" }, { "docid": "c94c91da2946c5f454888537ed62abb8", "score": "0.55116254", "text": "def resample_nib(image, new_size=None, new_size_type=None, image_dest=None, interpolation='linear', mode='nearest'):\n\n # set interpolation method\n dict_interp = {'nn': 0, 'linear': 1, 'spline': 2}\n\n # If input is an Image object, create nibabel object from it\n if type(image) == nib.nifti1.Nifti1Image:\n img = image\n elif type(image) == Image:\n img = nib.nifti1.Nifti1Image(image.data, image.hdr.get_best_affine())\n else:\n raise Exception(TypeError)\n\n 
if image_dest is None:\n # Get dimensions of data\n p = img.header.get_zooms()\n shape = img.header.get_data_shape()\n\n if img.ndim == 4:\n new_size += ['1'] # needed because the code below is general, i.e., does not assume 3d input and uses img.shape\n\n # compute new shape based on specific resampling method\n if new_size_type == 'vox':\n shape_r = tuple([int(new_size[i]) for i in range(img.ndim)])\n elif new_size_type == 'factor':\n if len(new_size) == 1:\n # isotropic resampling\n new_size = tuple([new_size[0] for i in range(img.ndim)])\n # compute new shape as: shape_r = shape * f\n shape_r = tuple([int(np.round(shape[i] * float(new_size[i]))) for i in range(img.ndim)])\n elif new_size_type == 'mm':\n if len(new_size) == 1:\n # isotropic resampling\n new_size = tuple([new_size[0] for i in range(img.ndim)])\n # compute new shape as: shape_r = shape * (p_r / p)\n shape_r = tuple([int(np.round(shape[i] * float(p[i]) / float(new_size[i]))) for i in range(img.ndim)])\n else:\n raise ValueError(\"'new_size_type' is not recognized.\")\n\n # Generate 3d affine transformation: R\n affine = img.affine[:4, :4]\n affine[3, :] = np.array([0, 0, 0, 1]) # satisfy to nifti convention. Otherwise it grabs the temporal\n logger.debug('Affine matrix: \\n' + str(affine))\n R = np.eye(4)\n for i in range(3):\n try:\n R[i, i] = img.shape[i] / float(shape_r[i])\n except ZeroDivisionError:\n raise ZeroDivisionError(\"Destination size is zero for dimension {}. You are trying to resample to an \"\n \"unrealistic dimension. Check your NIFTI pixdim values to make sure they are \"\n \"not corrupted.\".format(i))\n\n affine_r = np.dot(affine, R)\n reference = (shape_r, affine_r)\n\n # If reference is provided\n else:\n if type(image_dest) == nib.nifti1.Nifti1Image:\n reference = image_dest\n elif type(image_dest) == Image:\n reference = nib.nifti1.Nifti1Image(image_dest.data, image_dest.hdr.get_best_affine())\n else:\n raise Exception(TypeError)\n\n if img.ndim == 3:\n # we use mode 'nearest' to overcome issue #2453\n img_r = resample_from_to(\n img, to_vox_map=reference, order=dict_interp[interpolation], mode=mode, cval=0.0, out_class=None)\n\n elif img.ndim == 4:\n # TODO: Cover img_dest with 4D volumes\n # Import here instead of top of the file because this is an isolated case and nibabel takes time to import\n data4d = np.zeros(shape_r)\n # Loop across 4th dimension and resample each 3d volume\n for it in range(img.shape[3]):\n # Create dummy 3d nibabel image\n nii_tmp = nib.nifti1.Nifti1Image(img.get_data()[..., it], affine)\n img3d_r = resample_from_to(\n nii_tmp, to_vox_map=(shape_r[:-1], affine_r), order=dict_interp[interpolation], mode=mode,\n cval=0.0, out_class=None)\n data4d[..., it] = img3d_r.get_data()\n # Create 4d nibabel Image\n img_r = nib.nifti1.Nifti1Image(data4d, affine_r)\n\n # Convert back to proper type\n if type(image) == nib.nifti1.Nifti1Image:\n return img_r\n elif type(image) == Image:\n return Image(img_r.get_data(), hdr=img_r.header, orientation=image.orientation, dim=img_r.header.get_data_shape())", "title": "" }, { "docid": "a7d57e3b78eab91db0fb2bcd1339c958", "score": "0.5492118", "text": "def downscale_image(image: Tensor, size: tuple, mode: ImageUpscaleMode = ImageUpscaleMode.PADDING) -> Tensor:\n input_h, input_w = size\n if mode == ImageUpscaleMode.PADDING:\n image = image[:, :, :input_h, :input_w]\n else:\n image = F.interpolate(input=image, size=(input_h, input_w))\n\n return image", "title": "" }, { "docid": "5f43ad29015c04c4309227759adb19ac", "score": "0.5465984", "text": 
"def _resize_matrix_function(mx: np.ndarray, kind='linear'):\n x = np.linspace(0, 1, mx.shape[0])\n y = np.linspace(0, 1, mx.shape[1])\n f = sp.interpolate.interp2d(x, y, mx, kind=kind)\n return f", "title": "" }, { "docid": "6f9c06ff1ebde14dc486338319425e29", "score": "0.54523677", "text": "def resize(digits, row_size, column_size):\n\n return np.array([imresize(_, size=(row_size, column_size)) for _ in digits])", "title": "" }, { "docid": "b652e4620fc56fdfb99ded5e18134a3a", "score": "0.5446183", "text": "def interpol_lin(image_as_array, location_matrix, transformation_name, height, width, my_image, black=False):\n print(\"executing interpolation routine\")\n print(\"this may take a while...\")\n new_image = np.copy(image_as_array)\n for i in range(0, height):\n for j in range(0, width):\n if location_matrix[i][j][2] == 1:\n continue\n y = float(location_matrix[i][j][0])\n x = float(location_matrix[i][j][1])\n v0 = int(location_matrix[i][j][0])\n u0 = int(location_matrix[i][j][1])\n if black:\n if u0 < 0 or v0 < 0 or u0 > width-2 or v0 > height-2:\n new_image[i, j][0] = 0\n new_image[i, j][1] = 0\n new_image[i, j][2] = 0\n continue\n else:\n if u0 < 0:\n u0 = 0\n if v0 < 0:\n v0 = 0\n if u0 > width-2:\n u0 = width-2\n if v0 > height-2:\n v0 = height-2\n for k in range(0, 3):\n new_image[i, j][k] = int((1 - x + u0)*(1 - y + v0)*image_as_array[v0, u0][k] +\n (x - u0)*(1 - y + v0)*image_as_array[v0, u0 + 1][k] +\n (1 - x + u0)*(y - v0)*image_as_array[v0 + 1, u0][k] +\n (x - u0)*(y - v0)*image_as_array[v0 + 1, u0 + 1][k])\n return_image = Image.fromarray(new_image)\n return_image.save(my_image[:-4] + \"_\"+transformation_name + my_image[-4:])", "title": "" }, { "docid": "b548fd6026ea781643c38533e4e721d7", "score": "0.5443906", "text": "def resize(\n temps,\n tmin: Optional[int] = None,\n tmax: Optional[int] = None,\n year: Optional[int] = None,\n):\n if year is not None:\n if year == 2018:\n tmin, tmax = -2, 32\n elif year == 2019:\n tmin, tmax = -5, 35\n else:\n raise NotImplementedError(f\"Uknown year {year}\")\n # TODO: Remove, just a sanity check while I write my thesis.\n assert tmin < 0\n assert tmax > 30\n print(tmin, tmax)\n return interp1d(\n np.linspace(min(temps), max(temps), 1000), np.linspace(tmin, tmax, 1000)\n )(temps)", "title": "" }, { "docid": "fb871c37cf4680bab7f2531a9148da97", "score": "0.54434276", "text": "def resize(x, smaller_size):\n im = x\n b, c, h, w = im.shape\n\n if h < w:\n ratio = w / h\n h_res, w_res = smaller_size, ratio * smaller_size\n else:\n ratio = h / w\n h_res, w_res = ratio * smaller_size, smaller_size\n\n if min(h, w) < smaller_size:\n im_res = jax.image.resize(im, [b, c, int(h_res), int(w_res)], \"bilinear\")\n\n else:\n im_res = im\n return im_res", "title": "" }, { "docid": "53c85a53221c89cf8d12f6d87dd2ca5c", "score": "0.5433779", "text": "def _interpolate(self, grid: np.ndarray) -> np.ndarray:\n raise NotImplementedError", "title": "" }, { "docid": "a3d402ca558aa64faa198d2b396dd699", "score": "0.5427263", "text": "def image_transform_resize(img, new_shape):\n\n return resize(img, new_shape, preserve_range=True)", "title": "" }, { "docid": "c66afa849924a01e06ea85986bfd33a9", "score": "0.542106", "text": "def imresize(im,sz):\n pil_im = Image.fromarray(uint8(im))\n return array(pil_im.resize(sz))", "title": "" }, { "docid": "52c46be90c4056eb88f856856a41637b", "score": "0.5419502", "text": "def enlarge(self, x_more, y_more):\n self.img_dim = (self.img_dim[0] + x_more, self.img_dim[1] + y_more)\n img_temp = Image.new('RGB', self.img_dim, 
color=self.white)\n img_temp.paste(self.img, (0, 0))\n self.img = img_temp", "title": "" }, { "docid": "520f801774e2e090c62ee973dbdc24ad", "score": "0.54006255", "text": "def resize_bilinear(images, size, output_dtype=tf.float32):\n images = tf.image.resize_bilinear(images, size, align_corners=True)\n return tf.cast(images, dtype=output_dtype)", "title": "" }, { "docid": "6816d3bc34473f718493eb13bc804071", "score": "0.53982306", "text": "def interpolate(self):\n\n z = self.flux\n nans, za= self.nan_helper(z)\n z[nans]= np.interp(za(nans), za(~nans), z[~nans]).astype('float32')\n self.flux = z", "title": "" }, { "docid": "601767b88c62e13a059a4e91b580e74f", "score": "0.5390953", "text": "def upscale(self, x, h_size, w_size):\n [b, h, w, c] = [int(dim) for dim in x.get_shape()]\n\n return tf.image.resize_nearest_neighbor(x, (h_size, w_size))", "title": "" }, { "docid": "5ed787c497150a1ec9ef8b38bb9aa2b7", "score": "0.53902733", "text": "def resize(img, size, method=\"bilinear\"):\n if method == Image.BILINEAR or method == \"bilinear\":\n method = cv2.INTER_AREA\n elif method == Image.CUBIC or method == \"cubic\":\n method = cv2.INTER_CUBIC\n elif method == Image.NEAREST or method == \"nearest\":\n method = cv2.INTER_NEAREST\n else:\n raise RuntimeError(\"wrong method {}\".format(method))\n if not isinstance(size, (tuple, list)):\n size = (size, size)\n if isinstance(size ,list):\n size = tuple(size)\n im = np.array(img)\n im = cv2.resize(im, size, interpolation=method)\n return Image.fromarray(im)", "title": "" }, { "docid": "b75a0a38e7fd83d7647f159ceb85dda4", "score": "0.5387988", "text": "def rescale(self, image, min=20, max=255):\n image = image.astype('float')\n image -= image.min()\n image /= image.max()\n image = image * (max - min) + min\n\n return image", "title": "" }, { "docid": "6569adfe8ce5b19e684991a527a75603", "score": "0.5382615", "text": "def resize_short_within(img, short, max_size, mult_base=1, interp=2):\n h, w, _ = img.shape\n im_size_min, im_size_max = (h, w) if w > h else (w, h)\n scale = float(short) / float(im_size_min)\n if np.round(scale * im_size_max / mult_base) * mult_base > max_size:\n scale = float(np.floor(max_size / mult_base) * mult_base) / float(im_size_max)\n new_w, new_h = (int(np.round(w * scale / mult_base) * mult_base),\n int(np.round(h * scale / mult_base) * mult_base))\n return vf.resize(img, (new_h, new_w), get_interp_method(interp, (h, w, new_h, new_w)))", "title": "" }, { "docid": "ccf4410e6b6668207e8995322f7fb9f8", "score": "0.5378514", "text": "async def resize(\n src: List[int],\n dsize: List[int],\n fx: float = None,\n fy: float = None,\n interpolation: int = None,\n) -> List[int]:\n if len(dsize) == 2:\n src = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)\n else:\n dsize = dsize[:2]\n dsize = tuple(dsize)\n if interpolation is None:\n if dsize > src.shape:\n interpolation = cv2.INTER_CUBIC\n else:\n interpolation = cv2.INTER_AREA\n\n parameters = {k: v for k, v in locals().items() if v is not None}\n\n resized_image = cv2.resize(**parameters)\n return resized_image", "title": "" }, { "docid": "09d701a404b05f1f30f600a7dca3ad9a", "score": "0.5376019", "text": "def resize_image(path, min_size, interpolation='lanczos'):\n with Image.open(path) as image:\n w, h = image.size\n ratio = max(min_size / w, min_size / h)\n w, h = int(w * ratio), int(h * ratio)\n resample = get_PIL_interpolation(interpolation)\n image = image.resize((w, h), resample=resample)\n image.save(path)", "title": "" }, { "docid": "14c0d1e7f50a0820c12aed63839bd71a", "score": "0.5371231", 
"text": "def resize_image_2d_array(image_2d_array, height, width):\n im = Image.fromarray(image_2d_array)\n out = im.resize((height, width))\n return np.array(out)", "title": "" }, { "docid": "6c7b8417ebc273897199d3a27f606fb3", "score": "0.5367376", "text": "def ct_resize(data, zoom, cval=0.0):\r\n\r\n\tout_data = scipy.ndimage.interpolation.zoom(data, zoom = zoom, cval=cval)\r\n\r\n\treturn out_data", "title": "" }, { "docid": "398ae797d3128871b7d2bbf63b0cf676", "score": "0.53589165", "text": "def enlarge(arr):\n #j = N.array([[1,2],[3,4]]) \n l =N.repeat(arr, 2, axis = 0) \n enlarged =N.repeat(l, 2, axis = 1) \n return enlarged", "title": "" }, { "docid": "aedaf897a2fc00cd0381d96541516041", "score": "0.5356121", "text": "def resize_state(to_resize, new_size=(72,72)):\n # Iterate over channels (theano dimension ordering (ch, rows, cols) is assumed)\n resized = []\n for image in to_resize:\n data = Image.fromarray(image).resize(new_size)\n resized.append(np.asarray(data))\n return np.asarray(resized).squeeze()", "title": "" }, { "docid": "0bcd5e919c0ff6c8d3c81ce911f6023f", "score": "0.53557205", "text": "def refrence_free_3D_resample(image, transformation, interpolator,\n default_value, image_type=None, spacing=None,\n direction=None):\n extreme_indecies = list(itertools.product(*zip([0, 0, 0], image.GetSize())))\n extreme_points = [image.TransformIndexToPhysicalPoint(idx)\n for idx in extreme_indecies]\n inv_transform = transformation.GetInverse()\n # Calculate the coordinates of the inversed extream points\n extreme_points_transformed = [inv_transform.TransformPoint(point)\n for point in extreme_points]\n min_values = np.array((extreme_points_transformed)).min(axis=0)\n max_values = np.array((extreme_points_transformed)).max(axis=0)\n\n if spacing is None:\n spacing = image.GetSpacing()\n if direction is None:\n direction = image.GetDirection()\n # Minimal x,y, and coordinates are the new origin.\n origin = min_values.tolist()\n # Compute grid size based on the physical size and spacing.\n size = [int((max_val - min_val) / s)\n for min_val, max_val, s in zip(min_values, max_values, spacing)]\n return sitk.Resample(image, size, transformation, interpolator,\n outputOrigin=origin,\n outputSpacing=spacing,\n outputDirection=direction,\n defaultPixelValue=default_value,\n outputPixelType=image_type)", "title": "" }, { "docid": "7305497b9ea9b014c557c7d9d113a311", "score": "0.53505373", "text": "def _rescale(self, img, scale=PATCH_SIZE):\n return cv2.resize(img, scale, interpolation=cv2.INTER_CUBIC)", "title": "" }, { "docid": "bf34f0d8615d7b3f3b22a5a8034c9287", "score": "0.5346869", "text": "def rescale(a, new=(-0.5, 0.5), old=None):\n if old is None:\n old = (a.min(), a.max())\n old_min = old[0]\n new_min = new[0]\n new_range = new[1] - new[0]\n old_range = old[1] - old[0]\n return new_min + (a - old_min)/float(old_range) * new_range", "title": "" }, { "docid": "3cb50288df64f66676e42bbe0fcbaad8", "score": "0.5344745", "text": "def resize(image, dsize, interpolation=Interpolation.LINEAR, output=None):\n assert 2 <= len(image.shape) <= 4\n if output is not None:\n assert output.dtype == image.dtype\n assert len(output.shape) == len(image.shape)\n assert output.shape[:2] == (height, width)\n return _lycon2.resize(image, dsize, interpolation, output)", "title": "" }, { "docid": "7b60b5c671930675260d689b6e0ca415", "score": "0.53431606", "text": "def downscale(im, h, w):\n if isinstance(im, list):\n return [downscale(i, h, w) for i in im]\n else:\n if im.ndim == 2:\n im = im[:, :, np.newaxis]\n 
im_rs = ia.imresize_single_image(im, (h, w), interpolation=\"cubic\")\n return np.squeeze(im)\n else:\n return ia.imresize_single_image(im, (h, w), interpolation=\"cubic\")", "title": "" }, { "docid": "d48285b0b59cc2a9887deef8ef780cd4", "score": "0.5338", "text": "def rescale_data_volume(img_numpy, out_dim):\n depth, height, width = img_numpy.shape\n scale = [out_dim[0] * 1.0 / depth, out_dim[1] * 1.0 / height, out_dim[2] * 1.0 / width]\n return ndimage.interpolation.zoom(img_numpy, scale, order=0)", "title": "" }, { "docid": "bc43934c05c37951a9c5d2cef4717582", "score": "0.53230405", "text": "def resize(im, target_size, which_dim, interpolation='bicubic', clamp=None):\n h, w = im.shape[:2]\n\n if interpolation == 'bicubic':\n interpolation = cv2.INTER_CUBIC\n else:\n raise NotImplementedError(interpolation)\n\n if which_dim == 'horizontal':\n scale_factor = target_size / w\n elif which_dim == 'vertical':\n scale_factor = target_size / h\n else:\n raise ValueError(which_dim)\n\n im_resized = cv2.resize(im, None, fx=scale_factor, fy=scale_factor,\n interpolation=interpolation)\n\n if clamp is not None:\n min_val, max_val = clamp\n im_resized[im_resized < min_val] = min_val\n im_resized[im_resized > max_val] = max_val\n\n return im_resized", "title": "" }, { "docid": "2014e3ddd1d9a009aca82fad0457de82", "score": "0.5315572", "text": "def rescale_memmap(new_size, in_memmap, outfile):\n n_images = in_memmap.shape[0]\n original_size = in_memmap.shape[1]\n factor = new_size / original_size\n out = np.memmap(outfile, shape=(n_images, new_size, new_size, 3), mode='w+')\n\n for i, img in enumerate(in_memmap):\n if i % 100 == 0:\n print i\n out[i] = rescale(img, factor) * 255\n\n return out", "title": "" }, { "docid": "0e06b4acede9303f1881e92b2896de8d", "score": "0.53087944", "text": "def resize(img, size, interpolation=Image.BILINEAR):\n if not isinstance(img, Image.Image):\n raise TypeError('img should be PIL Image. 
Got {}'.format(type(img)))\n\n if isinstance(size, int):\n w, h = img.size\n if (w <= h and w == size) or (h <= w and h == size):\n return img\n if w < h:\n ow = size\n oh = int(size * h / w)\n return img.resize((ow, oh), interpolation)\n else:\n oh = size\n ow = int(size * w / h)\n return img.resize((ow, oh), interpolation)\n else:\n return img.resize(size[::-1], interpolation)", "title": "" }, { "docid": "07340f5a61c4db4433c1d5aa3dfd8426", "score": "0.5300824", "text": "def adjust_pres_coords(array,x,y,xmax=1920/2,ymax=1080/2):\n newarray=pd.DataFrame()\n newarray[x]=array[x]+xmax\n newarray[y]=(array[y]-ymax)*-1\n return newarray", "title": "" }, { "docid": "d47655af9fcee3ddeeca008eb2526267", "score": "0.5298194", "text": "def rescale(self, fun):\n if self.bands != 1:\n raise ValueError('only single band images are currently supported')\n \n # load array\n mat = self.matrix()\n \n # scale values\n scaled = fun(mat)\n \n # assign newly computed array to raster object\n self.assign(scaled, band=0)", "title": "" }, { "docid": "402ee78c05117e393c48cc5f88662b12", "score": "0.5294475", "text": "def rescale_intensity(image, in_range='image', out_range='dtype'):\n dtype = image.dtype.type\n\n imin, imax = intensity_range(image, in_range)\n omin, omax = intensity_range(image, out_range, clip_negative=(imin >= 0))\n\n # Fast test for multiple values, operations with at least 1 NaN return NaN\n if numpy.isnan(imin + imax + omin + omax):\n return\n image = numpy.clip(image, imin, imax)\n\n if imin != imax:\n image = (image - imin) / float(imax - imin)\n return numpy.asarray(image * (omax - omin) + omin, dtype=dtype)", "title": "" }, { "docid": "65d5ac794a7361b693a0fbc19a3b6f1d", "score": "0.5290252", "text": "def unstretch(coordinates, transformed_img):\r\n\r\n\t\tunstretched_img = np.zeros(transformed_img.shape)\r\n\r\n\t\tfor i in coordinates:\r\n\t\t\tif coordinates[i] != (-1, -1):\r\n\t\t\t\tunstretched_img[i[0], i[1], :] = transformed_img[coordinates[i][0], coordinates[i][1], :]\r\n\t\t\telse:\r\n\t\t\t\tunstretched_img[i[0], i[1], :] = transformed_img[i[0], 0, :]\r\n\r\n\t\treturn unstretched_img.astype(\"uint8\")", "title": "" }, { "docid": "f7a101e82e4fedd0d7a546b707633bcc", "score": "0.52777857", "text": "def resize(self, scale: Number) -> None:\n self.size = np.floor(self.size * scale + 0.5).astype(int)", "title": "" }, { "docid": "b70e7a2b7ffcbb169d66c87b0fb6d723", "score": "0.52734995", "text": "def resize(self, new_capacity: int) -> None:\n\n if new_capacity < 0 or new_capacity < self.size:\n return\n\n else:\n new_array = StaticArray(new_capacity) # Creating a new array with the new capacity\n if new_capacity < self.data.size():\n for i in range(new_array.size()): # Copying all elements into new array\n new_array.set(i, self.data.get(i))\n\n else:\n for i in range(self.data.size()):\n new_array.set(i, self.data.get(i))\n\n self.data = new_array\n self.capacity = new_capacity # Setting references to the new array", "title": "" }, { "docid": "5d4d27bd056e496d1f7f6f45588d0844", "score": "0.5263899", "text": "def _interpolate(self, grid, roundup=False):\n # there's no point in working with the whole of the data array if it's\n # masked.\n useful_chunk = ndimage.find_objects(numpy.where(self.data.mask, 0, 1))\n assert (len(useful_chunk) == 1)\n my_xdim, my_ydim = self.data[useful_chunk[0]].shape\n\n if MEDIAN_FILTER:\n f_grid = ndimage.median_filter(grid, MEDIAN_FILTER)\n if MF_THRESHOLD:\n grid = numpy.where(\n numpy.fabs(f_grid - grid) > MF_THRESHOLD, f_grid, grid\n )\n else:\n grid = 
f_grid\n\n # Bicubic spline interpolation\n xratio = float(my_xdim) / self.back_size_x\n yratio = float(my_ydim) / self.back_size_y\n\n my_map = numpy.ma.MaskedArray(numpy.zeros(self.data.shape),\n mask=self.data.mask)\n\n # Remove the MaskedArrayFutureWarning warning and keep old numpy < 1.11\n # behavior\n my_map.unshare_mask()\n\n # Inspired by https://stackoverflow.com/questions/13242382/resampling-a-numpy-array-representing-an-image\n # Should be much faster than scipy.ndimage.map_coordinates.\n # scipy.ndimage.zoom should also be an option for speedup, but zoom dit not let me produce the exact\n # same output as map_coordinates. My bad.\n # I checked, using fitsdiff, that it gives the exact same output as the original code\n # up to and including --relative-tolerance=1e-15 for INTERPOLATE_ORDER=1.\n # It was actually quite a hassle to get the same output and the fill_value is essential\n # in interp1d. However, for some unit tests, grid.shape=(1,1) and then it will break\n # with \"ValueError: x and y arrays must have at least 2 entries\". So in that case\n # map_coordinates should be used.\n\n if INTERPOLATE_ORDER==1 and grid.shape[0]>1 and grid.shape[1]>1:\n x_initial = numpy.linspace(0., grid.shape[0]-1, grid.shape[0], endpoint=True)\n y_initial = numpy.linspace(0., grid.shape[1]-1, grid.shape[1], endpoint=True)\n x_sought = numpy.linspace(-0.5, -0.5 + xratio, my_xdim, endpoint=True)\n y_sought = numpy.linspace(-0.5, -0.5 + yratio, my_ydim, endpoint=True)\n\n primary_interpolation = interp1d(y_initial, grid, kind='slinear', assume_sorted=True,\n axis=1, copy=False, bounds_error=False,\n fill_value=(grid[:, 0], grid[:, -1]))\n transposed = primary_interpolation(y_sought).T\n\n perpendicular_interpolation = interp1d(x_initial, transposed, kind='slinear', assume_sorted=True,\n axis=1, copy=False, bounds_error=False,\n fill_value=(transposed[:, 0], transposed[:, -1]))\n my_map[useful_chunk[0]] = perpendicular_interpolation(x_sought).T\n else:\n slicex = slice(-0.5, -0.5 + xratio, 1j * my_xdim)\n slicey = slice(-0.5, -0.5 + yratio, 1j * my_ydim)\n my_map[useful_chunk[0]] = ndimage.map_coordinates(\n grid, numpy.mgrid[slicex, slicey],\n mode='nearest', order=INTERPOLATE_ORDER)\n\n # If the input grid was entirely masked, then the output map must\n # also be masked: there's no useful data here. We don't search for\n # sources on a masked background/RMS, so this data will be cleanly\n # skipped by the rest of the sourcefinder\n if numpy.ma.getmask(grid).all():\n my_map.mask = True\n elif roundup:\n # In some cases, the spline interpolation may produce values\n # lower than the minimum value in the map. If required, these\n # can be trimmed off. 
No point doing this if the map is already\n # fully masked, though.\n my_map = numpy.ma.MaskedArray(\n data=numpy.where(\n my_map >= numpy.min(grid), my_map, numpy.min(grid)),\n mask=my_map.mask\n )\n return my_map", "title": "" }, { "docid": "dd33e2d54d9051adc668db468265e837", "score": "0.5263204", "text": "def change_array_size(data,newsize):\r\n dim=data.shape\r\n if dim[0]!=newsize[0] or dim[1]!=newsize[1] or dim[2]!=newsize[2]:\r\n if dim[0]!=newsize[0]:\r\n pad_size=int(abs((dim[0]-newsize[0])/2))\r\n if dim[0]<newsize[0]:\r\n data=np.pad(data, ((pad_size, pad_size), (0, 0), (0, 0),(0,0)), 'constant')\r\n else:\r\n data=data[pad_size:dim[0]-pad_size,:,:,:]\r\n if dim[1]!=newsize[1]:\r\n pad_size=int(abs((dim[1]-newsize[1])/2))\r\n if dim[1]<newsize[1]:\r\n data=np.pad(data, ((0, 0), (pad_size, pad_size), (0, 0),(0,0)), 'constant')\r\n else:\r\n data=data[:,pad_size:dim[1]-pad_size,:,:]\r\n if dim[2]!=newsize[2]:\r\n pad_size=int(abs((dim[2]-newsize[2])/2))\r\n if dim[2]<newsize[2]:\r\n data=np.pad(data, ((0, 0), (0, 0), (pad_size, pad_size),(0,0)), 'constant')\r\n else:\r\n data=data[:,:,pad_size:dim[2]-pad_size,:]\r\n return data", "title": "" }, { "docid": "a556e59075b3bffd5b1b00c31638467b", "score": "0.5259523", "text": "def resizeROIS(self, origSize, newSize):\n newROIS = []\n \n ox, oy = origSize\n nx, ny = newSize\n xp = float(ox) / nx\n yp = float(oy) / ny\n \n for ROI in self.ROIS:\n nROI = []\n for pt in ROI:\n nROI.append ( (pt[0]*xp, pt[1]*yp) )\n newROIS.append ( ROI )\n\n return newROIS", "title": "" }, { "docid": "cd502ce7bc22d8551b446fd5738fb94c", "score": "0.5259037", "text": "def pad_image(arr, max_size=400):\n dim = np.max(arr.shape)\n img = np.zeros((dim, dim, 3), dtype=arr.dtype)\n xl = (dim - arr.shape[0]) // 2\n yl = (dim - arr.shape[1]) // 2\n img[xl:arr.shape[0]+xl, yl:arr.shape[1]+yl, :] = arr\n return resample_image(img, max_size=max_size)", "title": "" }, { "docid": "df92f77ebc1735d674cad066238214c9", "score": "0.52557075", "text": "def imresize(im,sz):\n pil_im = Image.fromarray(uint8(im))\n return np.array(pil_im.resize(sz))", "title": "" }, { "docid": "116184bc57444a569a143c2e482fd5e2", "score": "0.5248821", "text": "def autoscale(self, A):\n A = np.asanyarray(A)\n self.vmin = A.min()\n self.vmax = A.max()", "title": "" }, { "docid": "8608156437a4a9589d6c63969208e1a7", "score": "0.52430147", "text": "def resample(self, data: nib.nifti1.Nifti1Image) -> nib.nifti1.Nifti1Image:\n return resample_img(data, target_affine=self.affine,\n target_shape=self.shape, interpolation='nearest')", "title": "" }, { "docid": "5fff310e52e76cf1889a602cb67321a2", "score": "0.5241225", "text": "def ImageRescale1(array):\n maxa = array.max()\n mina = array.min()\n volume = 1.0*(array-mina)/(maxa-mina)\n return volume.astype(float)", "title": "" } ]
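A recurring pattern across the resizing passages above (smart_resize, min_resize, the async resize) is to pick the interpolation mode from the scale direction and to pass it to cv2.resize by keyword, so it is not silently bound to the positional fx/fy scale-factor parameters. A minimal, self-contained sketch of that pattern, assuming OpenCV and NumPy are available (the function name and defaults are illustrative, not taken from any passage above):

    import cv2
    import numpy as np

    def resize_to(img: np.ndarray, width: int, height: int) -> np.ndarray:
        # INTER_AREA resists aliasing when shrinking; INTER_LINEAR is a
        # reasonable default when enlarging.
        shrinking = width * height < img.shape[1] * img.shape[0]
        interp = cv2.INTER_AREA if shrinking else cv2.INTER_LINEAR
        # dsize is (width, height); keyword-passing keeps interpolation from
        # being consumed as the positional fx/fy arguments of cv2.resize.
        return cv2.resize(img, (int(width), int(height)), interpolation=interp)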
888e108f89e839d82cd32a669358aaf5
Format a dictionary representation of an SFF read as text.
[ { "docid": "07a1c0d96f1f963310d63f3555444875", "score": "0.0", "text": "def format_read(read):\n out = StringIO()\n out.write(format_read_header(read))\n out.write(format_read_data(read, read))\n return out.getvalue()", "title": "" } ]
[ { "docid": "c9f0c13491df0c21bcff05ccb5df1645", "score": "0.5651693", "text": "def dump_text(filename,the_dict):\n with open(filename,'w') as f:\n for the_temp in the_dict['temps'].values():\n f.write(\"{:d}\\n\".format(the_temp))", "title": "" }, { "docid": "4849649e7d40ae842b673a923251ea6b", "score": "0.5571337", "text": "def set_formatted_fields(self):\n #set the field info for this, to be shown on the web page\n s = \"\"\n for h in self.fields:\n mydict = h\n for mykey, myval in mydict.items():\n s = s + \" \" + mykey + \" : \" + myval\n if self.get_unit(mykey):\n s = s + ' ('+self.get_unit(mykey)+' '+self.get_scale(mykey)+' '+\\\n self.get_eventness(mykey)+') '\n mydt = self.get_datatype(mykey)\n if mydt:\n s = s + \" \" + mydt\n s = s + \"<br/>\"\n self.formatted_fields = s", "title": "" }, { "docid": "b7986ec94c2d0507d62faa0e7359c5c1", "score": "0.5507285", "text": "def _dict2str(self, d):\r\n return u''.join([u\"%s: '%s'\\n\" % (k, v) for k, v in d.items()])", "title": "" }, { "docid": "4d1706608d1fdccde779f730b07bc8b2", "score": "0.5487229", "text": "def format_to_printable_dict(self) -> dict:\n return format_dict_to_printable_dict(dataclasses.asdict(self))", "title": "" }, { "docid": "7690b04c0c145bb31735de9d833c993d", "score": "0.54436475", "text": "def dictstring(what,text='',filter=[],njust=35):\n def asString(v):\n txt = ''\n fmt0 = '{:8.6g} '\n fmt1 = '{} '\n fmt2 = '{:s} '\n if isinstance(v,bool):\n txt += fmt1.format(v)\n elif isinstance(v,str):\n txt += fmt2.format(v)\n elif isinstance(v,float) or isinstance(v,int) or isinstance(v,complex):\n txt += fmt0.format(v)\n else:\n txt += fmt1.format(v)\n return txt\n\n template = '=============================================='\n res = ''\n lt = len(template)\n lx = len(text)\n p1 = int((lt-lx)/2)\n p2 = int((lt+lx)/2)\n if p1 < 0:\n ueberschrift = text\n else:\n ueberschrift = template[:p1]+' {} '.format(text)+template[p2:]\n # print(' '+ueberschrift)\n res+= ' {}\\n'.format(ueberschrift)\n\n fmt = '{:>'+'{}s'.format(njust)+'} : '\n for k,v in sorted(what.items()):\n if k in filter:\n continue\n vars = ''\n if isinstance(v,tuple):\n for i in v: vars += asString(i)\n else:\n vars += asString(v)\n # print(fmt.format(k)+vars)\n res+=fmt.format(k)+'{}\\n'.format(vars)\n return res", "title": "" }, { "docid": "c2c75ea38c0b6c7035a68b74cb6052a6", "score": "0.53996456", "text": "def convert_to_text(DFA_dic, finals):\n first_row = \"State,\\ta,\\tb\"\n rows = [first_row]\n\n for state, transitions in DFA_dic.items():\n if is_final(state, finals):\n row = state + \"*\\t\"\n else:\n row = state + \"\\t\"\n\n for trans_state in transitions.values():\n row += trans_state + \"\\t\"\n rows.append(row)\n\n return \"\\n\".join(rows)", "title": "" }, { "docid": "783860bc7ef3acdb8bbd4b7e870ee95b", "score": "0.5381522", "text": "def to_txt(self, format: str = \"twd\") -> str:\n return {\n \"twd\": self._to_txt_twd,\n \"jol\": self._to_txt_jol,\n \"lackey\": self._to_txt_lackey,\n }.get(format, self._to_txt_twd)()", "title": "" }, { "docid": "04536509286418fb13bf1c4667b19211", "score": "0.52782553", "text": "def get_sfnt_dict(font):\n return { k: v for l, k, v in font.sfnt_names }", "title": "" }, { "docid": "b65917c391de163e84c862ee2122bdbe", "score": "0.524089", "text": "def dict_to_snakefile(cmds_dict, sf_dict):\n sf_out = ''\n for rn in sf_dict:\n sf_rule = 'rule {}:\\n'.format(rn)\n for rn2 in sf_dict[rn]:\n sf_rule += '\\t{}:\\n'.format(rn2)\n for l in sf_dict[rn][rn2]:\n if l:\n if type(sf_dict[rn][rn2]) is dict:\n sf_rule += 
'\\t\\t{k}=\\'{v}\\',\\n'.format(k=l, v=sf_dict[rn][rn2][l].replace('\\'', '\\\\\\''))\n elif type(l) is str:\n sf_rule += '\\t\\t\\'{}\\'\\n'.format(l.replace('\\'', '\\\\\\''))\n elif type(l) is int:\n sf_rule += '\\t\\t{}\\n'.format(l)\n sf_out += sf_rule\n sf_out += '\\tshell:\\n\\t\\t\\'\\'\\'\\n\\t\\t'\n sf_out += 'echo [$(date +%Y-%m-%d_%H:%M:%S)] started pipeline {}\\n\\t\\t'.format(rn)\n for l in cmds_dict[rn]:\n if l:\n if ' > ' in l:\n sf_out += '{} 2>> {{log}}\\n\\t\\t'.format(l)\n # sf_out += 'echo $({} 2>&1 ) >> {{log}} 2>&1\\n\\t\\t'.format(l)\n else:\n sf_out += '{} >> {{log}} 2>&1\\n\\t\\t'.format(l)\n sf_out += 'echo [$(date +%Y-%m-%d_%H:%M:%S)] finished pipeline {}\\n\\t\\t'.format(rn)\n sf_out += '\\'\\'\\'\\n\\n'\n return sf_out", "title": "" }, { "docid": "f6a1d44dc9ad0bf8fe988d3b81375340", "score": "0.5232041", "text": "def format_segment(seg):\n output_dict = {}\n output_dict[\"speakerInfo\"] = seg.speaker\n output_dict[\"startTimeSec\"] = float(seg.start)\n output_dict[\"endTimeSec\"] = float(seg.stop)\n output_dict[\"genderInfo\"] = {\"gender\": seg.label.split(\",\")[-1].replace(\">\", \"\")}\n output_dict[\"transcript\"] = seg.text\n output_dict[\"confidence\"] = seg.confidence\n\n if len(seg.formatted_text) > 0:\n output_dict[\"formatted_transcript\"] = seg.formatted_text\n\n return json.dumps(output_dict, ensure_ascii=True)", "title": "" }, { "docid": "65ee70e222e77c6f5a4d3a7b3c30bfea", "score": "0.5227473", "text": "def printdict(dictionary, w=100, fmt='%r', sort=True, max_v_lines=6):\n print(strdict(dictionary, w=w, fmt=fmt, sort=sort, max_v_lines=max_v_lines))", "title": "" }, { "docid": "62363f4ebf91cb03c9b598e64f075329", "score": "0.5193443", "text": "def sv_dictfile(filename, st):\n # Allowed types: dict\n if not isinstance(st, dict):\n return None\n\n # Retrieve list of keys\n kClist = st.keys()\n with open(filename, 'w') as f:\n for kC in kClist: # Key Camera\n kDlist = st[kC].keys()\n string = \"\"\n for kD in kDlist: # Key Direction ('en', 'ex')\n ptList = st[kC][kD]\n for pt in ptList: # for each point\n for x in pt:\n if kD == \"en\":\n x = x\n else:\n x = -x\n string += str(int(x)) + \" \"\n string += \"1 -1\\n\"\n print(string)\n f.write(string)", "title": "" }, { "docid": "21140eb692076d3631293804227a9564", "score": "0.5138427", "text": "def format_dict_to_printable_dict(arg: dict) -> dict:\n clean_dict = {}\n for k, v in arg.items():\n try:\n if isinstance(v, dict):\n clean_dict[k] = str(v)\n continue\n elif isinstance(v, list):\n clean_dict[k] = ' '.join([format_to_str(item)\n for item in v])\n else:\n clean_dict[k] = format_to_str(v)\n except TypeError:\n clean_dict[k] = str(v) # see what happens? 
yolo\n pass\n\n return clean_dict", "title": "" }, { "docid": "4a1b30e1df7c68f920769101372402c5", "score": "0.51327795", "text": "def format_input_text(self, inputs):\n\n return \", \".join([ \"{}={}\".format(k, self.print_var(v)) for (k, v) in inputs.items() ])", "title": "" }, { "docid": "147095c909640ba09f3f2b23f907db11", "score": "0.511361", "text": "def format_map(self, mapping):\n pass # cover this after covering dict", "title": "" }, { "docid": "c596fefc3d31a0c2e2855d13f3e4e347", "score": "0.51094246", "text": "def dumps(self): # pragma: no cover\n lines = list()\n for key, values in self.todict().items():\n head = key + \":\"\n lines.append(f\"{head:<15}\" + \"; \".join(str(x) for x in values))\n return \"\\n\".join(lines)", "title": "" }, { "docid": "4dcbf59c066b162d99fe234ef9a878a5", "score": "0.51074404", "text": "def _print_dict_as_table(self, data, title = None):\n\n for k in get_val_types():\n if k in data:\n data[k] = conv_to_str(data[k], k)\n\n headers = list(data.keys())\n table = text_tables.TRexTextTable('' if title is None else title)\n table.header(headers)\n\n table_data = []\n for h in headers:\n h_value = data.get(h, '-')\n table_data.append(str(h_value))\n\n table.add_row(table_data)\n\n table.set_cols_align(['c'] * len(headers))\n table.set_cols_width([max(len(h), len(str(d))) for h, d in zip(headers, table_data)])\n table.set_cols_dtype(['a'] * len(headers))\n\n text_tables.print_table_with_header(table, table.title, buffer = sys.stdout)", "title": "" }, { "docid": "318ae47d3273ce49845afefb4affdc10", "score": "0.50894916", "text": "def safe_format_file (s, mapping, fpath):\n\tfile_map = default_file_format_mapping (fpath)\n\tfile_map.update (mapping)\n\treturn safe_format (s, file_map)", "title": "" }, { "docid": "27e0d28f62795b645be6190e00826a1a", "score": "0.508708", "text": "def str_format(text, data, enc_char):\n for key in data:\n val = data[key]\n text = text.replace('{}{}{}'.format(enc_char, str(key), enc_char),\n str(val))\n return text", "title": "" }, { "docid": "19c11e7a9d57e0c3ec876eccc2b37885", "score": "0.5071258", "text": "def _format_dictionary(dict_to_format):\n if len(dict_to_format) == 0:\n return 'Empty result'\n\n for item in dict_to_format.values():\n if isinstance(item, (list, dict, tuple)):\n #Some of the values of the dictionary are list or dict,\n #So we can't show this as a simple table.\n return _format_object_as_tree(dict_to_format)\n \n result = ''\n \n terminal_width, terminal_height = utils.terminal_size()\n columns_width = (terminal_width - 9) / 2\n keys_column_width = columns_width\n values_column_width = columns_width\n \n if columns_width > 26: #Max width for keys column\n keys_column_width = 26\n values_column_width = terminal_width - 9 - keys_column_width\n \n table_border = ANSICOLORS.FBLUE % (' +' + \\\n '='*(keys_column_width) + '+' + \\\n '='*(values_column_width) + '+' + '\\n')\n if not keys_column_width%2 == 0:\n #Ensure that the widths are even.\n keys_column_width = keys_column_width -1\n values_column_width = values_column_width + 1\n \n table_header = table_border\n table_header += ANSICOLORS.FBLUE % (' |' + (' '*(keys_column_width/2-2)) + \\\n 'KEYS' + \\\n (' '*(keys_column_width/2-2)) +\\\n '|' + (' '*(values_column_width/2-3)) + \\\n 'VALUES' + \\\n (' '*(values_column_width/2-3)) +\\\n '|' + '\\n' )\n table_header += table_border \n result += table_header\n \n line = ANSICOLORS.FBLUE % ' | '\n for key in dict_to_format:\n value = dict_to_format[key]\n #Ensure that our keys and values are strings.\n key = str(key)\n if not 
isinstance(value, basestring):\n value = str(value)\n \n #Printing in KEYS column.\n #-2 is for those extra spaces between and after text in the column.\n if len(key) > keys_column_width - 2:\n key = key[:keys_column_width - 5] + '...'\n line += ('%-{0}s'.format(keys_column_width-2) % key) + \\\n ANSICOLORS.FBLUE % ' | '\n \n #Printing in VALUES column.\n if len(value) > values_column_width - 2:\n value = value[:values_column_width - 5] + '...'\n line += ('%-{0}s'.format(values_column_width-2) % value) + \\\n ANSICOLORS.FBLUE % ' |'\n \n result += line + '\\n'\n line = ANSICOLORS.FBLUE % ' | '\n\n result += table_border\n return result", "title": "" }, { "docid": "6578e15eef0b2c7cb07840795e106541", "score": "0.50656486", "text": "def _format_data(histo):\n return {\"b\": histo[0], \"h\": histo[1], \"dig\": histo[2]}", "title": "" }, { "docid": "48d9108b98fae83609b8ed7168caa160", "score": "0.5059849", "text": "def format_dict(self, data: Dict, indent: int) -> str:\n ind = \" \" * indent\n fmt = ' {}\"{}\": {},'\n lines = [\n fmt.format(ind, key, self.format_metadata(value, indent + 4, key))\n for key, value in data.items()\n ]\n\n return \"{{\\n{}\\n{}}}\".format(\"\\n\".join(lines), ind)", "title": "" }, { "docid": "644182a5f2745e267d5e00ec341447f0", "score": "0.5009459", "text": "def format_dict_booking(dict_order):\n text = ''\n length = len(dict_order.keys())\n list_length = [i for i in range(length)]\n keys = list(dict_order.keys())\n values = list(dict_order.values())\n for i, key, value in zip(list_length, keys, values):\n if length == 0 : #normally this case shouldn t occur\n return ('')\n\n elif i == length-1 and value==1: # end of the dic so add nothing at the end of the sentence (already a point in the fulfilment), value==1 means no 's' at the end of the key\n text = text + str(int(value))+' '+key\n elif i == length-1 and value>=2: # end of the dic so add nothing at the end of the sentence (already a point in the fulfilment), case several so we add a 's'\n text = text + str(int(value))+' '+key.split()[0]+'s '+format_list_for_message_client(key.split()[1:]).replace(',','') #a 's' is added to pizza -> pizzas and the reste of the name of the pizza is added\n\n elif length>=2 and i==length-2 and value==1: #case there are several keys (pizzas) and we are at the second last element of the dic, we add \"et\"\n text = text + str(int(value)) +' '+ key +' et '\n elif length>=2 and i==length-2 and value>=2: #case there are several keys (pizzas) and we are at the second last element of the dic, we add \"et\"\n text = text + str(int(value)) +' '+ key.split()[0] +'s '+format_list_for_message_client(key.split()[1:]).replace(',','')+' et '\n\n elif length>=2 and i<=length-2 and value==1:\n text = text + str(int(value)) +' '+ key +', '\n elif length>=2 and i<=length-2 and value>=2:\n text = text + str(int(value)) +' '+ key.split()[0] +'s '+format_list_for_message_client(key.split()[1:]).replace(',','')+', '\n return (text)", "title": "" }, { "docid": "467d89304059e4b66913d6b13a257072", "score": "0.50063026", "text": "def show_state_dict(d):\n for key, val in d.items():\n print(f\"{key:20s}\", type(val), val.shape)", "title": "" }, { "docid": "8bc7bbe660ef2e4b244614fe530ef088", "score": "0.50002563", "text": "def convertAndPrint(f):\n g = tokenize.generate_tokens(f.readline)\n token = []\n for l in g:\n token.append(l)\n line = \"\"\n understood = True\n variables = {}\n i=0\n (t, v, _, _,_) = token[i]\n while len(token)-i > 0:\n (t, v, _, _,_) = token[i]\n line,i,understood,variables = 
convertToken(token,line,t,v,i,understood,variables, '')\n i += 1\n print line,", "title": "" }, { "docid": "afa84aaa2d620e444395b76b4efbda8a", "score": "0.49580988", "text": "def _print_info_dict(info_dict):\n for key, stat in info_dict.items():\n print(f\"{key:>14}: {stat}\")", "title": "" }, { "docid": "89c755027b5201a4b0706292e87e06b5", "score": "0.49502262", "text": "def dump_channel_dict_as_text(a_dict, startswith=\"\"):\r\n channel_list_method = lambda d: d.keys()\r\n channel_access_check_method = lambda d, cn: True\r\n sample_access_method = lambda d, cn: d[cn]\r\n\r\n result = format_channel_table_generic(a_dict, channel_list_method,\r\n channel_access_check_method,\r\n sample_access_method,\r\n startswith)\r\n\r\n return result", "title": "" }, { "docid": "e9e357b73e5acd00d2d677583b21a2e6", "score": "0.49391776", "text": "def format_changer(string, format_dic):\n if not isinstance(string, str):\n string = str(string)\n for key, ele in format_dic.items():\n string = string.replace(key, ele)\n return string", "title": "" }, { "docid": "43ec4d6cc95653fae58115fc3af693b0", "score": "0.49284697", "text": "def formatted(self, format):\n data = {\n \"path\": self.filename,\n \"row\": self.lineno,\n \"col\": self.charno,\n # horrible hack for visual studio code\n \"code\": f\"W{self.message_id[1:]}\",\n \"text\": f\"[{self.tool}] {self.message}\",\n }\n if self.extramessage:\n data[\"text\"] += f\" ({self.extramessage})\"\n\n return format % data", "title": "" }, { "docid": "efef2e8c01b82aaf399e715b636ac51c", "score": "0.4928106", "text": "def format_read_data(read_data, read_header):\n out = StringIO()\n out.write('\\n')\n\n out.write('Flowgram:')\n for x in read_data['flowgram_values']:\n out.write('\\t%01.2f' % (x * 0.01))\n out.write('\\n')\n\n out.write('Flow Indexes:')\n current_index = 0\n for i in read_data['flow_index_per_base']:\n current_index = current_index + i\n out.write('\\t%d' % current_index)\n out.write('\\n')\n\n out.write('Bases:\\t')\n # Roche uses 1-based indexing\n left_idx = read_header['clip_qual_left'] - 1\n right_idx = read_header['clip_qual_right'] - 1\n for i, base in enumerate(read_data['Bases']):\n if (i < left_idx) or (i > right_idx):\n out.write(base.lower())\n else:\n out.write(base.upper())\n out.write('\\n')\n\n out.write('Quality Scores:')\n for score in read_data['quality_scores']:\n out.write('\\t%d' % score)\n out.write('\\n')\n\n return out.getvalue()", "title": "" }, { "docid": "8ea02ea8c5868b3dc3d0a4c439f9af66", "score": "0.49266168", "text": "def dump_section(name,dict,f=sys.stdout):\n f.write('[%s]%s' % (name,writer.newline))\n for k,v in dict.items():\n k = str(k)\n k = k.replace('=',r'\\=') # Escape = in name.\n # Quote if necessary.\n if len(k) != len(k.strip()):\n k = '\"'+k+'\"'\n if v and len(v) != len(v.strip()):\n v = '\"'+v+'\"'\n if v is None:\n # Don't dump undefined attributes.\n continue\n else:\n s = k+'='+v\n if s[0] == '#':\n s = '\\\\' + s # Escape so not treated as comment lines.\n f.write('%s%s' % (s,writer.newline))\n f.write(writer.newline)", "title": "" }, { "docid": "5f1c3386096f14957f9407311278366f", "score": "0.49235052", "text": "def to_latex_dict(self, splitlines=True):\n latex_stings = {}\n for entry in self.values():\n string = entry.to_latex()\n if splitlines:\n string = string.splitlines()\n latex_stings[(entry.type, entry.key)] = string\n return latex_stings", "title": "" }, { "docid": "b1c6a449ce6424038df49012b511f993", "score": "0.49144575", "text": "def __str__(self):\n #TODO: Make a fancy following section 
order\n string = \"Printing: \" + self.file_name + \"\\n\\n\"\n for key in self.data:\n string += \"~> Key: {}\\n fr {}\\n\".format(key, self.gb2fr[key])\n for keyword in DICO_KEYS:\n if keyword in self.data[key]:\n # Specific display for CHOIX and CHOIX1\n if keyword in ['CHOIX', 'CHOIX1']:\n string += ' {} = \\n'.format(keyword)\n # Integer choix\n if isinstance(self.data[key][keyword], dict):\n for idx, comment in self.data[key][keyword].items():\n string += ' - {} : {}\\n'.format(idx, comment)\n # String choix\n else:\n for val in self.data[key][keyword]:\n string += ' - {}\\n'.format(val)\n\n\n else:\n string += \" {} = {}\\n\"\\\n .format(keyword, self.data[key][keyword])\n\n return string", "title": "" }, { "docid": "636ba19ec4c24dd012200bba18d2e81c", "score": "0.4910688", "text": "def WriteFormatTest(self, f):\n self.type_handler.WriteFormatTest(self, f)", "title": "" }, { "docid": "fcdb21aa4c270ae31c00f61b4b2a8e6a", "score": "0.49011445", "text": "def handle_dictionary(self, d):\n i = 0\n self.start_object()\n for key, value in d.iteritems():\n # self.currentLoc += key+'.'\n self.stream.write(unicode(self.currentLoc))\n i += 1\n self.handle_simple(key)\n self.stream.write(u': ')\n self.handle_object(value)\n if i != len(d):\n self.stream.write(u', ')\n self.currentLoc = self.currentLoc[0:(len(self.currentLoc)-len(key)-1)]\n self.end_object()", "title": "" }, { "docid": "359febc182caa572be77108650d0d9bd", "score": "0.48985216", "text": "def format(self, value):\n entry = self.entry(value)\n return entry[self.entry_key] if self.entry_key else entry", "title": "" }, { "docid": "f0e6641c021fba584e859c2312eb5dd9", "score": "0.48934323", "text": "def event_string_from_dict(cls, event_dict):\n kwargs = event_dict.copy()\n kwargs['fft_re'] = '; '.join(map(\"{:.2f}\".format, kwargs['fft_re']))\n kwargs.update(kwargs.pop('line'))\n kwargs['phase'] = (str(kwargs['phase'])).replace('.', ',')\n kwargs.pop('alarms')\n kwargs['fft_img'] = '; '.join(map(\"{:.2f}\".format, kwargs['fft_img']))\n kwargs.update(kwargs.pop('power'))\n kwargs['peaks'] = '; '.join(map(\"{:.3f}\".format, kwargs['peaks']))\n kwargs['utc_time'] = kwargs['utc_time'].strftime(\"%Y-%m-%-d %H:%M:%S\")\n \n return EventFactory.EVENT_STRING.format(**kwargs)", "title": "" }, { "docid": "1d21801720fe3851fb63e8ab1a64f801", "score": "0.4890314", "text": "def fsaprint(self):\n for s in self.states:\n if self.states[s].sout == -1:\n self.fsa_ofp.write(\"%s \" % self.states[s].nstate)\n continue\n word = self.eps\n word = self.states[s].c\n if self.states[s].c.find(\"+\") > -1:\n pw = self.states[s].c.split(\"+\")\n self.states[s].c = pw[0]\n word = pw[1]\n if self.states[s].c == self.match:\n self.states[s].c = self.eps\n\n sym, weight = self._split_token(self.states[s].c)\n self.fsa_ofp.write(\"%s %s %s %s\\n\" % (self.states[s].nstate, self.states[s].sout, sym, weight))\n if not self.states[s].sout2 == None:\n self.fsa_ofp.write(\"%s %s %s %s\\n\" % (self.states[s].nstate, self.states[s].sout2, sym, weight))\n self.fsa_ofp.close()\n\n self.isyms_ofp.write(\"%s 0\\n\" % self.eps)\n for i, sym in enumerate(self.isyms):\n self.isyms_ofp.write(\"%s %d\\n\" % (sym, i + 1))\n self.isyms_ofp.close()\n return", "title": "" }, { "docid": "a593ae3fb1110dc3ff979111d493666e", "score": "0.48794106", "text": "def fileformat(dxm_state):", "title": "" }, { "docid": "e13bac9235b25b44b26d4de9d757f819", "score": "0.48749802", "text": "def format(self, record):\n\t\tpass", "title": "" }, { "docid": "08a3db6ed7ad4cf2c630c1dea5b04e5f", "score": 
"0.48720983", "text": "async def format_attributes(self):\n\n attributes = []\n bod = self.attributes['body']\n qui = self.attributes['quickness']\n str = self.attributes['strength']\n wil = self.attributes['willpower']\n itl = self.attributes['intelligence']\n cha = self.attributes['charisma']\n mag = self.attributes['magic']\n ess = self.attributes['essence']\n attributes.append(f\"B: {bod['base']}({bod['base'] + bod['modifier']})\")\n attributes.append(f\"Q: {qui['base']}({qui['base'] + qui['modifier']})\")\n attributes.append(f\"S: {str['base']}({str['base'] + str['modifier']})\")\n attributes.append(f\"W: {wil['base']}({wil['base'] + wil['modifier']})\")\n attributes.append(f\"I: {itl['base']}({itl['base'] + itl['modifier']})\")\n attributes.append(f\"C: {cha['base']}({cha['base'] + cha['modifier']})\")\n attributes.append(f\"M: {mag['base']}({mag['base'] + mag['modifier']})\")\n attributes.append(f\"E: {ess['base']}({ess['base'] + ess['modifier']})\")\n\n return \" \".join(attributes)", "title": "" }, { "docid": "36b4a9bb91449a2da9755171838aa8e1", "score": "0.48658615", "text": "def output(s):\n p = Parser()\n m = MapfileToDict()\n \n ast = p.parse(s)\n #print(ast)\n d = m.transform(ast)\n #print(d)\n pp = PrettyPrinter(indent=0, newlinechar=\" \", quote=\"'\")\n return pp.pprint(d)", "title": "" }, { "docid": "b11c96a4ded5c84f77c24f50a66a3f35", "score": "0.48577264", "text": "def nfvbench_input_to_str(nfvbench_input: dict) -> str:\n string = \"\"\n for key in ['user_label', 'frame_sizes', 'flow_count', 'rate', 'duration_sec']:\n if key in nfvbench_input:\n string += f\"{key}={nfvbench_input[key]} \"\n return string", "title": "" }, { "docid": "800e782e63534828fe588ce1fdc486e3", "score": "0.48480195", "text": "def _format_to_lines(params):\n f, args = params\n print(f)\n source, tgt = load_json(f, args.lower)\n return {'src': source, 'tgt': tgt}", "title": "" }, { "docid": "abfe77a81751ade645aa68047b2a5621", "score": "0.48442638", "text": "def WriteUnitConversionDict(self, f_name=\"\"):\n if f_name == \"\": f_name = \"unit_dict.py\"\n else: f_name += \"_unit_dict.py\"\n unit_dict_py = unit_dict_hdr.format(f_name)\n modxlabel = modxlabel_hdr\n if_str = 'if'\n # First writed out all the conversions\n for k, v in list(self._conv_dict.items()): \n unit_dict_py += unit_dict_entry.format(k, \n \"$\\\\\\\\rm {}$\".format(k), \n v, \n 'float', \n \"\",\n \"\",\n )\n # Make conversion dictionary \n for p in self._sim_params:\n dim_units = \"\"\n dim_conv = 1.0\n name, entry_dict = list(p.items())[0]\n # Get conversions to dimensionful units if param has dimentsions\n if entry_dict['dim']:\n # Loop over units and apply power\n for d, u in zip(entry_dict['dim'], entry_dict['units']):\n dim_units += \"$\\\\\\\\rm {}$ \".format(u) if d == 1 else \"$\\\\\\\\rm {}^{{{}}}$ \".format(u,int(d))\n dim_conv *= np.power(self._conv_dict[u], d)\n dim_units = dim_units[:-1] # Get rid of trailing space\n # Add entry to unit dictionary\n unit_dict_py += unit_dict_entry.format(entry_dict['shortcut'],\n dim_units,\n dim_conv, \n entry_dict['type'],\n entry_dict['symbol'],\n name,\n )\n # Add parenthesis around dimensions if they exist\n if entry_dict['dim']:\n dim_units = \"({})\".format(dim_units)\n # Add modify label entry to modxlabel function\n modxlabel += modxlabel_entry.format(if_str, \n entry_dict['shortcut'],\n name,\n entry_dict['symbol'],\n dim_units,\n )\n # Change if_str after one call\n if if_str == 'if': \n if_str = 'elif' \n # Put all pieces of UnitConversionDict together\n unit_dict_py += 
unit_dict_ftr + modxlabel + modxlabel_ftr\n # Write out file\n with open(f_name, 'w') as outfile:\n outfile.write(unit_dict_py)\n return", "title": "" }, { "docid": "a996e7c1d5808d8f66bddad614777b02", "score": "0.48424968", "text": "def dump_finish(dct, output_file, format, show_system_info, script_name,\n silent = False,\n key = \"system\",\n build_system_info_func = build_system_info):\n from mlflow_tools.display import dump_dct, write_dct\n if show_system_info:\n dct = { **{ key: build_system_info_func(script_name)}, **dct }\n if not silent:\n dump_dct(dct, format)\n if output_file and len(output_file) > 0:\n write_dct(dct, output_file, format)\n return dct", "title": "" }, { "docid": "c7c81073fda8f554cd8c74b3e563476b", "score": "0.48395815", "text": "def formatted_report(self, report_dict, margin='', inc_name=True):\n if inc_name:\n text = _MAGENTA + \"\\t%s\" % self.name + _RESET + '\\n'\n else:\n text = \"\"\n report_dict = sorted(report_dict.items(), key=lambda x: str(x[0]))\n for i, (key, value) in enumerate(report_dict):\n # ====== check value of key and value ====== #\n key = margin + str(key).replace('\\n', ' ')\n # ====== special cases ====== #\n if \"confusionmatrix\" in key.lower():\n value = print_confusion(value)\n else:\n value = str(value)\n # ====== multiple lines or not ====== #\n if '\\n' in value:\n text += _YELLOW + key + _RESET + \":\\n\"\n for line in value.split('\\n'):\n text += margin + ' ' + line + '\\n'\n else:\n text += _YELLOW + key + _RESET + \": \" + value + \"\\n\"\n return text[:-1]", "title": "" }, { "docid": "be57fa1c6e4866b263555835885bd2ca", "score": "0.48324454", "text": "def print_dict(title, dictionary):\n print(\"\\n### {} ###\".format(title))\n for acronym, metric_value in dictionary:\n print(\"{}: {}\".format(acronym, metric_value)) # Debug", "title": "" }, { "docid": "e2a99afee1020b3b07b87b325624bd61", "score": "0.48285696", "text": "def __str__(self):\n string = \"\"\"\nMEASUREMENTS OF CURRENT LEAF\n\nPATH {0}\n\nCM SCALE VERTICAL {1}\n HORIZONTAL {2}\n\nOTSU {3}\n\nPERIMETER CENTIMETERS {4}\n\nLENGTH CENTIMETERS {5}\n\nARRAY FILES PATH {6}\"\"\".format(\n self.data_dict['path'],\n self.data_dict['v_cm'],\n self.data_dict['h_cm'],\n self.data_dict['otsu'],\n self.data_dict['p'],\n self.data_dict['length'],\n self.data_dict['array_files'],\n )\n\n return string", "title": "" }, { "docid": "bcf0dc03d60c58dac93a5902cfdff863", "score": "0.48258525", "text": "def formatstring(self, elfclass):\n\n\t\tif elfclass == ELFCLASS32:\n\t\t\tn = 2\n\t\telse:\n\t\t\tn = 3\n\t\treturn \"\".join(map (lambda t: t[n], self.fields))", "title": "" }, { "docid": "c99588ce1a08e904bf088fab59fd8179", "score": "0.48170868", "text": "def print(self):\n\n for key, value in self.items():\n s = str(value).strip()\n print(\"%s = { %s } \" % (key, s))", "title": "" }, { "docid": "7e6f82b59f436492b2915256044ed7a3", "score": "0.48133737", "text": "def format(self, header=\"\", compact=True) -> str:\n lines = [\n \"\",\n f\"{header:=^140}\",\n ]\n\n for k, v in super().items():\n lines.append(\"\")\n lines.append(f\"{' ' + k + ' ':-^140}\")\n lines.append(pformat(v, width=120, compact=compact))\n\n lines.append(\"\")\n lines.append(\"=\" * 140)\n\n return \"\\n\".join(lines)", "title": "" }, { "docid": "0fc4a2131749c1ebc1dd836e92043318", "score": "0.48123103", "text": "def structure_to_print(residues):\n\n for res in residues:\n if res.struct[\"S\"]:\n res.struct[\"STRC\"] = \"S\"\n if res.struct[\"T\"]:\n res.struct[\"STRC\"] = \"T\"\n if res.struct[\"I\"]:\n 
res.struct[\"STRC\"] = \"I\"\n if res.struct[\"G\"]:\n res.struct[\"STRC\"] = \"G\"\n if res.struct[\"B\"]:\n res.struct[\"STRC\"] = \"B\"\n if res.struct[\"E\"]:\n res.struct[\"STRC\"] = \"E\"\n if res.struct[\"H\"]:\n res.struct[\"STRC\"] = \"H\"", "title": "" }, { "docid": "a0e052498c0fcdfb9f605bb978eba138", "score": "0.4811445", "text": "def print_dict(d):\n for k in d:\n if k.find('align') == -1:\n print(k + ':\\t' + str(d[k]))\n else:\n print(d[k])\n print()", "title": "" }, { "docid": "0244abbe21a81b79110519b54c321429", "score": "0.480837", "text": "def __format__(self, format_spec):\n try:\n return format(self.sval, format_spec)\n except ValueError:\n return super().__format__(format_spec)", "title": "" }, { "docid": "cbf3c9db168290b7fb7777705f43267a", "score": "0.4805382", "text": "def formats_h():", "title": "" }, { "docid": "06eed0aa590e35636427986f893d7b24", "score": "0.48053223", "text": "def render_text(self, outfd, data):\n\n # Summary file object\n summaryfo = None\n summaryinfo = data\n\n if self._config.DUMP_DIR == None:\n debug.error(\"Please specify a dump directory (--dump-dir)\")\n if not os.path.isdir(self._config.DUMP_DIR):\n debug.error(self._config.DUMP_DIR + \" is not a directory\")\n\n if self._config.SUMMARY_FILE:\n summaryfo = open(self._config.SUMMARY_FILE, 'wb')\n\n for summaryinfo in data:\n\n if summaryinfo['type'] == \"DataSectionObject\":\n\n outfd.write(\"DataSectionObject {0:#010x} {1:<6} {2}\\n\".format(summaryinfo['fobj'], summaryinfo['pid'], summaryinfo['name']))\n if len(summaryinfo['present']) == 0:\n continue\n\n of = open(summaryinfo['ofpath'], 'wb')\n\n for mdata in summaryinfo['present']:\n rdata = None\n if not mdata[0]:\n continue\n\n try:\n rdata = self.kaddr_space.base.read(mdata[0], mdata[2])\n except (IOError, OverflowError):\n debug.debug(\"IOError: Pid: {0} File: {1} PhysAddr: {2} Size: {3}\".format(summaryinfo['pid'], summaryinfo['name'], mdata[0], mdata[2]))\n\n if not rdata:\n continue\n\n of.seek(mdata[1])\n of.write(rdata)\n continue\n # XXX Verify FileOffsets\n #for zpad in summaryinfo['pad']:\n # of.seek(zpad[0])\n # of.write(\"\\0\" * zpad[1])\n\n if self._config.SUMMARY_FILE:\n json.dump(summaryinfo, summaryfo)\n of.close()\n\n elif summaryinfo['type'] == \"ImageSectionObject\":\n outfd.write(\"ImageSectionObject {0:#010x} {1:<6} {2}\\n\".format(summaryinfo['fobj'], summaryinfo['pid'], summaryinfo['name']))\n\n if len(summaryinfo['present']) == 0:\n continue\n\n of = open(summaryinfo['ofpath'], 'wb')\n\n for mdata in summaryinfo['present']:\n rdata = None\n if not mdata[0]:\n continue\n\n try:\n rdata = self.kaddr_space.base.read(mdata[0], mdata[2])\n except (IOError, OverflowError):\n debug.debug(\"IOError: Pid: {0} File: {1} PhysAddr: {2} Size: {3}\".format(summaryinfo['pid'], summaryinfo['name'], mdata[0], mdata[2]))\n\n if not rdata:\n continue\n\n of.seek(mdata[1])\n of.write(rdata)\n continue\n\n # XXX Verify FileOffsets\n #for zpad in summaryinfo['pad']:\n # print \"ZPAD 0x%x\"%(zpad[0])\n # of.seek(zpad[0])\n # of.write(\"\\0\" * zpad[1])\n\n if self._config.SUMMARY_FILE:\n json.dump(summaryinfo, summaryfo)\n of.close()\n\n elif summaryinfo['type'] == \"SharedCacheMap\":\n\n outfd.write(\"SharedCacheMap {0:#010x} {1:<6} {2}\\n\".format(summaryinfo['fobj'], summaryinfo['pid'], summaryinfo['name']))\n of = open(summaryinfo['ofpath'], 'wb')\n for vacb in summaryinfo['vacbary']:\n if not vacb:\n continue\n (rdata, mdata, zpad) = self.audited_read_bytes(self.kaddr_space, vacb['baseaddr'], vacb['size'], True)\n ### We need 
to update the mdata,zpad\n if rdata:\n try:\n of.seek(vacb['foffset'])\n of.write(rdata)\n except IOError:\n # TODO: Handle things like write errors (not enough disk space, etc)\n continue\n vacb['present'] = mdata\n vacb['pad'] = zpad\n\n if self._config.SUMMARY_FILE:\n json.dump(summaryinfo, summaryfo)\n of.close()\n\n else:\n return\n if self._config.SUMMARY_FILE:\n summaryfo.close()", "title": "" }, { "docid": "de60758ce271e6df8b07a0016429a96f", "score": "0.48011398", "text": "def emit(self, record):\n return self._dict_formatter.format(record)", "title": "" }, { "docid": "a82ccf8951523bde2036a8ad70da6305", "score": "0.47892973", "text": "def fmt(dictionary, prop) -> dict:\n\n dictionary['meta'] = {\n 'id': uuid.uuid1().hex,\n 'timestamp': int(time.time())\n }\n\n # Would be nice to have a better way of formatting this to utc\n dictionary['created_at'] = dictionary['datetime'] + 'Z'\n dictionary['sensor'] = dictionary['feature']\n dictionary['feature'] = prop\n dictionary['value'] = dictionary['results'][prop]\n\n return dictionary", "title": "" }, { "docid": "a2d33bd1e67d4b48703be0f90731d796", "score": "0.47850809", "text": "def __str__(self):\n ret_str = \"Dict vars:\\n\"\n for k, v in self.__dict__.items():\n ret_str += \"%30s => %-30s %s\\n\" % (str(k), str(v), str(type(v)))\n # ret_str += PathSet.states_to_str(self.states, self.direction)\n return ret_str", "title": "" }, { "docid": "cd85d7b113b867fe56737748507646c3", "score": "0.47844714", "text": "def formatted(self, fmt=None, types=False):\n if fmt is None:\n fmt = self.str_fmt\n\n items = list(self.items())\n items.sort()\n\n klens = []\n vlens = []\n for i, (k, v) in enumerate(items):\n lenk = len(str(k))\n if types:\n v = type(v).__name__\n lenv = len(str(v))\n items[i] = (k, v)\n klens.append(lenk)\n vlens.append(lenv)\n\n klen = min(20, max(klens))\n vlen = min(40, max(vlens))\n slist = [fmt.format(key, value, klen=klen, vlen=vlen) for key, value in items]\n return \"\".join(slist)", "title": "" }, { "docid": "35f8c8741801473bfc97541e2f558705", "score": "0.47710258", "text": "def toDetailedString(self):\n dataDict = self.__dict__\n ll = ['{']\n for ss in self.fieldNames:\n val = getattr(self,ss)\n if isinstance(val, frozenset) or isinstance(val, tuple):\n ll.append(\"'%s': <collection of length %s>,\\n\" % (ss, len(val)))\n else: \n ll.append(\"'%s':%s,\\n\" % (ss, val))\n ll.append('}')\n #\n result = ''.join(ll)\n return result", "title": "" }, { "docid": "561309740abdca4b693492e6d6d26d39", "score": "0.47670078", "text": "def describe(self, tr: dict) -> str:\n description = \"\"\n for (k, v) in tr.items():\n description += f'{k}: {v}\\n'\n\n return description", "title": "" }, { "docid": "85de726425bd943c0ea7b95de4cd17f1", "score": "0.47655347", "text": "def dictToStr(self, dic):\n return map(lambda it: \":\".join(it), dic.items())", "title": "" }, { "docid": "ea59d5c1a03ae560185bf1622b9894b6", "score": "0.47634137", "text": "def text_output_dict_grabbed(self):\n\n print(\"Prnted text output\")", "title": "" }, { "docid": "803628970090e14efb1cb7dd1da5af15", "score": "0.47629228", "text": "def print_dict(dictionary):\n for key,value in dictionary.items():\n print('{:20}: {:20}'.format(key.title(),str(value)))", "title": "" }, { "docid": "97eb74da11fca5d27383f4d942ba3852", "score": "0.4760368", "text": "def format_variant(self, variant):\n\n def get_value(variant, category, member):\n \"\"\"Return the correct value from the variant according to rules in config parser.\n vcf_fiels can be one of the following[CHROM, POS, ID, 
REF, ALT, QUAL, INFO, FORMAT, individual, other]\"\"\"\n # If information is on the core we can access it directly through the vcf key\n value = None\n # In this case we read straight from the vcf line\n if self.config_object[member]['vcf_field'] not in ['INFO', 'FORMAT', 'other', 'individual']:\n value = variant[self.config_object[member]['vcf_field']]\n\n # In this case we need to check the info dictionary:\n elif self.config_object[member]['vcf_field'] == 'INFO':\n value = variant['info_dict'].get(self.config_object[member]['vcf_info_key'], None)\n\n # Check if we should return a list:\n if value and self.config_object[member]['vcf_data_field_number'] != '1':\n value = value.split(self.config_object[member]['vcf_data_field_separator'])\n return value\n\n formated_variant = {}\n formated_variant['id'] = variant['variant_id']\n for category in self.config_object.categories:\n for member in self.config_object.categories[category]:\n if category != 'config_info':\n formated_variant[self.config_object[member]['internal_record_key']] = get_value(variant, category, member)\n\n return formated_variant", "title": "" }, { "docid": "7a278383c280225a4b0450324f3c903d", "score": "0.4757642", "text": "def format_print(self, val):\r\n\r\n output = \"\"\r\n header = list(val[\"header\"])\r\n data = val[\"data\"]\r\n\r\n max_len = [len(str(a)) for a in header]\r\n for para_tuple in data:\r\n max_len = [max_len[i] if max_len[i] > len(str(para_tuple[i])) else len(str(para_tuple[i]))\r\n for i in range(len(max_len))]\r\n generate_format = \"\"\r\n for len_str in max_len:\r\n generate_format += \"%-\" + str(len_str + 2) + \"s\"\r\n generate_format += \"\\n\"\r\n\r\n output += generate_format % tuple(header)\r\n for para_tuple in data:\r\n output += generate_format % tuple(para_tuple)\r\n return output", "title": "" }, { "docid": "1becdc193ea46e763efc222225119dfb", "score": "0.4757116", "text": "def format_map(self, mapping): # real signature unknown; restored from __doc__\n return \"\"", "title": "" }, { "docid": "af22a91fcc597e495f9dfd0a609aa48d", "score": "0.47532234", "text": "def __str__(self):\n items = ['{!r}: {!r}'.format(key, val) for key, val in self.items()]\n return '{' + ', '.join(items) + '}'", "title": "" }, { "docid": "af22a91fcc597e495f9dfd0a609aa48d", "score": "0.47532234", "text": "def __str__(self):\n items = ['{!r}: {!r}'.format(key, val) for key, val in self.items()]\n return '{' + ', '.join(items) + '}'", "title": "" }, { "docid": "a02354d20e5c49c1b3563614a72ddbb4", "score": "0.47403353", "text": "def to_string(str_info):\n all_str_info = []\n for i in range(len(str_info[\"set_files\"])):\n b_str = \"{}: \\n\\tSpk {}\\n\\tUnt {}\\n\\tPos {}\".format(\n i, str_info[\"spike_files\"][i], str_info[\"unit\"][i],\n str_info[\"txt_files\"][i])\n all_str_info.append(b_str)\n return \"\\n\".join(all_str_info)", "title": "" }, { "docid": "af7a2dcfa3867ba9f1b32ef4f0c9a79b", "score": "0.47400913", "text": "def write(self, fp):\n for s in self._sections[0]:\n fp.write(\"[%s]\\n\" % s[0])\n for k, v in s[1][0]:\n fp.write(\"%s = %s\\n\" % (k, str(v).replace('\\n', '\\n\\t')))\n fp.write(\"\\n\")", "title": "" }, { "docid": "bdafaad8f6741d79a70c3c40ce3cbb00", "score": "0.4731807", "text": "def __repr__(self):\n\n rep = [f' \\\"{k}\\\": {v}' for k, v in self.items()]\n return '{\\n %s}' % (',\\n '.join(rep))", "title": "" }, { "docid": "db5545f33cf6de473976efd22d7f7705", "score": "0.47312436", "text": "def format(**kwargs):", "title": "" }, { "docid": "02114988393ba275c5c4ed564c6f2e10", "score": 
"0.47224447", "text": "def statistics_formatted(self):\n res=[]\n kl=self.statistics.keys()\n kl.sort()\n for k in kl:\n v=self.statistics[k]\n res.append(\"\\t%s\" % helper.format_element_name(k, v))\n return \"\\n\".join(res)", "title": "" }, { "docid": "cce9164b5b0ca19a1255477951ccebf9", "score": "0.47210604", "text": "def unconverted_report(self):\n lines = []\n for key, value in list(self.items()):\n if not isinstance(value, UnconvertedValue):\n continue\n lines.append(\"Field %s - %s\" % (key, value))\n return \"\\n\".join(lines)", "title": "" }, { "docid": "facf1317fe447e35d43c711dacbacfbb", "score": "0.47185612", "text": "def format_dict(d, *, style='equals') -> str:\n code_block = '```{}\\n'.format('ini' if style == 'ini' else '')\n padding = len(max(d.keys(), key=len))\n\n for name, value in d.items():\n if style == 'equals':\n code_block += '{name: <{width}} = {value}\\n'.format(name=name, width=padding, value=value)\n elif style == 'ini':\n code_block += '{name: <{width}} {value}\\n'.format(name=f'[{name}]', width=padding + 2, value=value)\n\n code_block += '```'\n return code_block", "title": "" }, { "docid": "eac71ac4c091254e118052f85223698a", "score": "0.4715765", "text": "def print_request_format():\n print \"Could not pack this request\"\n print \"Request must be a dict of format\"\n print \"KEY\\t\\t\\t\\tBYTES\"\n print \"'command':\\t\\t\\t1\"\n print \"'offset_write':\\t\\t2\"\n print \"'length_write':\\t\\t2\"\n print \"'data_write'\\t\\tn\"\n print \"'offset_read:'\\t\\t2\"\n print \"'length_read':\\t\\t2\"", "title": "" }, { "docid": "1f854eb225b17ab0963eea2045a0b01b", "score": "0.4714345", "text": "def fio_to_dict(fio_output):\n fio_output_lines = fio_output.splitlines()\n for line_num, line in enumerate(fio_output_lines):\n if line == \"{\":\n break\n else:\n logger.info(line)\n fio_parseable_output = \"\\n\".join(fio_output_lines[line_num:])\n fio_report = yaml.safe_load(fio_parseable_output)\n return fio_report", "title": "" }, { "docid": "6c2202a3f66b9a961a10d0c1cf9f4b4d", "score": "0.47087252", "text": "def format_for_output(metric_dict):\n return [\"{}: {}\\n\".format(k, v) for k, v in metric_dict.items()] + [\"\\n\"]", "title": "" }, { "docid": "bc3f3bd767eedadccd364fd5b678f047", "score": "0.47032747", "text": "def __str__(self):\n items = ['{}: {}'.format(repr(k), repr(v)) for k, v in self.items()]\n return '{' + ', '.join(items) + '}'", "title": "" }, { "docid": "c3c89676384879bc67f797300d9813e1", "score": "0.4698426", "text": "def file_format_data(self) -> str:\n return self._file_format_data", "title": "" }, { "docid": "e7c573c652a26fe311616a15eaff8f56", "score": "0.4695677", "text": "def format_(what, val):\n\n safely_removed = [r'%k', r'%c', r'%i']\n\n for key in safely_removed:\n what = what.replace(key, '')\n\n all_keys = [r'%f', '%u', r'%F', '%U']\n multi = [r'%F', '%U']\n\n for key in all_keys:\n if key in what:\n if key not in multi:\n # Acording to the spec, multiple files in single formatters\n # must be dealt with by the implementations...\n val = val.split()[0]\n return what.replace(key, val)\n\n return what", "title": "" }, { "docid": "60d16bde587b817817d95a648b2e1708", "score": "0.46905133", "text": "def input_data():\n def factory(key_value):\n return (f\"{key} {value}\\n\" for key, value in key_value.items())\n\n return factory", "title": "" }, { "docid": "6987de4b973525daabfb8aa3f56bb46c", "score": "0.46828803", "text": "def perfoutput(self):\n\n # Add quotes if label contains a space character\n label = self.label\n if 
re_ws.search(label):\n label = \"'\" + self.label + \"'\"\n\n out = \"%s=%s%s;%s;%s;%s;%s\" % (\n label,\n self.value,\n self._nvl(self.uom),\n self._nvl(self.warning),\n self._nvl(self.critical),\n self._nvl(self.min_data),\n self._nvl(self.max_data),\n )\n\n # omit trailing ;;\n out = re_trailing_semicolons.sub('', out)\n\n return out", "title": "" }, { "docid": "2a467eb4507c13014cb3b239036036fc", "score": "0.46794012", "text": "def __repr__(self):\n return '{%s}' % ', '.join(['%r: %r' % (k, v) for k, v in self.items()])", "title": "" }, { "docid": "d3f353bd361eff9f55c5e3905439965d", "score": "0.46694568", "text": "def dump_to_string(self, cnf, **kwargs):\n pass", "title": "" }, { "docid": "714e581c2af513a8c77cd71daf7d1b3d", "score": "0.46693802", "text": "def dictfile(self):", "title": "" }, { "docid": "808a4a3d3b98c1a1e74bb1077b540228", "score": "0.4666322", "text": "def pretty_dict_repr(d):\n lines = [' {0!r}: {1!r},'.format(k, v) for (k, v) in sorted(d.items())]\n return '\\n'.join(['{'] + lines + ['}'])", "title": "" }, { "docid": "a3ed699239b04e3614d56aa8121336b9", "score": "0.46652162", "text": "def format(value):", "title": "" }, { "docid": "11427e6a3e8d3f623b8ef74b8611c6ea", "score": "0.4660958", "text": "def __str__(self):\r\n return \"%s\\tNo:%s\\tlength:%s\\tMFE:%.2f\\n%s\\n%s\" % (self.id, str(self.no), str(self.length), self.MFE, self.sequence, self.sstructure)", "title": "" }, { "docid": "2ee3c72e299626bf27263007b17cb1cc", "score": "0.4656442", "text": "def print_dict(dict_to_print, message=''):\n for k1, v1 in dict_to_print.items():\n print(f'{message} {k1}: {len(v1)} ')", "title": "" }, { "docid": "51df607feeb87a4f0b6d99396da843c7", "score": "0.4647769", "text": "def output_features(fo, X, field=''):\n for t in range(len(X)):\n if field:\n fo.write('%s' % X[t][field])\n for a in X[t]['F']:\n if isinstance(a, str):\n fo.write('\\t%s' % escape(a))\n else:\n fo.write('\\t%s:%f' % (escape(a[0]), a[1]))\n fo.write('\\n')\n fo.write('\\n')", "title": "" }, { "docid": "0c46a98f1f8931d0ef6ac940289b706c", "score": "0.46422628", "text": "def print_dict(self, title='IoU', save_data=False):\n total = []\n for key in self.dict.keys():\n val = self.dict[key]\n avg_val = np.average(val)\n len_val = len(val)\n std_val = np.std(val)\n\n if key in self.save_dict.keys():\n self.save_dict[key].append([avg_val, std_val])\n else:\n self.save_dict[key] = [[avg_val, std_val]]\n\n print('Activity:%s, mean %s is %0.4f, std %s is %0.4f, length of data is %d' \\\n % (key, title, avg_val, title, std_val, len_val))\n\n total.extend(val)\n\n self.dict = {}\n avg_total = np.average(total)\n len_total = len(total)\n std_total = np.std(total)\n print('\\nOverall: mean %s is %0.4f, std %s is %0.4f, length of data is %d \\n' \\\n % (title, avg_total, title, std_total, len_total))\n\n if save_data:\n print('Save %s pickle file' % title)\n with open('img/%s.pickle' % title, 'wb') as f:\n pickle.dump(self.save_dict, f)", "title": "" }, { "docid": 
"25fa1605064847877491b0e01c0cbef6", "score": "0.46408245", "text": "def _uniques_formatter(self, uniques):\n return [{'f': feature, 'ks': list(keys)} for feature, keys in uniques.items()]", "title": "" }, { "docid": "34f57b9eade6bf4d95453225e4b94e45", "score": "0.46378493", "text": "def convert_text_dict(self, name:str, _dict:dict):\n values = _dict['values']\n if not is_valid_text(values):\n logger.error(values)\n return\n _term = values.get(\"term\")\n _is_exact = values.get(\"is_exact\", False)\n self.term(field=name, _term=_term, is_exact=_is_exact)", "title": "" }, { "docid": "5f0e4eb3a65db3dc645b68e58ffd5639", "score": "0.46375614", "text": "def __repr__(self):\n return 'Flutt(text={!r}, timestamp={!r})'.format(self.text, self.timestamp)", "title": "" }, { "docid": "6812fd9bb5624e3a452d77677ef5cc59", "score": "0.46332446", "text": "def write_as_slf(context:Context, fname:str):\n with open(fname, 'w') as fd:\n fd.write('[Lattice]\\n')\n fd.write('{}\\n{}\\n'.format(len(context.objects), len(context.attributes)))\n fd.write('[Objects]\\n')\n fd.write('\\n'.join(map(str, context.objects)) + '\\n')\n fd.write('[Attributes]\\n')\n fd.write('\\n'.join(map(str, context.attributes)) + '\\n')\n fd.write('[relation]\\n')\n for obj in context.objects:\n fd.write(' '.join('1' if (obj, att) in context.relations else '0'\n for att in context.attributes) + ' \\n')", "title": "" } ]
cb16b693e584944a7e77739f9d40aa4b
Retrieves a single instance of egtpPgwS5S8Range data from the server. Args
[ { "docid": "a7f1e55f5aeb78cb808fb3b652ebcdc4", "score": "0.0", "text": "def read(self, href):\n return self._read(href)", "title": "" } ]
[ { "docid": "9d93e9d987aeea759f2457eb5e6a9cd5", "score": "0.5590422", "text": "def _get_range(self):\n\n pass", "title": "" }, { "docid": "201c5d245c84868cbe476b5f4568f99a", "score": "0.5568858", "text": "def find(\n self,\n ChangeReportingMode=None,\n EnableEchoRequest=None,\n Enabled=None,\n IpType=None,\n N3CreateBearerReq=None,\n N3DeleteBearerReq=None,\n N3EchoReq=None,\n N3UpdateBearerReq=None,\n Name=None,\n ObjectId=None,\n T3CreateBearerReq=None,\n T3DeleteBearerReq=None,\n T3EchoReq=None,\n T3UpdateBearerReq=None,\n UseCpIp=None,\n UseUpIp=None,\n ):\n # type: (int, bool, bool, str, int, int, int, int, str, str, int, int, int, int, bool, bool) -> EgtpPgwS5S8Range\n return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))", "title": "" }, { "docid": "0f47efd62eda67f484c16039284ebe44", "score": "0.5461539", "text": "def _get_range(self):\n return self.__range", "title": "" }, { "docid": "377d7979e91ea74c9b2c90329ccc8606", "score": "0.5414561", "text": "def get_frame_range_from_shotgun(self):\r\n # we know that this exists now (checked in init)\r\n entity = self.app.context.entity\r\n\r\n sg_entity_type = self.app.context.entity[\"type\"]\r\n sg_filters = [[\"id\", \"is\", entity[\"id\"]]]\r\n\r\n sg_in_field = self.app.get_setting(\"sg_in_frame_field\")\r\n sg_out_field = self.app.get_setting(\"sg_out_frame_field\")\r\n fields = [sg_in_field, sg_out_field]\r\n\r\n #import time\r\n #start = time.time()\r\n data = self.app.shotgun.find_one(sg_entity_type, filters=sg_filters, fields=fields)\r\n #data = self.dbWrap.find_one(self.sg, sg_entity_type, sg_filters, fields)\r\n #print 'TIME: %s' % (time.time()-start)\r\n # check if fields exist!\r\n if sg_in_field not in data:\r\n raise tank.TankError(\"Configuration error: Your current context is connected to a Shotgun \"\r\n \"%s. This entity type does not have a \"\r\n \"field %s.%s!\" % (sg_entity_type, sg_entity_type, sg_in_field))\r\n\r\n if sg_out_field not in data:\r\n raise tank.TankError(\"Configuration error: Your current context is connected to a Shotgun \"\r\n \"%s. 
This entity type does not have a \"\r\n \"field %s.%s!\" % (sg_entity_type, sg_entity_type, sg_out_field))\r\n\r\n return ( data[sg_in_field], data[sg_out_field] )", "title": "" }, { "docid": "b720449958378c2a758da67a2604d6cc", "score": "0.5393849", "text": "def getDetailedGWData(self):\n\n gw_range = self.range\n min_gw = gw_range[0]\n max_gw = gw_range[1]\n gw_data = []\n url = f\"https://fantasy.premierleague.com/api/entry/{self.id}/event/\"\n\n for gw in range(min_gw, max_gw+1):\n gw_url = url + f\"{gw}/picks/\"\n gw_data.append(get(gw_url))\n\n #for gw in gw_data:\n # for pick in gw['picks']:\n # pick['points'] = Player(pick['element']).getSingleGWData(gw['entry_history']['event'])['total_points']\n\n return gw_data", "title": "" }, { "docid": "ff4adb1f5ee35e1665ebe7211b114435", "score": "0.5157045", "text": "def _GetRange(self):\n range_header = self.headers[\"Range\"]\n if not range_header:\n return (None, None)\n bytes_regex = re.compile(r\"^bytes=(\\d+)\\-(\\d+)?\")\n match = bytes_regex.search(range_header)\n if not match:\n return (None, None)\n if match.group(2) is not None:\n return (int(match.group(1)), int(match.group(2)))\n else:\n return (int(match.group(1)), None)", "title": "" }, { "docid": "4b8e5a1176c76e6d0697b8d16ebba017", "score": "0.51555294", "text": "def get_data_range(self, station):\n station = clean_station(station)\n route = f\"/data/{station}\"\n return self.get(route)", "title": "" }, { "docid": "0e15abb92ce3f1485d159e0c83056dd4", "score": "0.5105717", "text": "def get_range(self, name):\n index, length = Ship.get_index(name)\n if 1 == length:\n raise RuntimeError(\"Use Ship.get for single element data.\")\n else:\n return self.encoding[index:index + length]", "title": "" }, { "docid": "3581916cc9a58afeb93467a96a8827ab", "score": "0.5100943", "text": "def range(self):\n return self[\"range\"]", "title": "" }, { "docid": "73246be92e7c49726eb0585ff428b6db", "score": "0.49894643", "text": "def _get_range(self, stream_type, stream_slug, start, end):\n try:\n if stream_type == 'data':\n # Get the stream data within this time range\n # Use the stream API (instead of the data API) to get the data value base on the output units\n data = self.api.stream(str(stream_slug)).data.get(start=start, end=end, page_size=5000)\n elif stream_type == 'event':\n data = self.api.event().get(filter=str(stream_slug), start=start, end=end, page_size=5000)\n else:\n logger.error('Incorrect stream type')\n return []\n except HttpNotFoundError as e:\n logger.error(e)\n sys.exit(1)\n\n if data['count'] > 0:\n return data['results']\n return []", "title": "" }, { "docid": "421cbffda8ae13ef21a95f5c5a986a38", "score": "0.4891854", "text": "def getSummaryGWData(self):\n gw_range = self.range\n min_gw = gw_range[0]\n max_gw = gw_range[1]\n\n url = f\"https://fantasy.premierleague.com/api/entry/{self.id}/history/\"\n summary_data = get(url)\n\n summary_data['current'] = summary_data['current'][(min_gw-1):(max_gw)]\n\n return summary_data", "title": "" }, { "docid": "1acbf41b8333074168e6de3d87b6ee8a", "score": "0.4890086", "text": "def get_segment_routing_gb_range(device):\r\n try:\r\n out = device.parse(\"show segment-routing mpls gb\")\r\n except SchemaEmptyParserError:\r\n return None, None\r\n\r\n return out.get(\"label_min\"), out.get(\"label_max\")", "title": "" }, { "docid": "a43433714d7e2e4955a41b9c602026d6", "score": "0.48408616", "text": "def range(self):\r\n return parse_range_header(self.environ.get('HTTP_RANGE'))", "title": "" }, { "docid": "dddc73509845c4f5bdb7f28c93c7b354", 
"score": "0.4830782", "text": "def range(self):\n return parse_range_header(self.environ.get('HTTP_RANGE'))", "title": "" }, { "docid": "acd05617a5b92725a3ec466cee7fc638", "score": "0.48281986", "text": "def get(self, name):\n index, length = Ship.get_index(name)\n if 1 == length:\n return self.encoding[index].item()\n else:\n raise RuntimeError(\"Use Ship.get_range for multi-element data.\")", "title": "" }, { "docid": "2d42cbe01c35ef10a17ac2352379d71f", "score": "0.48153645", "text": "def _get_range(self, kwargs):\n value = kwargs.pop(self.range_keyname)\n return value", "title": "" }, { "docid": "c6eff04487cc492a8b069261f3962d91", "score": "0.47725567", "text": "def gw_retrieve(gw):\n\n\treturn gw", "title": "" }, { "docid": "d4ddeb51233be8f80b0363b91f8c5e0e", "score": "0.47187412", "text": "def get_random_range(self):\n raise NotImplementedError", "title": "" }, { "docid": "b51f8ceefbdef99c45f2d7e237258e52", "score": "0.46854198", "text": "def ens_srng(grid):\r\n return GridMath.ensembleRangeValues(grid)", "title": "" }, { "docid": "c398b6610256eda664ddaaf6b69cdea4", "score": "0.46838108", "text": "def _get_range(data):\n new_range = []\n if isinstance(data, int):\n new_range = [data]\n elif isinstance(data, str):\n # Could be '0-2000' or '0x0 - 0x700', where in the second case the range is specified in hexa.\n new_range = range(*[int(boundary, 0) for boundary in map(str.strip, data.split('-'))])\n elif isinstance(data, list):\n new_range = data\n return new_range", "title": "" }, { "docid": "e6014dff196b393c45352180fb90d210", "score": "0.46707457", "text": "def get(self):\n ee.Initialize(config.EE_CREDENTIALS, config.EE_URL)\n self.initData()\n #to do: error handling\n filterVals = {}\n filterVals['rem'] = float(self.request.get('minRem'))\n filterVals['bdp'] = float(self.request.get('minBDP'))\n filterVals['bda'] = float(self.request.get('minBDA'))\n filterVals['bdb'] = float(self.request.get('minBDB'))\n filterVals['bdm'] = float(self.request.get('minBDM'))\n filterVals['endp'] = float(self.request.get('minEndP'))\n filterVals['endv'] = float(self.request.get('minEndV'))\n\n region = self.request.get('region');\n\n #convert raw filter values to pixel values\n pixelVals = self.getPixelValues(filterVals, region)\n \n #get info for display using filters\n template_values = self.getLayer(pixelVals, region)\n self.response.headers['Content-Type'] = 'application/json'\n self.response.out.write(json.dumps(template_values))", "title": "" }, { "docid": "1b187c3e3fcce9737ffe9951cca8cd71", "score": "0.4650596", "text": "def _get_page_range(self):\n raise NotImplementedError", "title": "" }, { "docid": "e27b3a2b49216aa06567b91725d139af", "score": "0.46204275", "text": "def range_(self):\n return self._range", "title": "" }, { "docid": "5535dc9bd20a73e3a1df0e6aa48dc5f8", "score": "0.46184725", "text": "def timebound(agerange=None, ageunits=None):\n t0 = time()\n desc_obj = dict()\n\n # Log this request\n \n print(\"Request: \" + connexion.request.method + \" \" + connexion.request.base_url,\n connexion.request.args.to_dict() )\n \n # Set runtime options\n\n try:\n options = params.set_options(req_args=connexion.request.args,\n endpoint='misc')\n\n except ValueError as err:\n return connexion.problem(status=err.args[0],\n title=Status(err.args[0]).name,\n detail=err.args[1],\n type='about:blank')\n\n # Call parse function to check for parameter errors\n\n try:\n params.parse(req_args=connexion.request.args,\n options=options,\n db='pbdb',\n endpoint='timebound')\n\n except ValueError as err:\n 
return connexion.problem(status=err.args[0],\n title=Status(err.args[0]).name,\n detail=err.args[1],\n type='about:blank')\n\n # Determine time bounds and resolve geologic age if necessary\n\n try:\n early_age, late_age, \\\n col_hex, age_ref = ages.get_age(age_range=agerange,\n options=options)\n\n except ValueError as err:\n return connexion.problem(status=err.args[0],\n title=Status(err.args[0]).name,\n detail=err.args[1],\n type='about:blank')\n\n # Build returned metadata object\n\n desc_obj.update(aux.build_meta(options))\n\n desc_obj.update(aux.build_meta_sub(source=age_ref,\n t0=t0,\n sub_tag='geo_age',\n options=options))\n\n # Return data structure to client\n\n return_obj = {'early_age': early_age,\n 'late_age': late_age,\n 'ics_color': col_hex}\n\n return jsonify(metadata=desc_obj, records=return_obj)", "title": "" }, { "docid": "576761fc6194eca358149eb98f223dc7", "score": "0.4597863", "text": "def h5(self):\n return self.resource.h5", "title": "" }, { "docid": "684caa1778cd51730b37bad18736bebf", "score": "0.45961392", "text": "def get_regions(k5token):\n\n #URL for global contract service\n regionsURL = 'https://contract.gls.cloud.global.fujitsu.com/v1/regions'\n\n #creating empty list \n regionlist = []\n print \"*** Obtaining the current list of K5 regions\"\n try:\n response = requests.get(regionsURL,headers={'X-Auth-Token': k5token,'Content-Type': 'application/json','Accept': 'application/json'})\n #Process the return JSON body response\n regions = response.json()[\"regions\"]\n #For each region found\n for item in regions :\n #Store the ID value\n regionid = item.get(\"id\")\n regionlist.append(regionid)\n #then add it to the regional list object\n print \"****** Found \" + str(regionid) + \" Region\"\n return regionlist\n except:\n return \"*** Error obtaining Region List.\"", "title": "" }, { "docid": "011ae9a7a3ada9d6ac7d9519b2ea0ebf", "score": "0.45951587", "text": "def extract(self):\n lon_min, lon_max, lat_min, lat_max = self.coverage[0]\n xmin = int(lon_min*8 - BASE_LON8)\n xmax = int(lon_max*8 - BASE_LON8)\n ymin = int(lat_min*8 - BASE_LAT8)\n ymax = int(lat_max*8 - BASE_LAT8)\n begin = self.date2index(self.begin)\n if self.end:\n end = self.date2index(self.end)\n else:\n end = begin\n end += 1 # in case we want to slice begin:begin or alike\n str = \"Slicing as if [{:d}:{:d}][{:d}:{:d}][{:d}:{:d}]\".format(\n begin, end, ymin, ymax, xmin, xmax)\n log.debug(str)\n ds = open_url(URL)\n # print(ds.time[0])\n # return\n # time_range = inclusive_range(begin, end, 1000)\n time_range = range(begin, end-1, CHUNK) + [end]\n time_slices = [ slice(a, b) for a, b in zip(time_range[:-1], time_range[1:]) ]\n start = datetime.now()\n if self.csv:\n self._write_csv_header()\n for the_slice in time_slices:\n log.debug(\"Working on {:s}\".format(the_slice))\n t = linspace((the_slice.start-begin)/24., (the_slice.stop-begin)/24., the_slice.stop-the_slice.start, False)\n t2 = t + 1./24 - 0.00001\n all = {}\n # enumerate()\n for var, code in VARIABLES.iteritems():\n if self.want_quit:\n return\n if self.callback:\n self.callback(var, the_slice.start-begin, end-begin)\n ref = ds[var]\n arr = ref[the_slice, ymin:ymax, xmin:xmax]\n # time_len = arr.shape[0] # lat_len, lon_len\n for feature in self.coverage[1]:\n id = feature[0]\n x = feature[1] - 1 - xmin\n y = feature[2] - 1 - ymin\n v = arr[:, y, x]\n str = \"id={:s}, x={:d}, y={:d}, time={:s}, lon={:s}, lat={:s}\".format(\n id, x, y, v.data[1][:1] + v.data[1][-2:], v.data[2], v.data[3])\n log.debug(str)\n vv = 
squeeze(asarray(v.data[0]))\n if self.csv:\n if not id in all:\n # all[id] = (squeeze(v.data[1]) - EXCEL_OFFSET,) # Excel's serial date\n # requires numpy 1.7.0+\n # all[id] = (utcfromtimestamp_vec((squeeze(v.data[1]) - EPOCH_OFFSET_DAYS) * 86400.),) # datetime\n # all[id] = (d2str_vec(squeeze(v.data[1])),) # str\n # all[id] = (d2str_vec(squeeze(v.data[1])).tolist(),) # str\n # all[id] = (v.data[1].tolist(),)\n # +.1 to overcome rounding issues making it 1:59:59.999\n all[id] = (utcfromtimestamp_vec((squeeze(v.data[1]) - EPOCH_OFFSET_DAYS) * 86400. + .1).tolist(),) # datetime\n all[id] += (vv.tolist(),)\n else: # PIHM 3\n name = os.path.join(self.output, \"{:s}z{:s}.txt\".format(id, code))\n with open(name, 'a') as f:\n out = column_stack((t, vv, t2, vv))\n savetxt(f, out, '%.5f')\n if self.csv:\n for id, vars in all.iteritems():\n name = os.path.join(self.output, \"{:s}.csv\".format(id))\n with open(name, 'a') as f:\n # savetxt(f, column_stack(vars), '%.5f', delimiter=',')\n # savetxt(f, column_stack(vars), '%10.5s', delimiter=',')\n for row in zip(*vars):\n print(\"{:s},{:f},{:f},{:f}\".format(\n row[0].strftime(\"%Y-%m-%d %H:%M:%S\"), *row[1:]), file=f)\n td = datetime.now() - start\n s = \"It took me {:s}\".format(td)\n log.info(s)", "title": "" }, { "docid": "bd1eff7cfbd25dc05ec170cd0ad2fddd", "score": "0.4572791", "text": "def getDataSegment(self, tmin, tmax, chunkN=10000):\n\n # default expensive implementation....\n return self.getData().query('{} <= time <= {}'.format(tmin, tmax))", "title": "" }, { "docid": "4b093e7de4ea0796bf208a51819ccf99", "score": "0.45709386", "text": "def IpRangeMme(self):\n from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.iprangemme_f11ea433502d3383ef61d256580f15b9 import IpRangeMme\n return IpRangeMme(self)._select()", "title": "" }, { "docid": "efda266b1fc77fd2db79e774b0727b53", "score": "0.45435032", "text": "def iqr_get_results_range():\n s_idx = int(flask.request.args['s'])\n e_idx = int(flask.request.args['e'])\n geofilter_bbox = self._parse_geo_filter_arg(\n flask.request.args.get('geofilter', None)\n )\n self.log.info(\"Results fetch geo filter: %s\", geofilter_bbox)\n f_results = self._geofiltered_ordered_results(geofilter_bbox, e_idx)\n return flask.jsonify({\n \"results\": f_results[s_idx:e_idx]\n })", "title": "" }, { "docid": "bf6a1c7b7d11d20c2e26f9a4e6aeb783", "score": "0.4539754", "text": "def hg_request(endpoint):\n conn = httplib.HTTPSConnection(hg_url)\n headers = {\"User-Agent\": \"Automatic downstream classification script. If I'm overloading the server, please find royc on #treeherder\"}\n conn.request(\"GET\", endpoint, {}, headers)\n\n global request_count\n request_count += 1\n print \"{}. 
{}\".format(request_count, endpoint)\n\n return json.loads(conn.getresponse().read())", "title": "" }, { "docid": "e2ecf37412f7d08b81af72e1e9978a6f", "score": "0.45384178", "text": "def range(self):\n return self.__range", "title": "" }, { "docid": "30fa4f90c279ff994774131dfcb3b21f", "score": "0.45370737", "text": "def _get_trip_range(self):\n start_data = self._get_last('data', self._get_stream_slug(TRIP_VARS['TRIP_START']))\n end_data = self._get_last('data', self._get_stream_slug(TRIP_VARS['TRIP_END']))\n\n # pprint(start_data)\n # pprint(end_data)\n\n if start_data and end_data:\n return start_data['timestamp'], end_data['timestamp']\n return None, None", "title": "" }, { "docid": "59c4217835afc38aaa0e7f911c817745", "score": "0.4528211", "text": "def DnsRange(self):\n from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.dnsrange_5de7db75e86634c1d3cdcd23ef7563d8 import DnsRange\n return DnsRange(self)._select()", "title": "" }, { "docid": "80b9c3da34d92571dfec09078e122c34", "score": "0.45063227", "text": "def get_param_from_hf5(filename,data_desc,p_name,hemispherecode,LAT_BOUND):\n \n # open file\n try:\n f=h5py.File(filename,'r')\n except:\n return None,None,None\n sys.exit(\"Cannot open file %s\" % filename)\n\n # get lattitude\n lat = np.array(f.get(data_desc['lat']))\n\n # if data does not exist\n if f.get(data_desc[p_name]) is None or lat is None:\n print(\"\\n%s:%s not found in %s\" %(p_name,data_desc[p_name],filename))\n return None,None,None\n\n # Select coord above desired region\n if hemispherecode=='01':\n select_zone, = np.where(lat > LAT_BOUND)\n else:\n select_zone, = np.where(lat < -LAT_BOUND)\n\n if select_zone.size==0:\n print(\"\\n\\nNo data over %i N for file: %s\\n\" %(LAT_BOUND,filename))\n return None,None,None\n \n param = np.array(f.get(data_desc[p_name]))\n param = param[select_zone]\n units = f.get(data_desc[p_name]).attrs['units'].decode('UTF-8')\n\n if '_FillValue' in f.get(data_desc[p_name]).attrs.keys(): \n fill_value = f.get(data_desc[p_name]).attrs['_FillValue']\n param[param==fill_value] = np.nan\n\n param_is_flag=False \n if 'flag_values' in f.get(data_desc[p_name]).attrs.keys(): param_is_flag=True\n\n return param,units,param_is_flag", "title": "" }, { "docid": "f4d8eef76f2bdfe57093deb69f579519", "score": "0.44851094", "text": "def get_instance_info(self,instance_id):\n return self.client.lrange(instance_id,0,-1);", "title": "" }, { "docid": "351f7daaf74477a63e8fc8fff8ecf6cf", "score": "0.44840258", "text": "def get_range(value):\n return range(value)", "title": "" }, { "docid": "38a6b44e1f6518d2617245562552a002", "score": "0.44756213", "text": "def get_range(value):\n return range(value)", "title": "" }, { "docid": "7f476d8961c3a8b7e80ecd12cf7c055e", "score": "0.4471798", "text": "def range(self) -> Optional['outputs.GatewayRouteSpecHttpRouteMatchHeaderMatchRange']:\n return pulumi.get(self, \"range\")", "title": "" }, { "docid": "57967dadcb15e020a3a19707c679fb3e", "score": "0.44713357", "text": "def instance_view(self) -> 'outputs.CapacityReservationGroupInstanceViewResponse':\n return pulumi.get(self, \"instance_view\")", "title": "" }, { "docid": "df40ee4ecda0d7d24997428246057dc8", "score": "0.4435722", "text": "def query_range(self, query, start, end, step=None):\n if step is None:\n step = \"5s\"\n if start is None:\n start = datetime.utcnow()\n else:\n if isinstance(start, numbers.Number):\n start = datetime.utcfromtimestamp(start)\n elif start.tzinfo:\n start = start.astimezone(tz=pytz.utc)\n if end is None:\n end = 
datetime.utcnow()\n else:\n if isinstance(end, numbers.Number):\n end = datetime.utcfromtimestamp(end)\n elif end.tzinfo:\n end = end.astimezone(tz=pytz.utc)\n params = {\"query\": query,\n \"start\": start.strftime(self.DATE_FORMAT),\n \"end\": end.strftime(self.DATE_FORMAT),\n \"step\": step}\n resp = requests.get(self.base_url + \"query_range\", params=params)\n try:\n json_data = resp.json()\n except:\n log.debug(\"Failed to decode JSON from query_range: %r\", resp,\n exc_info=True)\n if resp.status_code != 200:\n raise PrometheusAPIError(\"Failed to query_range: %r\" % resp)\n else:\n raise PrometheusAPIError(\"Received status code 200, but JSON could \"\n \"not be decoded: %r\" % resp.text)\n if resp.status_code != 200:\n raise PrometheusAPIError(\"Failed to query_range: %r\" %\n json_data.get(\"error\"))\n return json_data[\"data\"]", "title": "" }, { "docid": "2e489600e8f472febda813440d5a75d6", "score": "0.44333982", "text": "async def async_gas_consumption(self, mprn, serial_number, period_from, period_to):\n async with aiohttp.ClientSession() as client:\n auth = aiohttp.BasicAuth(self._api_key, '')\n url = f'{self._base_url}/v1/gas-meter-points/{mprn}/meters/{serial_number}/consumption?period_from={period_from.strftime(\"%Y-%m-%dT%H:%M:%SZ\")}&period_to={period_to.strftime(\"%Y-%m-%dT%H:%M:%SZ\")}'\n async with client.get(url, auth=auth) as response:\n # Disable content type check as sometimes it can report text/html\n data = await response.json(content_type=None)\n if (\"results\" in data):\n data = data[\"results\"]\n results = []\n for item in data:\n item = self.process_consumption(item)\n\n # For some reason, the end point returns slightly more data than we requested, so we need to filter out\n # the results\n if as_utc(item[\"interval_start\"]) >= period_from and as_utc(item[\"interval_end\"]) <= period_to:\n results.append(item)\n \n return results\n \n return None", "title": "" }, { "docid": "a18099ef3afe246129b351424dc01792", "score": "0.4426795", "text": "def find_overhang5(self):\n n = len(self)\n for i in range(2,n):\n overhang = self[:i]\n if overhang.get_type() == \"5'-overhang\":\n return overhang", "title": "" }, { "docid": "e9b21a9106824eb99a83fc3136d47a5a", "score": "0.44212055", "text": "def get_last_five(cls) -> int:\n return cls._collection.find(sort=[(\"end_date\", -1)]).limit(5)", "title": "" }, { "docid": "13fb271c9a50b5ef8c393832220a5d22", "score": "0.4419905", "text": "def range(self) -> str:\n return self.__range", "title": "" }, { "docid": "f5598971e3644c82955c4823a2f24dce", "score": "0.44111705", "text": "def quality_range(self):\n return range(1, 5)", "title": "" }, { "docid": "85259c71fc0751048f53b81f56e2b5ee", "score": "0.4404482", "text": "def get_object(Path=None, Range=None):\n pass", "title": "" }, { "docid": "0f64a3ca857a55418da71c747faebfa7", "score": "0.44033226", "text": "def get_object(self, Path: str, Range: str = None) -> Dict:\n pass", "title": "" }, { "docid": "375c3d205892bffa14dfdc3636e6469a", "score": "0.44025877", "text": "def get_values(self):\r\n try:\r\n assert self._db_connection, {\r\n STATUS_KEY: HTTP_500_INTERNAL_SERVER_ERROR,\r\n MESSAGE_KEY: DB_ERROR}\r\n graph = []\r\n empty_dict = {\"data\": [],\r\n \"description\": None,\r\n \"unit\": None,\r\n \"min_data\": None,\r\n \"max_data\": None\r\n }\r\n dict1 = {}\r\n dict2 = {}\r\n dict3 = {}\r\n dict4 = {}\r\n dict5 = {\"data\": []}\r\n if self.equipment == COKE_DRUM_VALUE and self.module == OUTAGE_VALUE:\r\n query_params = {\r\n START_DATE_REQUEST: 
self.query_params.GET[START_DATE_REQUEST],\r\n END_DATE_REQUEST: self.query_params.GET[END_DATE_REQUEST]\r\n }\r\n\r\n if PRIMARY_TAG not in self.query_params.GET:\r\n \"\"\"\r\n This will return the outage multiline graph data without the primary tag\r\n \"\"\"\r\n multiline_tags = tuple(LIST_OF_OUTAGE_MULTILINE_TAGS)\r\n if query_params[START_DATE_REQUEST] and query_params[END_DATE_REQUEST]:\r\n multi_line = django_search_query_all(OUTAGE_MULTILINE.format(\r\n self.module,\r\n multiline_tags,\r\n query_params[START_DATE_REQUEST],\r\n query_params[END_DATE_REQUEST]))\r\n df_data = pd.DataFrame(multi_line)\r\n\r\n min_max = django_search_query_all(OUTAGE_MIN_MAX_DATA.format(\r\n self.module,\r\n multiline_tags))\r\n\r\n elif PRIMARY_TAG in self.query_params.GET:\r\n query_params[PRIMARY_TAG] = self.query_params.GET[PRIMARY_TAG]\r\n if query_params[START_DATE_REQUEST] and query_params[END_DATE_REQUEST] and query_params[\r\n PRIMARY_TAG]:\r\n \"\"\"\r\n This will return the outage multiline graph data with the primary tag\r\n \"\"\"\r\n LIST_OF_OUTAGE_MULTILINE_TAGS.append(query_params[\"primary_tag\"])\r\n tags = tuple(LIST_OF_OUTAGE_MULTILINE_TAGS)\r\n multi_line = django_search_query_all(OUTAGE_MULTILINE.format(\r\n self.module,\r\n tags,\r\n query_params[START_DATE_REQUEST],\r\n query_params[END_DATE_REQUEST]))\r\n df_data = pd.DataFrame(multi_line)\r\n min_max = django_search_query_all(OUTAGE_MIN_MAX_DATA.format(\r\n self.module,\r\n tags))\r\n LIST_OF_OUTAGE_MULTILINE_TAGS.pop()\r\n\r\n if PRIMARY_TAG in self.query_params.GET:\r\n if not df_data.empty:\r\n query_params[PRIMARY_TAG] = self.query_params.GET[PRIMARY_TAG]\r\n final_dict = {\r\n \"Coke Height\": dict1,\r\n \"Foam Height\": dict2,\r\n \"Current Outage\": dict3,\r\n query_params[PRIMARY_TAG]: dict4,\r\n \"x-axis\": dict5,\r\n \"online-drum\": []\r\n }\r\n else:\r\n final_dict = {\r\n \"Coke Height\": empty_dict,\r\n \"Foam Height\": empty_dict,\r\n \"Current Outage\": empty_dict,\r\n query_params[PRIMARY_TAG]: empty_dict,\r\n \"x-axis\": dict5,\r\n \"online-drum\": []\r\n }\r\n else:\r\n if not df_data.empty:\r\n final_dict = {\r\n \"Coke Height\": dict1,\r\n \"Foam Height\": dict2,\r\n \"Current Outage\": dict3,\r\n \"x-axis\": dict5,\r\n \"tags_list\": LIST_OF_OUTAGE_PRIMARY_TAGS,\r\n \"online-drum\": []\r\n }\r\n else:\r\n final_dict = {\r\n \"Coke Height\": empty_dict,\r\n \"Foam Height\": empty_dict,\r\n \"Current Outage\": empty_dict,\r\n \"x-axis\": dict5,\r\n \"tags_list\": LIST_OF_OUTAGE_PRIMARY_TAGS,\r\n \"online-drum\": []\r\n }\r\n df_min_max_data = pd.DataFrame(min_max)\r\n\r\n if not df_data.empty:\r\n df_data = df_data.where(pd.notnull(df_data) == True, None)\r\n df_data.sort_values(TIMESTAMP_KEY, ascending=True, inplace=True)\r\n data_now = df_data.groupby(TAG_NAME_REQUEST)\r\n df_time = df_data[TIMESTAMP_KEY].unique()\r\n data_online_coke = data_now.get_group(COKE_HEIGHT_TAG)\r\n data_online_foam = data_now.get_group(FOAM_HEIGHT_TAG)\r\n data_online_current = data_now.get_group(FOAM_HEIGHT_TAG)\r\n if not data_online_coke.empty:\r\n final_dict[\"online-drum\"] = list(data_online_coke[DRUM_ONLINE])\r\n elif not data_online_foam.empty:\r\n final_dict[\"online-drum\"] = list(data_online_foam[DRUM_ONLINE])\r\n elif not data_online_current.empty:\r\n final_dict[\"online-drum\"] = list(data_online_current[DRUM_ONLINE])\r\n old_dict = {}\r\n for name, group in data_now:\r\n old_dict[name] = list(group[TAG_VALUE])\r\n keys = []\r\n for key in old_dict.keys():\r\n keys.append(key)\r\n if COKE_HEIGHT_TAG in keys:\r\n 
unit = df_data[df_data[TAG_NAME_REQUEST].str.contains(COKE_HEIGHT)][UNIT].iloc[0]\r\n description = df_data[df_data[TAG_NAME_REQUEST].str.contains(COKE_HEIGHT)][DESCRIPTION].iloc[0]\r\n min_data = \\\r\n df_min_max_data[df_min_max_data[TAG_NAME_REQUEST].str.contains(COKE_HEIGHT)][\r\n MIN_VALUE].iloc[\r\n 0]\r\n max_data = \\\r\n df_min_max_data[df_min_max_data[TAG_NAME_REQUEST].str.contains(COKE_HEIGHT)][\r\n MAX_VALUE].iloc[\r\n 0]\r\n dict1[\"data\"] = old_dict[COKE_HEIGHT_TAG]\r\n dict1[\"unit\"] = unit\r\n dict1[\"description\"] = description\r\n dict1[\"min_data\"] = min_data\r\n dict1[\"max_data\"] = max_data\r\n elif COKE_HEIGHT_TAG not in keys:\r\n dict1[\"data\"] = None\r\n dict1[\"unit\"] = None\r\n dict1[\"description\"] = None\r\n dict1[\"min_data\"] = None\r\n dict1[\"max_data\"] = None\r\n if FOAM_HEIGHT_TAG in keys:\r\n unit = df_data[df_data[TAG_NAME_REQUEST].str.contains(FOAM_HEIGHT)][UNIT].iloc[0]\r\n description = df_data[df_data[TAG_NAME_REQUEST].str.contains(FOAM_HEIGHT)][DESCRIPTION].iloc[0]\r\n min_data = \\\r\n df_min_max_data[df_min_max_data[TAG_NAME_REQUEST].str.contains(FOAM_HEIGHT)][\r\n MIN_VALUE].iloc[0]\r\n max_data = \\\r\n df_min_max_data[df_min_max_data[TAG_NAME_REQUEST].str.contains(FOAM_HEIGHT)][\r\n MAX_VALUE].iloc[0]\r\n dict2[\"data\"] = old_dict[FOAM_HEIGHT_TAG]\r\n dict2[\"unit\"] = unit\r\n dict2[\"description\"] = description\r\n dict2[\"min_data\"] = min_data\r\n dict2[\"max_data\"] = max_data\r\n elif FOAM_HEIGHT_TAG not in keys:\r\n dict2[\"data\"] = None\r\n dict2[\"unit\"] = None\r\n dict2[\"description\"] = None\r\n dict2[\"min_data\"] = None\r\n dict2[\"max_data\"] = None\r\n if OUTAGE_TREND_TAG in keys:\r\n unit = df_data[df_data[TAG_NAME_REQUEST].str.contains(CURRENT_OUTAGE)][UNIT].iloc[0]\r\n description = df_data[df_data[TAG_NAME_REQUEST].str.contains(CURRENT_OUTAGE)][DESCRIPTION].iloc[\r\n 0]\r\n min_data = \\\r\n df_min_max_data[df_min_max_data[TAG_NAME_REQUEST].str.contains(CURRENT_OUTAGE)][\r\n MIN_VALUE].iloc[\r\n 0]\r\n max_data = \\\r\n df_min_max_data[df_min_max_data[TAG_NAME_REQUEST].str.contains(CURRENT_OUTAGE)][\r\n MAX_VALUE].iloc[\r\n 0]\r\n dict3[\"data\"] = old_dict[OUTAGE_TREND_TAG]\r\n dict3[\"unit\"] = unit\r\n dict3[\"description\"] = description\r\n dict3[\"min_data\"] = min_data\r\n dict3[\"max_data\"] = max_data\r\n elif OUTAGE_TREND_TAG not in keys:\r\n dict3[\"data\"] = None\r\n dict3[\"unit\"] = None\r\n dict3[\"description\"] = None\r\n dict3[\"min_data\"] = None\r\n dict3[\"max_data\"] = None\r\n\r\n if PRIMARY_TAG in self.query_params.GET:\r\n if query_params[PRIMARY_TAG] in keys:\r\n data_online_primary = data_now.get_group(query_params[PRIMARY_TAG])\r\n if not data_online_primary.empty:\r\n final_dict[\"online-drum\"] = list(data_online_primary[DRUM_ONLINE])\r\n else:\r\n print(\"sorry\")\r\n unit = \\\r\n df_data[df_data[TAG_NAME_REQUEST].str.contains(query_params[PRIMARY_TAG])][UNIT].iloc[0]\r\n description = \\\r\n df_data[df_data[TAG_NAME_REQUEST].str.contains(query_params[PRIMARY_TAG])][\r\n DESCRIPTION].iloc[0]\r\n min_data = \\\r\n df_min_max_data[\r\n df_min_max_data[TAG_NAME_REQUEST].str.contains(query_params[PRIMARY_TAG])][\r\n MIN_VALUE].iloc[0]\r\n max_data = \\\r\n df_min_max_data[\r\n df_min_max_data[TAG_NAME_REQUEST].str.contains(query_params[PRIMARY_TAG])][\r\n MAX_VALUE].iloc[0]\r\n dict4[\"data\"] = old_dict[query_params[PRIMARY_TAG]]\r\n dict4[\"unit\"] = unit\r\n dict4[\"description\"] = description\r\n dict4[\"min_data\"] = min_data\r\n dict4[\"max_data\"] = max_data\r\n else:\r\n 
dict4[\"data\"] = None\r\n dict4[\"unit\"] = None\r\n dict4[\"description\"] = None\r\n dict4[\"min_data\"] = None\r\n dict4[\"max_data\"] = None\r\n else:\r\n print(\"sorry\")\r\n dict5[\"data\"] = list(df_time)\r\n\r\n graph.append(final_dict)\r\n return JsonResponse(graph, safe=False)\r\n\r\n except AssertionError as e:\r\n log_error(\"Assertion error due to : %s\" + str(e))\r\n return JsonResponse({MESSAGE_KEY: e.args[0][MESSAGE_KEY]},\r\n status=e.args[0][STATUS_KEY])\r\n\r\n except Exception as e:\r\n log_error(\"Exception due to : %s\" + str(e))\r\n return JsonResponse({MESSAGE_KEY: EXCEPTION_CAUSE.format(\r\n traceback.format_exc())},\r\n status=HTTP_500_INTERNAL_SERVER_ERROR)", "title": "" }, { "docid": "4401ac5a5b0e7ba599161a44356d0300", "score": "0.4394565", "text": "def GetScalarRange(self):\n ...", "title": "" }, { "docid": "abc0bda81847416d7812345c44948884", "score": "0.43939194", "text": "def get(self):\n ee.Initialize(config.EE_CREDENTIALS, config.EE_URL)\n self.initData()\n\n targetRegion = self.request.get('region')\n if(not targetRegion == \"\"):\n self.updateRegion(targetRegion)\n else:\n for regionKey in self.regions.keys():\n self.updateRegion(regionKey)", "title": "" }, { "docid": "716e936eb533b7108c894f895c4a2572", "score": "0.4392177", "text": "def _fetch_range(self, obj_dict, start=None, end=None):\n if start is not None or end is not None:\n start = start or 0\n end = end or 0\n head = {'Range': 'bytes=%i-%i' % (start, end - 1)}\n else:\n head = None\n try:\n r = self.gcsfs._call('GET', obj_dict['mediaLink'],\n headers=head)\n data = r.content\n return data\n except RuntimeError as e:\n if 'not satisfiable' in str(e):\n return b''", "title": "" }, { "docid": "962babbc8585ab59a3cc71184e49b760", "score": "0.43838722", "text": "def get_range(reg):\n reg = _string_check(reg)\n return (None, None) if reg in (\n Register.DURATION, Register.TIME) else _RAW_RANGE", "title": "" }, { "docid": "1ab13a72839afe4ea87fcc6bd46e3280", "score": "0.43792513", "text": "def fetch(klass, conn):\n # TODO: validate response\n resp = conn.describe_regions()\n return klass(resp[\"Regions\"])", "title": "" }, { "docid": "21969aad2e5b6d9f10ad6db52e9e47fe", "score": "0.43761075", "text": "def reserved_ip_range(self) -> str:\n return pulumi.get(self, \"reserved_ip_range\")", "title": "" }, { "docid": "fd0761ff25940c32c2db4cb7b30b3311", "score": "0.4372534", "text": "def range(self) -> Optional['outputs.RouteSpecHttpRouteMatchHeaderMatchRange']:\n return pulumi.get(self, \"range\")", "title": "" }, { "docid": "05fa7ee8192fe61655bcadbc8cd32b51", "score": "0.43709877", "text": "def get(self, request, *args, **kwargs):\n import ast\n\n base_queryset = self.filter_queryset(self.get_queryset())\n interval = self.request.query_params.get('interval', '1d')\n per_host = ast.literal_eval(\n self.request.query_params.get('per_host', 'True'))\n\n data = LogData.ranged_log_agg(base_queryset, interval, per_host)\n serializer = self.serializer_class(data)\n\n return Response(serializer.data)", "title": "" }, { "docid": "9542bd3aaea91b23aa2702f048bb368a", "score": "0.4366135", "text": "def head(self, n=5):\n sql = f'SELECT * FROM data LIMIT {n}'\n response_data = self.fetch_query(sql=sql)\n try:\n gdf = gpd.GeoDataFrame(response_data)\n if 'geometry' in gdf:\n gdf = gdf.set_geometry('geometry')\n return gdf\n except:\n raise ValueError(f'Unable to get table {self.id}')", "title": "" }, { "docid": "9afeff8a56b428fc929f0f67ccb8ba7f", "score": "0.43390888", "text": "def _5qi(self):\n return self.__5qi", 
"title": "" }, { "docid": "9afeff8a56b428fc929f0f67ccb8ba7f", "score": "0.43390888", "text": "def _5qi(self):\n return self.__5qi", "title": "" }, { "docid": "ab9dfaa729d4430f4d5c8880fa2f2337", "score": "0.4336726", "text": "def GetOperation(self):\n\n gce_instance_client = self.project.GceApi().instances()\n request = gce_instance_client.get(\n instance=self.name, project=self.project.project_id, zone=self.zone)\n response = request.execute()\n return response", "title": "" }, { "docid": "a26907c6ddf9d6e806d397e2f7801b2d", "score": "0.43358657", "text": "def ip_range(self, start, end):\n return ip4_range(self.pg0.remote_ip4, start, end)", "title": "" }, { "docid": "5bc1a2c6061e60c0006fb84830e2d55f", "score": "0.43354812", "text": "def get_timeranges():\n\trequired_fields = {'source':'componentpublishers'}\n\tresponse = PreparedRequest('timeranges', required_fields).send()\n\n\treturn response", "title": "" }, { "docid": "ccc5438a8e4ab53b9d9603038d076918", "score": "0.43342665", "text": "def extract_snippet():\n\n inpath = '/lustre/pulsar/users/rprestag/1713+0747_global/raw/'\n rawfile = 'guppi_56465_J1713+0747_0006.0000.raw'\n g = GbtRaw(inpath + rawfile)\n data = g.extract(0,3)\n\n # Cedric idenfied radar intereference starting 24.5M samples\n # from the start of the GUPPI raw data file.\n start = 24500000\n end = start + 500000\n\n # channel 23 on web pages is 22 in disk file\n chan23 = (data[22,start:end,:])\n\n # save to a npy file\n np.save('chan23.npy', chan23)", "title": "" }, { "docid": "41f9b8e98108b3ef43b843ee7422d34d", "score": "0.43339393", "text": "def h5(self):\n return self._h5", "title": "" }, { "docid": "bd268d4976b21d10cd6db4f9c81755a7", "score": "0.43334383", "text": "def get_5g_info(self, test=False):\n theLog = \"Get 5G Info\"\n parseNode = f\".//{c.GET_5G_INFO}Response\"\n toParse = [\n 'NewEnable',\n 'NewSSIDBroadcast',\n 'NewStatus',\n 'NewSSID',\n 'NewRegion',\n 'NewChannel',\n 'NewWirelessMode',\n 'NewBasicEncryptionModes',\n 'NewWEPAuthType',\n 'NewWPAEncryptionModes',\n 'NewWLANMACAddress',\n ]\n\n theInfo = self._get(\n theLog, c.SERVICE_WLAN_CONFIGURATION, c.GET_5G_INFO,\n parseNode, toParse, test\n )\n\n return theInfo", "title": "" }, { "docid": "8adff419b83c8f1e1ecc6d15904f9616", "score": "0.43286598", "text": "def get_one_genome(self, params, token=None):\n\n callback_url = os.environ.get('SDK_CALLBACK_URL')\n if callback_url:\n print('fetching genome object using WsLargeDataIO')\n ws_large_data = WsLargeDataIO(callback_url)\n res = ws_large_data.get_objects(params)['data'][0]\n data = json.load(open(res['data_json_file']))\n else:\n print('fetching genome object using Workspace')\n ws_client = Workspace(self.ws_url, token=token)\n data = ws_client.get_objects2(params)[\"data\"][0][\"data\"]\n\n return data", "title": "" }, { "docid": "55c4af0cf28aa525256c939b7cff8d3b", "score": "0.4324199", "text": "def __call__(self, src, proxy = None, out = 5):\n try:\n data = self.queryTier0(src, proxy, out)\n gtnames = [str(di['global_tag']).replace(\"::All\", \"\") for di in data]\n return unique(gtnames)\n except TypeError, t:\n errStr = \"\"\"Cannot retrieve list of Global Tags used at Tier-0 from URL \\\"%s\\\"\"\"\" %(src,)\n if proxy:\n errStr += \"\"\" using proxy \\\"%s\\\"\"\"\" %(str(proxy),)\n errStr += \"\"\" with timeout \\\"%d\\\" since:\n\\t\\\"%s\\\"\"\"\" %(out, str(t))\n raise ValueError(errStr)", "title": "" }, { "docid": "fca4786a58a102a40ba51dcaa62ae199", "score": "0.4323879", "text": "def range(self) -> 
Optional['outputs.RouteSpecGrpcRouteMatchMetadataMatchRange']:\n return pulumi.get(self, \"range\")", "title": "" }, { "docid": "997aa09ec59d1b0134a487f3706c76a3", "score": "0.4322753", "text": "def get_range(self, scan_num, moment):\n dic = self.radial_records[self.scan_msgs[scan_num][0]][moment]\n ngates = dic['ngates']\n first_gate = dic['first_gate']\n gate_spacing = dic['gate_spacing']\n return np.arange(ngates) * gate_spacing + first_gate", "title": "" }, { "docid": "a563e2e03fabb63d98dc7d2a44bb2361", "score": "0.43185335", "text": "def get():\n self.s.poll()\n pos = self.s.actual_position\n temp = []\n axis = axis_max = axis_min = {}\n for i in self.axisInMachine:\n axis.update({\"%s\" % self.axis[i]: pos[i]})\n axis_max.update({\"%s\" % self.axis[i]: self.s.axis[i][\"max_position_limit\"]})\n axis_min.update({\"%s\" % self.axis[i]: self.s.axis[i][\"min_position_limit\"]})\n Rjson = {\"axis\": axis, \"axis_max\": axis_max, \"axis_min\": axis_min}\n return Rjson", "title": "" }, { "docid": "35d5ae5d61f2c9dbfb3b9c85767eac4c", "score": "0.4312333", "text": "def range(self) -> Optional['outputs.GatewayRouteSpecHttp2RouteMatchHeaderMatchRange']:\n return pulumi.get(self, \"range\")", "title": "" }, { "docid": "6257346c09d9077ad2d177aa48b4c03b", "score": "0.43115595", "text": "def _read_ogpwg_4(self, data: bytes, ndata: int):\n op2 = self.op2\n if op2.read_mode == 1:\n return ndata\n #print(' num_wide = %r' % self.num_wide)\n size = op2.size\n fmt_mo = mapfmt(b'36f', size)\n fmt_s = mapfmt(b'9f', size)\n fmt_mxyz = mapfmt(b'12f', size)\n fmt_iq = mapfmt(b'3f', size)\n\n MO = array(unpack(fmt_mo, data[:36*size]))\n MO = MO.reshape(6, 6)\n\n S = array(unpack(fmt_s, data[36*size:(36+9)*size]))\n S = S.reshape(3, 3)\n\n mxyz = array(unpack(fmt_mxyz, data[(36+9)*size:(36+9+12)*size]))\n mxyz = mxyz.reshape(3, 4)\n mass = mxyz[:, 0]\n cg = mxyz[:, 1:]\n\n IS = array(unpack(fmt_s, data[(36+9+12)*size:(36+9+12+9)*size]))\n IS = IS.reshape(3, 3)\n\n IQ = array(unpack(fmt_iq, data[(36+9+12+9)*size:(36+9+12+9+3)*size]))\n\n Q = array(unpack(fmt_s, data[(36+9+12+9+3)*size:(36+9+12+9+3+9)*size]))\n Q = Q.reshape(3, 3)\n\n #print(self.object_attributes())\n #print(self._count)\n #print(self.title)\n #print(self.subtitle)\n #print(self.label)\n #print(self.pval_step)\n #print(self.superelement_adaptivity_index)\n weight = GridPointWeight(\n op2.reference_point,\n MO, S, mass, cg, IS, IQ, Q,\n approach_code=op2.approach_code, table_code=op2.table_code,\n title=op2.title, subtitle=op2.subtitle, label=op2.label,\n superelement_adaptivity_index=op2.superelement_adaptivity_index,\n )\n str(weight)\n op2.grid_point_weight[op2.superelement_adaptivity_index] = weight\n #del self.reference_point\n return ndata", "title": "" }, { "docid": "1c390ef2c4986bda44edb1ea339cbbac", "score": "0.43074363", "text": "def _range(self):\n x, y = self.position\n x += 1\n y += 1 # correct to excel 1 based index\n # XW passes position tuples as row, column\n return self.sheet.i7e_sheet.range(y, x)", "title": "" }, { "docid": "dd5de00fc0e92dc117b275dca55cf895", "score": "0.4307173", "text": "def get_data(self, number_packets=None):\n logging.debug(\"getting data\")\n end_pt = self.device[0][(0, 0)][0]\n full_array = []\n # calculate how many packets of data to get from the amp device the usb_count param is\n # how many data points there are (+1 us for the 0xC000 sent at the end)\n # packet_size / 2 is because the data is converted to uint8 and minus 1 for 0 indexing\n # from the uint16 it is acquired in \"\"\"\n if not 
number_packets:\n number_packets = ((self.device_params.usb_count + 1) / (USB_IN_BYTE_SIZE / 2) - 1)\n\n logging.debug(\"get %d number of packets\", number_packets)\n count = 0\n running = True\n while number_packets + 1 > count and running:\n try:\n usb_input = self.device.read(end_pt.bEndpointAddress, USB_IN_BYTE_SIZE, 1000)\n _hold = convert_uint8_to_signed_int16(usb_input.tolist())\n full_array.extend(_hold)\n if TERMINATION_CODE in _hold:\n full_array = full_array[:full_array.index(TERMINATION_CODE)]\n logging.debug(\n \"got termination code at count: {0}, {1}\".format(count, len(full_array)))\n break\n count += 1\n except Exception as error:\n logging.debug(\"end of ENDPOINT\")\n logging.debug(error)\n running = False\n\n return full_array", "title": "" }, { "docid": "987e1b82979382fe4657227207196b4e", "score": "0.43033653", "text": "def grab_server(self):\n num_threads = 10\n\n ips = [i for i in xrange(2, 255)]\n ips.reverse()\n\n perfix_origin = self.cur_net_segment.net().strNormal()\n perfix = '.'.join(perfix_origin.split('.')[0:3]) + '.'\n\n server = None\n circle_num = 253/num_threads + int(253%num_threads!=0)\n for i in xrange(circle_num):\n start = i * num_threads\n end = start + num_threads\n\n pool = ThreadPool(num_threads=num_threads)\n for ip in ips[start:end]:\n pool.add_task(self.get_server, perfix + str(ip))\n pool.destroy()\n results = pool.show_results()\n for ip in results:\n if ip:\n server = ip\n break\n if server:\n break\n log.info(\"current server's ip we grab is: %s\" % server)\n return server", "title": "" }, { "docid": "4a278ef5341bb62c60c80f447f5132f0", "score": "0.43027142", "text": "def single_tagged_range(self) -> VlanLogicalMatchTopMatchSingleTaggedRange:\n return self._single_tagged_range", "title": "" }, { "docid": "845a285d641f7a70ec0fd8e3d7fef40e", "score": "0.4293334", "text": "def get_range(cls, message, patient):\n q = cls.all()\n q.filter('patient =', patient)\n q.filter('time_taken >=', message.start_time)\n q.filter('time_taken <=', message.end_time)\n q.order('time_taken')\n\n pdata_list = [ pdata.to_message() for pdata in q ]\n\n return PQuantDataListResponse(pdata_list=pdata_list)", "title": "" }, { "docid": "7d392190ac6041642007dc1880cfb41e", "score": "0.4284752", "text": "def get_sg_details():\n check = external_Id.get()\n if not check.strip():\n external_Id.set(\"Enter Valid string!!!\")\n else:\n obj = Dome9SG(sg_id=external_Id.get())\n sg_details_json = obj.get_sg_by_id()\n if \"Result\" in sg_details_json.keys():\n not_found()\n else:\n security_Group_Name.set(sg_details_json[\"securityGroupName\"])\n description.set(sg_details_json[\"description\"])\n vpc_Id.set(sg_details_json[\"vpcId\"])\n region_Id.set(sg_details_json[\"regionId\"])\n cloud_Account_Name.set(sg_details_json[\"cloudAccountName\"])\n AWS_AC_ID.set(obj.aws_dome9map[sg_details_json[\"cloudAccountId\"]])", "title": "" }, { "docid": "ce44ba9daa267abafc888f49cd9da126", "score": "0.42838007", "text": "def get(self, user_id):\n args = get_regions_reqparser.parse_args()\n\n if args[\"unit\"] == \"seconds\":\n from_unix = args[\"from\"] * 1_000_000_000\n to_unix = args[\"to\"] * 1_000_000_000\n elif args[\"unit\"] == \"milliseconds\":\n from_unix = args[\"from\"] * 1_000_000\n to_unix = args[\"to\"] * 1_000_000\n elif args[\"unit\"] == \"microseconds\":\n from_unix = args[\"from\"] * 1_000\n to_unix = args[\"to\"] * 1_000\n elif args[\"unit\"] == \"nanoseconds\":\n from_unix = args[\"from\"]\n to_unix = args[\"to\"]\n else:\n api.abort(\n 400,\n \"Unit needs to be either 
seconds, miliseconds, microseconds or nanoseconds.\",\n )\n\n # InfluxQL does not support substrings in quries...\n query = f\"SELECT * FROM user_locations WHERE user_id = '{user_id}' AND time >= {from_unix} AND time <= {to_unix};\"\n try:\n result = db.query(query)\n except Exception as e:\n logger.warning(f\"Error while querying Influx:\\n {str(e)}\")\n api.abort(500, \"Error while querying Influx.\")\n else:\n result_regions = [\n p[\"geohash\"][0] for p in result.get_points(measurement=\"user_locations\")\n ]\n return result_regions, 200", "title": "" }, { "docid": "5975b1c947d9e11684674c8e2cca6464", "score": "0.4283298", "text": "def getEntries(request,range):", "title": "" }, { "docid": "fdc40c23b83295b3d55063f4e7c40679", "score": "0.42824876", "text": "def range(self):\n\n raise NotImplementedError", "title": "" }, { "docid": "cd5b69cf2f40525618824c5e85aa6e9d", "score": "0.42788824", "text": "def secondary_ip_range(self) -> str:\n return pulumi.get(self, \"secondary_ip_range\")", "title": "" }, { "docid": "9c69f412e92665968955fba0abe5e402", "score": "0.4268382", "text": "async def get(self, name):\n reg = REGISTERS[name]\n response = await self.read_register(self.client, reg.register, reg.length)\n\n if reg.type == \"str\":\n result = response.decode(\"utf-8\").strip(\"\\0\")\n\n elif reg.type == \"u16\" and reg.unit == \"status_enum\":\n result = DEVICE_STATUS_DEFINITIONS[response.hex()]\n\n elif reg.type == \"u16\" and reg.unit == \"grid_enum\":\n tmp = int.from_bytes(response, byteorder=\"big\")\n result = GRID_CODES[tmp]\n\n elif reg.type == \"u32\" and reg.unit == \"epoch\":\n tmp = int.from_bytes(response, byteorder=\"big\")\n if self._time_offset is None:\n self._time_offset = self.get(\"time_zone\").value\n tmp2 = datetime.utcfromtimestamp(tmp - 60 * self._time_offset)\n # don't use local time information and use UTC time\n # which we got from systemtime - time zone offset.\n # not yet sure about the is_dst setting\n result = pytz.utc.normalize(pytz.utc.localize(tmp2, is_dst=True))\n\n elif reg.type == \"u16\" or reg.type == \"u32\":\n tmp = int.from_bytes(response, byteorder=\"big\")\n if reg.gain == 1:\n result = tmp\n else:\n result = tmp / reg.gain\n\n elif reg.type == \"i16\":\n tmp = int.from_bytes(response, byteorder=\"big\")\n if (tmp & 0x8000) == 0x8000:\n # result is actually negative\n tmp = -((tmp ^ 0xFFFF) + 1)\n if reg.gain == 1:\n result = tmp\n else:\n result = tmp / reg.gain\n\n elif reg.type == \"i32\":\n tmp = int.from_bytes(response, byteorder=\"big\")\n if (tmp & 0x80000000) == 0x80000000:\n # result is actually negative\n tmp = -((tmp ^ 0xFFFFFFFF) + 1)\n if reg.gain == 1:\n result = tmp\n else:\n result = tmp / reg.gain\n\n elif reg.type == \"alarm_bitfield16\":\n code = int.from_bytes(response, byteorder=\"big\")\n result = []\n alarm_codes = ALARM_CODES[name]\n for key in alarm_codes:\n if key & code:\n result.append(alarm_codes[key])\n\n elif reg.type == \"alarm_bitfield16_raw\":\n code = int.from_bytes(response, byteorder=\"big\")\n result = str(code)\n\n elif reg.type == \"state_bitfield16\":\n code = int.from_bytes(response, byteorder=\"big\")\n result = []\n for key in STATE_CODES_1:\n if key & code:\n result.append(STATE_CODES_1[key])\n\n elif reg.type == \"state_opt_bitfield16\":\n code = int.from_bytes(response, byteorder=\"big\")\n result = []\n for key in STATE_CODES_2:\n bit = key & code\n if bit:\n result.append(STATE_CODES_2[key][1])\n else:\n result.append(STATE_CODES_2[key][0])\n\n elif reg.type == \"state_opt_bitfield32\":\n code = 
int.from_bytes(response, byteorder=\"big\")\n result = []\n for key in STATE_CODES_3:\n bit = key & code\n if bit:\n result.append(STATE_CODES_3[key][1])\n else:\n result.append(STATE_CODES_3[key][0])\n\n else:\n result = int.from_bytes(response, byteorder=\"big\")\n\n return Result(result, reg.unit)", "title": "" }, { "docid": "f71ba891c9b556037f58e6b3cc1af8fe", "score": "0.42675066", "text": "def get(self, address):\n pass", "title": "" }, { "docid": "4943d3cb4b5c93dc51eb0fbdb5c330c7", "score": "0.4265662", "text": "def eGetRawS(Handle, pIOType, Channel, pValue, x1):\n pass", "title": "" }, { "docid": "a09c5d10f2a25c06278666112f0188c6", "score": "0.426257", "text": "def global_range(self):\n raise NotImplementedError", "title": "" }, { "docid": "a3f851cd990db3a3cf32753b5b3f672b", "score": "0.42579716", "text": "def GetOperation(self):\n\n gce_snapshot_client = self.project.GceApi().snapshots()\n request = gce_snapshot_client.get(\n snapshot=self.name, project=self.project.project_id)\n response = request.execute()\n return response", "title": "" }, { "docid": "73223ad9118a81932ca708050f4a60dc", "score": "0.42578402", "text": "async def fetch_async(\n self,\n end: Union[str, object] = values.unset,\n start: Union[str, object] = values.unset,\n ) -> UsageInstance:\n\n data = values.of(\n {\n \"End\": end,\n \"Start\": start,\n }\n )\n\n payload = await self._version.fetch_async(\n method=\"GET\", uri=self._uri, params=data\n )\n\n return UsageInstance(\n self._version,\n payload,\n sim_sid=self._solution[\"sim_sid\"],\n )", "title": "" }, { "docid": "e684be212cb48b40a93c0a6d98022733", "score": "0.42486933", "text": "async def fetch_async(\n self,\n end: Union[str, object] = values.unset,\n start: Union[str, object] = values.unset,\n ) -> \"UsageInstance\":\n return await self._proxy.fetch_async(\n end=end,\n start=start,\n )", "title": "" }, { "docid": "7a6f515834ca5a08e5728f3c3117d4f6", "score": "0.42476377", "text": "def range(self) -> Optional['outputs.RouteSpecHttp2RouteMatchHeaderMatchRange']:\n return pulumi.get(self, \"range\")", "title": "" }, { "docid": "b8efd7a778df0b742cb240146c940d29", "score": "0.42390168", "text": "def __call__(self):\n vmin, vmax = self.axis.get_view_interval()\n if vmax<vmin:\n vmin, vmax = vmax, vmin\n gran_name, gran_size, tick_count = get_granularity(vmax, self.mintickcount, self.maxtickcount, just_get_granularity = False, allow_non_sql_granularities = True)\n return range(int(vmin), int(vmax), int(gran_size))", "title": "" }, { "docid": "f81df2366f5af793ae794b64486851cd", "score": "0.42343512", "text": "def chunks(self):\n chunks = self.h5.attrs.get('chunks', None)\n if chunks is None:\n chunks = self.h5.chunks['latitude']\n\n return chunks", "title": "" }, { "docid": "cb07d5ab4a8f9cca79e5ae04b8dc0f4c", "score": "0.42337114", "text": "def get(self):\n\n return self.conn.network.find_subnet(self.name)", "title": "" }, { "docid": "0cac36cef68acf7659bf8165bad0238b", "score": "0.4232916", "text": "def get_range(self):\n return [self.start_time, self.end_time]", "title": "" }, { "docid": "e0d98248e3239118eb621aea7a997279", "score": "0.423081", "text": "def getResponse(self, req):\n ack = yield self.request(req)\n if (not ack):\n #raise Error('bad request to gauge')\n returnValue('bad request to gauge')\n else:\n \n nb_enq = yield self.writeENQ()\n resp = yield self._read(self.getNumBytes(req))\n if resp == '':\n returnValue('gauge off or not responding')\n else:\n resp = self.strip(resp)\n returnValue(resp)", "title": "" }, { "docid": 
"d4d91ecac64472b407a9d8030212d77a", "score": "0.42306644", "text": "def get_observation(self, offering, properties, time_range):\n return get_observation(self._get_api_url(),\n self._get_procedure(offering),\n properties, time_range)", "title": "" }, { "docid": "2ee2bdb5072a8815d8f578f5cc24d8d9", "score": "0.42237267", "text": "def getRange(self, param):\n\t\tif param == \"Method\":\n\t\t\treturn (\"Mean\",)", "title": "" }, { "docid": "56166089e5a85943955680f0d9a13768", "score": "0.4216047", "text": "def get_instance_info(self,instance_id,start_time):\n self.__reconnect_db__();\n sql_cmd='''select cpu_usage,mem_free,mem_max,nic_in,nic_out,disk_read,disk_write,monitor_time uuid from vm_monitor where instance_id='%s' and monitor_time>='%s' '''%(instance_id,start_time);\n #print \" in get instance info by id sql cmd=%s\"%sql_cmd;\n self.cursor.execute(sql_cmd);\n return self.cursor.fetchall();", "title": "" }, { "docid": "197d86f314653dc7b3e5e398f537f715", "score": "0.42031607", "text": "def _get_range_header(self):\n range_header = self.headers.getheader(\"Range\")\n if range_header is None:\n return (None, None)\n if not range_header.startswith(\"bytes=\"):\n print \"Not implemented: parsing header Range: %s\" % range_header\n return (None, None)\n regex = re.compile(r\"^bytes=(\\d+)\\-(\\d+)?\")\n rangething = regex.search(range_header)\n if rangething:\n from_val = int(rangething.group(1))\n if rangething.group(2) is not None:\n return (from_val, int(rangething.group(2)))\n else:\n return (from_val, None)\n else:\n print 'CANNOT PARSE RANGE HEADER:', range_header\n return (None, None)", "title": "" }, { "docid": "abd824b2f06389f98d0e3754994a2dcf", "score": "0.41967982", "text": "def ptp_get_one(self):", "title": "" } ]
d3abaaaadfb5d3eb00b3d6973bdd0eb0
Redirect the user to authorize the client, and get their verification code
[ { "docid": "666ef409caa8b159baa1ccf8098a3d30", "score": "0.65969336", "text": "def get_user_authorization(request_token):\n authorize_url = AUTHORIZE_URL\n authorize_url = authorize_url.format(request_token=request_token)\n print 'Please go here and authorize: ' + authorize_url\n return raw_input('Please input the verifier: ')", "title": "" } ]
[ { "docid": "4e8f86e0e9a605919054f72914dd03a3", "score": "0.78330654", "text": "def authorize(req, resp):\n api.redirect(resp, location=authorize_url())", "title": "" }, { "docid": "3f7f60126b7a2fec15682ebc60a264e2", "score": "0.7126038", "text": "def _authorization_code_flow(self):\n options = {\n 'client_id': self.options.get('client_id'),\n 'response_type': 'code',\n 'redirect_uri': self._redirect_uri()\n }\n url = '%s%s/connect' % (self.scheme, self.host)\n self._authorize_url = '%s?%s' % (url, urlencode(options))", "title": "" }, { "docid": "aef7e82b787bd5e18f00b9f616cc9955", "score": "0.7029746", "text": "def oauth2callback():\n app.logger.debug(\"Entering oauth2callback\")\n flow = client.flow_from_clientsecrets(\n CLIENT_SECRET_FILE,\n scope= SCOPES,\n redirect_uri=flask.url_for('oauth2callback', _external=True))\n app.logger.debug(\"Got flow\")\n if 'code' not in flask.request.args:\n app.logger.debug(\"Code not in flask.request.args\")\n auth_uri = flow.step1_get_authorize_url()\n return flask.redirect(auth_uri)\n else:\n app.logger.debug(\"Code was in flask.request.args\")\n auth_code = flask.request.args.get('code')\n credentials = flow.step2_exchange(auth_code)\n flask.session['credentials'] = credentials.to_json()\n app.logger.debug(\"Got credentials\")\n return flask.redirect(flask.url_for('respond', meeting=flask.session['meeting'], ID=flask.session['ID']))", "title": "" }, { "docid": "8e83d132bc3d2e7929fbe6f05c7e454d", "score": "0.7009494", "text": "def auth_user():\n state = str(datetime.datetime.now().timestamp()) if 'state' not in session else session['state']\n tenant = env('tenant_id')\n client_id = env('client_id')\n client_secret = env('client_secret')\n redir_url = env('redirect_url')\n\n if not r.args.get('code'):\n session['state'] = state\n return redirect(get_authorize_url(tenant, client_id, state, redir_url))\n else:\n code = r.args.get('code')\n returned_state = r.args.get('state')\n if state != returned_state:\n print(state)\n print(returned_state)\n raise SystemError(\"Response state doesn't match request state\")\n\n token = get_token_with_auth_code(tenant, client_id, client_secret, code, redir_url)\n if 'access_token' in token:\n add_token_to_cache(client_id, tenant, token)\n return Response(json.dumps({'status': 'ok', 'message': 'token acquired'}), content_type=CT)\n else:\n raise ValueError(\"token response malformed\")", "title": "" }, { "docid": "5042d3e56ca9b294196f75a9541d792c", "score": "0.68857896", "text": "def authorized():\n code = request.args['code']\n token_resp = strava_client.exchange_code_for_token(\n client_id=app.config['STRAVA_CLIENT_ID'],\n client_secret=app.config['STRAVA_CLIENT_SECRET'],\n code=code\n )\n strava_client.access_token = token_resp[\"access_token\"]\n strava_client.refresh_token = token_resp[\"refresh_token\"]\n athlete = strava_client.get_athlete()\n\n # need something to store the athlete or access token\n # to allow us to get more information\n # profile = check_profile(athlete, access_token)\n # flask_login.login_user(profile)\n return redirect(url_for('main.logged_in', profile_id=athlete.id))", "title": "" }, { "docid": "3549ad0048c01aded43ecd69e249af12", "score": "0.682054", "text": "def auth_view(request):\n\n FLOW.params['state'] = xsrfutil.generate_token(settings.SECRET_KEY,\n request.user)\n\n # If we lose a refresh token at some point, this will allow a re-auth to give us one.\n FLOW.params['approval_prompt'] = 'force'\n\n authorize_url = FLOW.step1_get_authorize_url()\n return 
HttpResponseRedirect(authorize_url)", "title": "" }, { "docid": "13890415f7249011e9798349c73ddf2a", "score": "0.68102837", "text": "def confirm_redirect_uri(self, client_id, code, redirect_uri, client, request, *args, **kwargs):\n auth_code = AuthorizationCode.objects.get(application=client, code=code)\n return auth_code.redirect_uri_allowed(redirect_uri)", "title": "" }, { "docid": "ecc160a372b9e59cac3004d554f4389d", "score": "0.6762997", "text": "def authorized():\n if str(flask.session['state']) != str(flask.request.args['state']):\n raise Exception('state returned to redirect URL does not match!')\n response = MSGRAPH.authorized_response()\n flask.session['access_token'] = response['access_token']\n return flask.redirect('/graphcall')", "title": "" }, { "docid": "f67640816e5e88e4828e59314bee025d", "score": "0.6759112", "text": "def callback():\n # If we're coming back from Globus Auth in an error state, the error\n # will be in the \"error\" query string parameter.\n if 'error' in request.args:\n flash(\"You could not be logged into the portal: \" +\n request.args.get('error_description', request.args['error']))\n return redirect(url_for('home'))\n\n # Set up our Globus Auth/OAuth2 state\n redirect_uri = url_for('callback', _external=True)\n\n client = _load_dlhub_client()\n client.oauth2_start_flow(redirect_uri, refresh_tokens=False)\n\n # If there's no \"code\" query string parameter, we're in this route\n # starting a Globus Auth login flow.\n if 'code' not in request.args:\n # TODO (lw): Should this be used?\n # additional_authorize_params = (\n # {'signup': 1} if request.args.get('signup') else {})\n\n auth_uri = client.oauth2_get_authorize_url()\n return redirect(auth_uri)\n else:\n # If we do have a \"code\" param, we're coming back from Globus Auth\n # and can start the process of exchanging an auth code for a token.\n code = request.args.get('code')\n tokens = client.oauth2_exchange_code_for_tokens(code)\n # id_token = tokens.decode_id_token(client)\n session.update(\n tokens=tokens.by_resource_server,\n is_authenticated=True\n )\n\n return redirect(url_for('home'))", "title": "" }, { "docid": "88b9e81b146cc3c70227c1bfc29f61aa", "score": "0.6745198", "text": "def authorized():\n MSGRAPH.redirect_uri_handler()", "title": "" }, { "docid": "c6568b497a7bf3c1c206e5dcb86843c2", "score": "0.6729471", "text": "def redirectToYellowAntAuthenticationPage(request):\n # Generate a unique ID to identify the user when YA returns an oauth2 code\n user = User.objects.get(id=request.user.id)\n state = str(uuid.uuid4())\n\n # Save the relation between user and state so that we can identify the user\n # when YA returns the oauth2 code\n YellowAntRedirectState.objects.create(user=user.id, state=state)\n\n # Redirect the application user to the YA authentication page.\n # Note that we are passing state, this app's client id,\n # oauth response type as code, and the url to return the oauth2 code at.\n return HttpResponseRedirect(\"{}?state={}&client_id={}&response_type=code&redirect_url={}\".format\n (settings.YELLOWANT_OAUTH_URL, state, settings.YELLOWANT_CLIENT_ID,\n settings.YELLOWANT_REDIRECT_URL))", "title": "" }, { "docid": "7c134b958322b2858ee4fe363e0659dc", "score": "0.67254525", "text": "def authorize(*args, **kwargs):\n if request.method == 'GET':\n client_id = kwargs.get('client_id')\n redirect_uri = kwargs.get('redirect_uri')\n client = local.model.get_oauth2_client(client_id)\n return template(\"oauth2_authorize\", client=client, redirect_uri=redirect_uri)\n elif request.method == 
'POST':\n # Return True back to the authorize_handler wrapper iff confirmed.\n confirm = request.forms.get('confirm', 'no')\n return confirm == 'yes'", "title": "" }, { "docid": "f8980b34258a84b4c7d7aac7d4475b60", "score": "0.6664375", "text": "def get(self):\n client_id = self.get_argument(\"client_id\")\n response_type = self.get_argument(\"response_type\")\n redirect_uri = self.get_argument(\"redirect_uri\")\n scope = self.get_argument(\"scope\")\n\n # before check if the client pass client_id, redirect_url and response_type\n # we accept only token \"Implicit Grant Flow\"\n # if all is ok we need to check if the client_id and the redirect_uri are correct\n # if not we return a 403 to the client\n if response_type in (\"token\",\"code\"):\n try:\n client_check = Client()\n exist = client_check.get(client_id=client_id)\n #check the redirect_uri parameter\n if exist['redirect_uri'] != redirect_uri:\n #have an error, return a 403\n raise tornado.web.HTTPError(403,\"redirect uri problem\")\n # redirect to login page\n self.redirect((\"/auth/login?client_id=%s&response_type=%s&redirect_uri=%s&scope=%s\")%(client_id,\n response_type,\n urllib.quote_plus(redirect_uri),\n scope))\n except ObjectDoesNotExist, e:\n raise tornado.web.HTTPError(403)\n else:\n raise tornado.web.HTTPError(400,\"The accepted values for response type are token or code\")", "title": "" }, { "docid": "473cbd2bff6f8140d1549e725ae2a1de", "score": "0.66054255", "text": "def requestAuthCode(self):\n pass", "title": "" }, { "docid": "5c9180bf1e83420a200c325ede0f13f9", "score": "0.65484095", "text": "def authorize_url(self):\n \"\"\" use in the template \"\"\"\n url = \"%s?client_id=%s&response_type=code&redirect_uri=%s\" %(\n self.login_url, self.client_id, quote(self.redirect_uri)\n )\n\n if getattr(self, 'scope', None):\n url = '%s&scope=%s' %(url, '+'.join(self.scope))\n\n return url", "title": "" }, { "docid": "9388941290815f4c3f23b04e427cb27b", "score": "0.65472054", "text": "def try_vk_auth():\n vk_auth_page = url_for('auth.vk_auth', _external=True)\n req_url = 'https://oauth.vk.com/authorize?client_id=' + vk_client_id + \\\n '&scope=email&redirect_uri=' + vk_auth_page + \\\n '&response_type=code&v=5.52'\n return redirect(req_url)", "title": "" }, { "docid": "f106a774c5184bea00c16283d4c0f75f", "score": "0.6543225", "text": "def authorize_redirect(self, extended_permissions, callback_uri=None,\r\n cancel_uri=None, callback=None):\r\n return self.authenticate_redirect(callback_uri, cancel_uri,\r\n extended_permissions,\r\n callback=callback)", "title": "" }, { "docid": "f106a774c5184bea00c16283d4c0f75f", "score": "0.6543225", "text": "def authorize_redirect(self, extended_permissions, callback_uri=None,\r\n cancel_uri=None, callback=None):\r\n return self.authenticate_redirect(callback_uri, cancel_uri,\r\n extended_permissions,\r\n callback=callback)", "title": "" }, { "docid": "d78a97b74bdc24fc1ba3765582257bd8", "score": "0.6517915", "text": "def authorize():\n flow = google_auth_oauthlib.flow.Flow.from_client_secrets_file(\n CLIENT_SECRETS_FILE, scopes=SCOPES)\n flow.redirect_uri = flask.url_for('oauth2callback', _external=True)\n authorization_url, state = flow.authorization_url(\n # This parameter enables offline access which gives your APPlication\n # both an access and refresh token.\n access_type='offline',\n # This parameter enables incremental auth.\n include_granted_scopes='true')\n\n # Store the state in the session so that the callback can verify that\n # the authorization server response.\n 
flask.session['state'] = state\n\n return flask.redirect(authorization_url)", "title": "" }, { "docid": "50542489ae1d8de9b73410622a7c2f9a", "score": "0.64845127", "text": "def _get_code(self, req):\n args = req.args\n error = args.getfirst('error')\n state = args.getfirst('state', '')\n code = args.getfirst('code')\n expected_state = req.session.pop(self.STATE_SKEY, None)\n\n if error is not None:\n raise AuthenticationFailed(error)\n elif not expected_state or strings_differ(state, expected_state):\n raise AuthenticationError(\"incorrect 'state' in redirect\")\n elif not code:\n raise AuthenticationError(\"no 'code' returned in redirect\")\n return code", "title": "" }, { "docid": "1bbf805716c3681170d1ade8ecd28f4c", "score": "0.64833176", "text": "def test_oauth_authorize_success(user, client, testapp):\n # Goes to authorize endpoint\n res = testapp.get('/oauth/authorize',\n params={'client_id': client.client_id,\n 'response_type': 'code',\n 'redirect_uri': client.default_redirect_uri,\n 'scope': ' '.join(client.default_scopes)})\n # Redirected to homepage\n res = res.follow()\n # Fills out login form\n login_form = res.forms['loginForm']\n login_form['username'] = user.email\n login_form['password'] = 'myPrecious'\n # Submits\n res = login_form.submit().follow()\n\n # Sees authorization confirm form\n authorize_form = res.forms['authorizeForm']\n assert authorize_form['confirm'].value == 'y'\n\n # Submits confirmation and is redirected to '<redirect_uri>/?code=<grant.code>'.\n res = authorize_form.submit().follow()\n grant = Grant.query.filter_by(client_id=client.client_id, user_id=user.id).first()\n assert grant is not None\n assert res.status_code == 308\n assert res.location == client.default_redirect_uri + '/?code={}'.format(grant.code)", "title": "" }, { "docid": "fe058c1c8b14f3b3c0f43099c7856c6c", "score": "0.6473238", "text": "def authorize(self, callback=None):\n assert callback is not None, 'Callback is required OAuth2'\n # Since we need the\n # callback for the access_token_url we need to keep it in the\n # session.\n params = dict(self.authorize_params)\n params['redirect_uri'] = callback\n params['client_id'] = self.client_id\n session[self.name + '_oauthredir'] = callback\n url = add_query(self.authorize_url, params)\n return redirect(url)", "title": "" }, { "docid": "b47d762146cb6afb23d2e2ae8e27a435", "score": "0.6465591", "text": "def get(self):\n user, credentials = _GetCredentials()\n\n if not credentials or credentials.invalid:\n _RedirectForOAuth(self, user)\n else:\n self.redirect(\"/\")", "title": "" }, { "docid": "6632d5eef14a9458e1abcfc38ed752a6", "score": "0.6445556", "text": "def authorize():\n sender = bottle.request['REMOTE_ADDR']\n if not sender.startswith('10.'):\n logging.info('Untrusted %s requested %s with data %s',\n sender, bottle.request.fullpath, bottle.request.forms.items())\n bottle.abort(401, 'Your request does not include the right authorization.')", "title": "" }, { "docid": "06972b1520146cd5688e7838bc7c9f87", "score": "0.6425005", "text": "def callback(request):\n try:\n user = authenticate(request=request)\n login(request, user)\n next_url = request.GET.get('next', settings.LOGIN_REDIRECT_URL) # redirects to /protected/\n return redirect(next_url)\n except Exception as err:\n logger.exception(\"An error occurred while processing OAuth2 \"\n \"callback: {}\".format(request.build_absolute_uri()))\n raise err", "title": "" }, { "docid": "f4f8536430743ccd6a243f3869ef14e9", "score": "0.6415011", "text": "def oauth_authorize(provider):\n if not 
current_user.is_anonymous:\n return redirect(url_for('home'))\n oauth = OAuthSignIn.get_provider(provider)\n return oauth.authorize()", "title": "" }, { "docid": "095951d6d8b5e50f27123804765f281a", "score": "0.6405889", "text": "def oauth_authorize():\n return osm.authorize(callback=url_for('oauth_authorized',\n next=request.args.get('next') or request.referrer or None))", "title": "" }, { "docid": "7094e82b37ac368c9624e906dbbfe4d6", "score": "0.6382216", "text": "def unauthorized():\n return redirect(get_auth_flow().start())", "title": "" }, { "docid": "e2a42fa370cd350a8956e6e714b547f2", "score": "0.6381336", "text": "def get(self, request):\n from aiohttp import web\n from oauthlib.oauth2 import MismatchingStateError\n from oauthlib.oauth2 import InsecureTransportError\n\n hass = request.app['hass']\n response = web.HTTPFound('/')\n\n try:\n code = request.query.get('code')\n hass.data[DOMAIN][API].request_token(code=code)\n hass.async_add_job(setup, hass, self.config)\n hass.components.persistent_notification.dismiss(NOTIFICATION_CB_ID)\n hass.components.persistent_notification.create(\n \"Somfy has been successfully authorized!\",\n title=NOTIFICATION_TITLE,\n notification_id=NOTIFICATION_CB_ID\n )\n except MismatchingStateError:\n _LOGGER.error(\"OAuth state not equal in request and response.\",\n exc_info=True)\n except InsecureTransportError:\n _LOGGER.error(\"Somfy redirect URI %s is insecure.\", request.url,\n exc_info=True)\n\n return response", "title": "" }, { "docid": "37ef5295412f2726ef8b663136097683", "score": "0.6362885", "text": "def authorize(request):\n # redirect with the authorization codes from drchrono\n code = request.GET.get(\"code\", \"\")\n\n if not code:\n messages.error(request, 'Please Authorize this Application with drchrono.')\n\n params = {\"redirect\": \"http%3A//127.0.0.1%3A8000/medications/authorize\",\n \"client_id\": os.environ[\"DRCHRONO_MEDS_CLIENT_ID\"],\n \"scope\": \"user:read patients:read patients:write calendar:read calendar:write clinical:read clinical:write\"}\n\n return render(request, \"medications/index.html\", params)\n\n tokens = api.get_tokens(code)\n # get doctor id\n doc = utils.get_doctor(tokens)\n request.session['user'] = doc.doc_id\n # redirect to patient listing page\n return redirect(\"patients/\")", "title": "" }, { "docid": "5cb304e34011b48ae429442cd65801ab", "score": "0.63555956", "text": "def process(self, request, response, environ):\n data = self.authorize(request, response, environ,\n self.scope_handler.scopes)\n\n if isinstance(data, Response):\n return data\n\n code = self.token_generator.generate()\n expires = int(time.time()) + self.token_expiration\n\n auth_code = AuthorizationCode(client_id=self.client.identifier,\n code=code, expires_at=expires,\n redirect_uri=self.client.redirect_uri,\n scopes=self.scope_handler.scopes,\n data=data[0], user_id=data[1])\n\n self.auth_code_store.save_code(auth_code)\n\n response.add_header(\"Location\", self._generate_location(code))\n response.body = \"\"\n response.status_code = 302\n\n return response", "title": "" }, { "docid": "254d22dddda9967a93cd91eff414e000", "score": "0.63427263", "text": "def authenticate_redirect(self, callback_uri=None):\n http = httpclient.AsyncHTTPClient()\n http.fetch(self._oauth_request_token_url(callback_uri=callback_uri), self.async_callback(\n self._on_request_token, self._OAUTH_AUTHENTICATE_URL, None))", "title": "" }, { "docid": "4f62ed2ca495de9753155ab1eff08538", "score": "0.6330066", "text": "def authorize(secrets_json):\n flow = 
flow_from_clientsecrets(secrets_json,\n scope='https://www.googleapis.com/auth/webmasters.readonly',\n redirect_uri='urn:ietf:wg:oauth:2.0:oob')\n # Create an httplib2.Http object and authorize it with our credentials\n authorize_url = flow.step1_get_authorize_url()\n print('Go to the following link in your browser: ' + authorize_url)\n code = input('Enter verification code: ').strip()\n credentials = flow.step2_exchange(code)\n http = httplib2.Http(cache=\"cache\")\n http = credentials.authorize(http)\n return http", "title": "" }, { "docid": "1ae06989de60e5336fd94a18f57383c2", "score": "0.6310487", "text": "def bunq_oauth_reauthorize():\n oauthdata = storage.get_value(\"bunq2IFTTT\", \"bunq_oauth\")\n storage.store_large(\"bunq2IFTTT\", \"bunq_oauth_new\", oauthdata)\n redirect_url = request.url_root + \"auth\"\n url = \"https://oauth.bunq.com/auth?response_type=code\"\\\n \"&client_id=\" + oauthdata[\"client_id\"] + \\\n \"&redirect_uri=\" + redirect_url\n return render_template(\"message.html\", msgtype=\"primary\", msg=\\\n \"Make sure the following URL is included as a redirect url:\"\\\n \"<br><br><b>\" + redirect_url + \"</b><br><br>\"\\\n 'Then click <a href=\"' + url + '\">this link</a>')", "title": "" }, { "docid": "94fe3b316785af6f8ba76bd67050366c", "score": "0.6310456", "text": "def oauth_authorize_view(request, do_redirect=_do_oauth_redirect):\n \n return do_redirect(request, False)", "title": "" }, { "docid": "8197346291c2a8c14dbc606b2cf9f5bf", "score": "0.6304883", "text": "def handle_callback():\n global TOKENS\n\n try:\n TOKENS[\"user_token\"] = get_user_token(flask.request.args.get(\"code\"))\n return flask.redirect(\"/\")\n except NotAuthorizedException:\n return 'Access was not granted or authorization failed', 403\n except:\n raise", "title": "" }, { "docid": "146521a9418f76ec5c6f0e8f6c6f4057", "score": "0.62939245", "text": "def test_get_auth_code(self):\n query_string = [('grant_type', 'grant_type_example'),\n ('client_id', 'client_id_example'),\n ('redirect_uri', 'redirect_uri_example')]\n response = self.client.open(\n '//IntelligentAgent/oauth20/authorize',\n method='GET',\n content_type='application/json',\n query_string=query_string)\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "title": "" }, { "docid": "db9aa3adef089696a1a7f5c5896d983d", "score": "0.6284529", "text": "def dispatch(self, request):\n action = request.GET.get('action', AuthAction.AUTHENTICATE)\n SocialLogin.stash_state(request)\n client = self.get_client(request, action=action)\n return HttpResponseRedirect(client.get_login_url())", "title": "" }, { "docid": "36eff264e9ef5588651c7a0258bf9694", "score": "0.62798256", "text": "def oauth2callback():\n app.logger.debug(\"Entering oauth2callback\")\n flow = client.flow_from_clientsecrets(\n CLIENT_SECRET_FILE,\n scope=SCOPES,\n redirect_uri=flask.url_for('oauth2callback', _external=True))\n # Note we are *not* redirecting above. We are noting *where*\n # we will redirect to, which is this function.\n\n # The *second* time we enter here, it's a callback\n # with 'code' set in the URL parameter. 
If we don't\n # see that, it must be the first time through, so we\n # need to do step 1.\n app.logger.debug(\"Got flow\")\n if 'code' not in flask.request.args:\n app.logger.debug(\"Code not in flask.request.args\")\n auth_uri = flow.step1_get_authorize_url()\n return flask.redirect(auth_uri)\n # This will redirect back here, but the second time through\n # we'll have the 'code' parameter set\n else:\n # It's the second time through ... we can tell because\n # we got the 'code' argument in the URL.\n app.logger.debug(\"Code was in flask.request.args\")\n auth_code = flask.request.args.get('code')\n credentials = flow.step2_exchange(auth_code)\n flask.session['credentials'] = credentials.to_json()\n # Now I can build the service and execute the query,\n # but for the moment I'll just log it and go back to\n # the main screen\n app.logger.debug(\"Got credentials\")\n return flask.redirect(flask.url_for('join', meetcode=flask.session['meetcode']))", "title": "" }, { "docid": "59d11864862d0e68f5d89cb2ef987047", "score": "0.62693596", "text": "def authorized_handler(self, request):\n if 'code' in request.args:\n data = self.handle_oauth2_response(request.args.get('code'))\n self.free_request_token()\n return data\n else:\n return None", "title": "" }, { "docid": "7163871433ebea4823ef4954ad799bb2", "score": "0.6256175", "text": "def oidcCallback():\n if app.oidcClient is None:\n raise exceptions.NotImplementedException()\n response = dict(flask.request.args.iteritems(multi=True))\n aresp = app.oidcClient.parse_response(\n message.AuthorizationResponse,\n info=response,\n sformat='dict')\n sessState = flask.session.get('state')\n respState = aresp['state']\n if (not isinstance(aresp, message.AuthorizationResponse) or\n respState != sessState):\n raise exceptions.NotAuthenticatedException()\n\n args = {\n \"code\": aresp['code'],\n \"redirect_uri\": app.oidcClient.redirect_uris[0],\n \"client_id\": app.oidcClient.client_id,\n \"client_secret\": app.oidcClient.client_secret\n }\n atr = app.oidcClient.do_access_token_request(\n scope=\"openid\",\n state=respState,\n request_args=args)\n\n if not isinstance(atr, message.AccessTokenResponse):\n raise exceptions.NotAuthenticatedException()\n\n atrDict = atr.to_dict()\n if flask.session.get('nonce') != atrDict['id_token']['nonce']:\n raise exceptions.NotAuthenticatedException()\n key = oic.oauth2.rndstr(SECRET_KEY_LENGTH)\n flask.session['key'] = key\n app.tokenMap[key] = aresp[\"code\"], respState, atrDict\n # flask.url_for is broken. It relies on SERVER_NAME for both name\n # and port, and defaults to 'localhost' if not found. 
Therefore\n # we need to fix the returned url\n indexUrl = flask.url_for('index', _external=True)\n indexParts = list(urlparse.urlparse(indexUrl))\n if ':' not in indexParts[1]:\n indexParts[1] = '{}:{}'.format(socket.gethostname(), app.myPort)\n indexUrl = urlparse.urlunparse(indexParts)\n response = flask.redirect(indexUrl)\n return response", "title": "" }, { "docid": "c79dc3bbc95fc4b5b7808dc11dd33084", "score": "0.6246772", "text": "def authorize(event, context):\n pass", "title": "" }, { "docid": "d8750b125a7e5a29fddea598350a912a", "score": "0.624576", "text": "def oauth_authorized(resp):\n next_url = request.args.get('next') or url_for('index')\n if resp is None:\n flash(u'You denied the request to sign in.')\n return redirect(next_url)\n session['osm_token'] = (\n resp['oauth_token'],\n resp['oauth_token_secret']\n )\n print(resp)\n flash('You were signed in')\n return redirect(next_url)", "title": "" }, { "docid": "6e30787c507e6b386b843867f8f012a2", "score": "0.62239", "text": "def devpiserver_authcheck_always_ok(request):", "title": "" }, { "docid": "1960f9e2e6e200ada557de8a4417416e", "score": "0.62155366", "text": "def us_verify_link():\n if not all(v in request.args for v in [\"email\", \"code\"]):\n m, c = get_message(\"API_ERROR\")\n if _security.redirect_behavior == \"spa\":\n return redirect(get_url(_security.login_error_view, qparams={c: m}))\n do_flash(m, c)\n return redirect(url_for_security(\"us_signin\"))\n\n user = _datastore.find_user(email=request.args.get(\"email\"))\n if not user or not user.active:\n if not user:\n m, c = get_message(\"USER_DOES_NOT_EXIST\")\n else:\n m, c = get_message(\"DISABLED_ACCOUNT\")\n if _security.redirect_behavior == \"spa\":\n return redirect(get_url(_security.login_error_view, qparams={c: m}))\n do_flash(m, c)\n return redirect(url_for_security(\"us_signin\"))\n\n totp_secrets = user.us_get_totp_secrets()\n if \"email\" not in totp_secrets or not _security._totp_factory.verify_totp(\n token=request.args.get(\"code\"),\n totp_secret=totp_secrets[\"email\"],\n user=user,\n window=config_value(\"US_TOKEN_VALIDITY\"),\n ):\n m, c = get_message(\"INVALID_CODE\")\n if _security.redirect_behavior == \"spa\":\n return redirect(\n get_url(\n _security.login_error_view,\n qparams=user.get_redirect_qparams({c: m}),\n )\n )\n do_flash(m, c)\n return redirect(url_for_security(\"us_signin\"))\n\n if (\n config_value(\"TWO_FACTOR\")\n and \"email\" in config_value(\"US_MFA_REQUIRED\")\n and (config_value(\"TWO_FACTOR_REQUIRED\") or is_tf_setup(user))\n ):\n return tf_login(user, primary_authn_via=\"email\")\n\n login_user(user, authn_via=[\"email\"])\n after_this_request(_commit)\n if _security.redirect_behavior == \"spa\":\n # We do NOT send the authentication token here since the only way to\n # send it would be via a query param and that isn't secure. 
(logging and\n # possibly HTTP Referer header).\n # This means that this can only work if sessions are active which sort of\n # makes sense - otherwise you need to use /us-signin with a code.\n return redirect(\n get_url(_security.post_login_view, qparams=user.get_redirect_qparams())\n )\n\n do_flash(*get_message(\"PASSWORDLESS_LOGIN_SUCCESSFUL\"))\n return redirect(get_post_login_redirect())", "title": "" }, { "docid": "657fb78550f9ec31fa3c0c0ebd7ebe4a", "score": "0.6209151", "text": "def authenticate(env):\n url_parameters = {\n 'audience': env['audience'],\n 'scope': env['scopes'],\n 'response_type': env['response_type'],\n 'redirect_uri': env['callback_url'],\n 'client_id': env['client_id'],\n 'code_challenge': env['code_challenge'].replace('=', ''),\n 'code_challenge_method': env['code_challenge_method'],\n 'state': env['state']\n }\n\n authentication_url = env['authorize_url'] + \\\n urllib.parse.urlencode(url_parameters)\n\n webbrowser.open_new(authentication_url)\n server = ServerThread(app)\n server.start()\n while not received_callback:\n sleep(1)\n server.shutdown()\n\n return code", "title": "" }, { "docid": "3c7dcd927ecf75c9926409c23cf5c343", "score": "0.61953115", "text": "def OAuthRedirectAuthorizationBackend(backend, code):\n\tif code is None:\n\t\ttry:\n\t\t\tbackend_class = get_backend(settings.AUTHENTICATION_BACKENDS, backend)\n\t\t\tauthorization_url = backend_class.AUTHORIZATION_URL\n\t\t\turl_parameters = {\n\t\t\t\t'redirect_uri': ('http://{}{}').format(\n\t\t\t\t\tSite.objects.get_current().domain,\n\t\t\t\t\treverse.reverse('rest-social-email-auth:user-social-login', args=(backend,))\n\t\t\t\t),\n\t\t\t\t'response_type': backend_class.RESPONSE_TYPE,\n\t\t\t\t'scope': [scope for scope in backend_class.DEFAULT_SCOPE if scope != 'openid'][0],\n\t\t\t\t'client_id': config('SOCIAL_AUTH_' + backend.upper().replace('-', '_') + '_KEY')\n\t\t\t}\n\t\t\t\n\t\t\tfinal_url = authorization_url + '?' 
+ urllib.parse.urlencode(url_parameters)\n\n\t\texcept:\n\t\t\traise CustomException(code=status.HTTP_501_NOT_IMPLEMENTED, detail=__('Missing Backend'))\n\n\t\treturn redirect(final_url)\n\n\tdata = {\n\t\t\"code\": code,\n\t\t\"redirect_uri\": ('http://{}{}').format(Site.objects.get_current().domain,\n\t\t\t\t\t\t\t\t\t\t\t reverse.reverse('rest-social-email-auth:user-social-login', args=(backend,))),\n\t\t\"provider\": backend\n\t}\n\n\treturn data", "title": "" }, { "docid": "a2d9e1be08ecdbbf98f08ee90acf14da", "score": "0.61945134", "text": "async def get_authorization_code(request):\n try:\n json_body = await request.json()\n except:\n json_body = {}\n\n params = await request.post()\n response_type = params.get('response_type', None)\n response_type = json_body.get('response_type', response_type)\n if response_type is None:\n raise HTTPBadRequest(reason='response_type is missing')\n\n if response_type not in ['code', 'url']:\n raise HTTPBadRequest(reason='response_type needs to be code or url')\n\n client_id = params.get('client_id', None)\n client_id = json_body.get('client_id', client_id)\n if client_id is None:\n raise HTTPBadRequest(reason='client_id is missing')\n\n scopes = params.get('scopes', None)\n if scopes is None:\n raise HTTPBadRequest(reason='scopes is missing')\n\n if not isinstance(scopes, list):\n scopes = scopes.split(',')\n scopes = json_body.get('scopes', scopes)\n\n service_token = params.get('service_token', None)\n service_token = json_body.get('service_token', service_token)\n if service_token is None:\n raise HTTPBadRequest(reason='service_token is missing')\n\n db = request.app['settings']['db_tauths']\n\n # We check the service token\n with (await db) as redis:\n service_client_id = await redis.get(service_token)\n\n if service_client_id is None:\n raise HTTPBadRequest(reason='Invalid Service Token')\n\n # We need to check if the client is ok for the scope\n # Table of valid clients and scopes\n config = request.app['settings']['db_config']\n ttl = request.app['settings']['ttl_auth_code']\n secret = request.app['settings']['jwtsecret']\n debug = request.app['settings']['debug']\n\n for scope in scopes:\n if not config.hasScope(scope):\n log.error('Not valid scope ' + scope)\n return HTTPUnauthorized(reason=\"Wrong scope\")\n\n if not config.hasClient(client_id):\n # S'hauria de reenviar a authentificacio de l'usuari per acceptar-ho\n log.error('Not valid client_id ' + client_id)\n return HTTPUnauthorized(reason=\"Wrong client id\")\n\n # If its ok create a authorization code\n auth_code = uuid.uuid4().hex\n\n db = request.app['settings']['db_cauths']\n\n # We store the client\n for scope in scopes:\n client_scope = str(client_id)\n with (await db) as redis:\n await redis.set(auth_code + '::' + scope, client_scope)\n await redis.expire(auth_code, ttl)\n\n # We log it\n if debug:\n log.warn('Auth Code from Client : %s', client_id)\n\n # if its ok redirect to get_access_token\n token = jwt.encode(\n {\n 'iat': datetime.utcnow(),\n 'exp': datetime.utcnow() + timedelta(seconds=ttl),\n 'auth_code': auth_code\n },\n secret,\n algorithm='HS256')\n\n if response_type == 'url':\n redirect_uri = params.get('redirect_uri', None)\n if redirect_uri is None:\n raise HTTPBadRequest(reason='redirect_uri is missing')\n\n response = HTTPFound(location=redirect_uri + '?code=' + token)\n else:\n response = Response(body=token, content_type='text/plain')\n\n # origin = request.headers.get('Origin', None)\n # if origin and origin in plone.oauth.CORS:\n # 
response.headers['Access-Control-Allow-Origin'] = origin\n # elif origin:\n # return HTTPUnauthorized(\"Wrong Origin\")\n\n return response", "title": "" }, { "docid": "ded1afbe20b556ad8d3028eef785f7bf", "score": "0.6171914", "text": "def authenticate_redirect(self, callback_uri=None, callback=None):\r\n http = self.get_auth_http_client()\r\n http.fetch(self._oauth_request_token_url(callback_uri=callback_uri),\r\n self.async_callback(\r\n self._on_request_token, self._OAUTH_AUTHENTICATE_URL,\r\n None, callback))", "title": "" }, { "docid": "ded1afbe20b556ad8d3028eef785f7bf", "score": "0.6171914", "text": "def authenticate_redirect(self, callback_uri=None, callback=None):\r\n http = self.get_auth_http_client()\r\n http.fetch(self._oauth_request_token_url(callback_uri=callback_uri),\r\n self.async_callback(\r\n self._on_request_token, self._OAUTH_AUTHENTICATE_URL,\r\n None, callback))", "title": "" }, { "docid": "1f5cc82660a219812175e6faa07d09b6", "score": "0.6159028", "text": "def get(self):\n\n # oauth dance, which ends up at the callback url\n client = oauth.TwitterClient(CONSUMER_KEY, CONSUMER_SECRET,\n SIGNIN_CALLBACK_URL)\n return self.redirect(client.get_authenticate_url())", "title": "" }, { "docid": "2959782741f20361a19f0f05d75fdcb6", "score": "0.61584306", "text": "def login_callback(self):\n code = request.args.get('code')\n\n google_provider_cfg = get_google_provider_cfg()\n token_endpoint = google_provider_cfg[\"token_endpoint\"]\n\n # Send request for token with the code received in the callback\n token_url, headers, body = client.prepare_token_request(\n token_endpoint,\n authorization_response=request.url,\n redirect_url=request.base_url,\n code=code\n )\n\n token_response = requests.post(\n token_url,\n headers=headers,\n data=body,\n auth=(yeti_config.oidc.client_id, yeti_config.oidc.client_secret),\n )\n\n client.parse_request_body_response(json.dumps(token_response.json()))\n # Our client can now get information on the user\n\n userinfo_endpoint = google_provider_cfg[\"userinfo_endpoint\"]\n uri, headers, body = client.add_token(userinfo_endpoint)\n userinfo_response = requests.get(uri, headers=headers, data=body)\n\n # userinfo_response.json().get(\"email_verified\")\n # unique_id = userinfo_response.json()[\"sub\"]\n user_email = userinfo_response.json()[\"email\"]\n # picture = userinfo_response.json()[\"picture\"]\n # users_name = userinfo_response.json()[\"given_name\"]\n\n user = user_management.authenticate_user(user_email)\n\n token = jwt.encode({\n 'sub': user.email,\n 'iat': datetime.utcnow(),\n 'exp': datetime.utcnow() + timedelta(days=30),\n }, yeti_config.core.secret_key, algorithm='HS512')\n\n session.clear()\n session['token'] = token\n\n # Return JSON here instead of a redirect if we want popup-winow login.\n # return {'authenticated': True, 'user': user.email}\n return redirect('/')", "title": "" }, { "docid": "a5348dcbae3d1630c1c9e608cf7ae7f9", "score": "0.61570424", "text": "def authorize(login_code, client_id, client_secret, redirect_uri):\n headers = {\n 'authorization': authorization_header(client_id, client_secret),\n 'content-type': 'application/x-www-form-urlencoded',\n 'accept': 'application/json'\n }\n data = {\n 'grant_type': 'authorization_code',\n 'redirect_uri': redirect_uri,\n 'code': login_code\n }\n res = requests.post(token_uri, headers=headers, data=urlencode(data))\n Logger().info(to_curl(res.request))\n if res.status_code == 200:\n return res.json()\n else:\n Logger().error(\"Auth failed: %s\" % res.status_code)\n 
Logger().error(\"Auth failed: %s\" % res.json())", "title": "" }, { "docid": "890225dd183e63901b49a85e895e8e16", "score": "0.6154822", "text": "def authorize(self, response_type='code',\n scope='read,profile:read_all,activity:read_all',\n approval_prompt='auto'\n ):\n if not (self.client_id or self.client_secret):\n self.client_id = int(input('Enter Client ID: '))\n self.client_secret = input('Enter Client Secret: ')\n \n self.oauth_params = {\"client_id\": self.client_id,\n \"response_type\": response_type,\n \"redirect_uri\": \"http://localhost:8000/authorization_successful\",\n \"scope\": scope,\n \"approval_prompt\": approval_prompt\n }\n url = 'https://www.strava.com/oauth/authorize?' + urllib.parse.urlencode(self.oauth_params)\n webbrowser.get().open(url)\n success_url = urllib.parse.urlparse(input('Paste the Success URL here:')).query\n query = urllib.parse.parse_qs(success_url)\n self.code = query['code']\n self.token_params = {\"client_id\": self.client_id,\n \"client_secret\": self.client_secret,\n \"code\": self.code,\n \"grant_type\": \"authorization_code\"\n }\n self.r = requests.post(\"https://www.strava.com/oauth/token\", self.token_params)\n\n self.write_creds()", "title": "" }, { "docid": "afd80de2b6a8aa3f8b2f04452e562fcf", "score": "0.61538583", "text": "def request_authorize(self):\n\n from socket import gethostname\n\n payload = {\n \"app_id\": self.app_id,\n \"app_name\": self.app_name,\n \"app_version\": self.app_version,\n \"device_name\": gethostname()\n }\n print(\"You need to press YES on the box.\")\n app_register = self.post(\"login/authorize\", payload)\n self.app_token = app_register[\"app_token\"]\n return app_register[\"track_id\"]", "title": "" }, { "docid": "96f30e36d0302a3a91a341e555761510", "score": "0.61465275", "text": "def auth_return_view(request):\n\n token = str(request.GET['state']).encode()\n if not xsrfutil.validate_token(settings.SECRET_KEY, token, request.user):\n return HttpResponseBadRequest('Improper OAuth request.')\n\n if 'error' in request.GET:\n if request.GET['error'] != 'access_denied':\n # access_denied means the user clicked deny; it's not exceptional.\n logger.error(\"error on oauth return: %s\", request.GET['error'])\n return redirect('autorespond')\n\n credential = FLOW.step2_exchange(request.GET)\n\n if credential.refresh_token is not None:\n # refresh tokens only come with fresh auths.\n # we don't want to store reauths, since we'd clobber the refresh token.\n http = httplib2.Http()\n http = credential.authorize(http)\n # https://github.com/googleapis/google-api-python-client/issues/299\n service = build(\"oauth2\", \"v2\", http=http, cache_discovery=False)\n res = service.userinfo().get().execute()\n\n email = res['email']\n\n try:\n GoogleCredential.objects.update_or_create(\n email=email, user=request.user,\n defaults={'credentials': credential})\n except IntegrityError:\n logger.error('attempt to connect already-connected account %r to %r',\n email, request.user.email,\n extra={'tags': {'linker': request.user.email,\n 'linkee': email}})\n\n return HttpResponseBadRequest('This Google account is already connected to a different account.')\n\n return redirect('autorespond')", "title": "" }, { "docid": "f2e4f8f04c19855789cc7d08c504eaa0", "score": "0.61338633", "text": "def get(self):\n if not self.request.get(\"code\"):\n self.redirect(\"/\")\n return\n user = users.get_current_user()\n flow = pickle.loads(memcache.get(user.user_id()))\n if flow:\n error = False\n try:\n credentials = flow.step2_exchange(self.request.params)\n 
except client.FlowExchangeError, e:\n credentials = None\n error = True\n appengine.StorageByKeyName(\n model.Credentials, user.user_id(), \"credentials\").put(credentials)\n if error:\n self.redirect(\"/?msg=ACCOUNT_ERROR\")\n else:\n self.redirect(self.request.get(\"state\"))", "title": "" }, { "docid": "61ac95d8de9f4fe871195f3daa5c3a68", "score": "0.61204153", "text": "def login():\n return redirect(url_for('callback'))", "title": "" }, { "docid": "034d16490be74e437eb0617526f1b64c", "score": "0.6119916", "text": "def login():\n return google.authorize(callback=url_for('authorized', _external=True))", "title": "" }, { "docid": "f840ddd530630c55340e931c31f04d64", "score": "0.6118114", "text": "def login():\n callback_url = url_for('oauthorized', next=request.args.get('next'))\n return twitter.authorize(callback=callback_url or request.referrer or None)", "title": "" }, { "docid": "baec55355b70d04aee6578c325877d9c", "score": "0.61107045", "text": "def test_authorize_success(self):\n pass", "title": "" }, { "docid": "5238e095d7658beaecb2d67239a0958c", "score": "0.61087006", "text": "def oidc():\n print(\"oidc()\")\n # print(request.form)\n\n if \"error\" in request.form:\n print(\"ERROR: {0}, MESSAGE: {1}\".format(request.form[\"error\"], request.form[\"error_description\"]))\n\n # Check Nonce\n # print(\"state: '{0}'\".format(session[\"state\"]))\n # print(\"nonce: '{0}'\".format(session[\"nonce\"]))\n if session[\"state\"] == request.form[\"state\"]:\n oidc_code = request.form[\"code\"]\n print(\"oidc_code: {0}\".format(oidc_code))\n oauth_token = get_oauth_token(oidc_code)\n redirect_url = os.environ[\"APP_AUTH_URL\"]\n response = make_response(redirect(redirect_url))\n response.set_cookie('token', oauth_token)\n else:\n print(\"FAILED TO MATCH STATE!!!\")\n response = make_response(redirect(os.environ[\"APP_AUTH_URL\"]))\n\n session.pop(\"state\", None)\n session.pop(\"nonce\", None)\n\n return response", "title": "" }, { "docid": "e7cb8c73a11e874e7f64dc84a72d7abe", "score": "0.6094664", "text": "def redirect_login(request):\n client_id = settings.KEYCLOAK_CLIENT_ID\n base_authorize_url = settings.KEYCLOAK_AUTHORIZE_URL\n redirect_uri = request.build_absolute_uri(reverse('callback'))\n if 'next' in request.GET:\n redirect_uri += \"?next=\" + quote(request.GET['next'])\n oauth2_session = OAuth2Session(\n client_id, scope='openid email profile', redirect_uri=redirect_uri)\n authorization_url, state = oauth2_session.authorization_url(\n base_authorize_url)\n # Store state and redirect_uri in session for\n # later validation (see authentication.py)\n request.session['OAUTH2_STATE'] = state\n request.session['OAUTH2_REDIRECT_URI'] = redirect_uri\n return redirect(authorization_url)", "title": "" }, { "docid": "7dcea9aa192d4f3e44caf5013adb4343", "score": "0.60840166", "text": "def get(self, request):\n IDV_workflow = IDVerificationService.get_verify_location()\n return redirect(IDV_workflow)", "title": "" }, { "docid": "6aba1c2ef137231952598ccc40bbf3cb", "score": "0.60835546", "text": "def authorize_client(self):\n self.client.force_login(self.user)", "title": "" }, { "docid": "8d5a355c1f14dc12b3cdd8bfe1b8da92", "score": "0.60774845", "text": "def verify(request, redirect_field_name=auth.REDIRECT_FIELD_NAME):\n redirect_to = request.REQUEST.get(redirect_field_name, '')\n if not redirect_to:\n redirect_to = getattr(settings, 'LOGIN_REDIRECT_URL', '/')\n redirect_to_failure = getattr(settings, 'LOGIN_REDIRECT_URL_FAILURE', '/')\n form = BrowserIDForm(data=request.POST)\n if form.is_valid():\n 
assertion = form.cleaned_data['assertion']\n user = auth.authenticate(assertion=assertion,\n audience=get_audience(request))\n if user and user.is_active:\n if user.get_profile().is_complete():\n auth.login(request, user)\n return redirect(reverse('profile', args=[user.username]))\n else:\n _store_user_in_session(request, assertion,\n get_audience(request))\n return redirect(reverse('register'))\n return HttpResponseRedirect(redirect_to_failure)", "title": "" }, { "docid": "934167e07487371a11f1c7546d4e26a2", "score": "0.6068907", "text": "def xero_auth_accept(request):\n try:\n # retrieve the details for this session\n verifier = request.GET.get('oauth_verifier')\n token = request.GET.get('oauth_token')\n\n if not verifier or not token:\n raise XeroFlowException(request.GET.get('error', \"unknown\"),\n request.GET.get('error_description',\n 'unknown'))\n state_obj = get_object_or_404(XeroAuthFlowState, pk=token)\n # complete the flow\n xerouser = state_obj.complete_flow(verifier, request.user)\n # org should be validated, maybe...?\n xerouser.org = request.GET.get('org')\n xerouser.save()\n # find out where user should go next\n next_page = state_obj.next_page\n # we are done, forget this state\n state_obj.delete()\n # send user on its way\n return redirect(_validate_next(next_page))\n except XeroFlowException as xfe:\n logger.exception(xfe)\n return HttpResponseBadRequest()", "title": "" }, { "docid": "bcb49b220d96c218f84ba32d0f1cc78a", "score": "0.6068169", "text": "def authorize_handle(self, cookie):\n return True", "title": "" }, { "docid": "841d47ef2c6d70c8dbc7404433c03755", "score": "0.6063969", "text": "def yt_authorize():\n\n if request.referrer and url_for(\"api.settings\", _external=True) in request.referrer:\n session[\"yt_referrer\"] = request.referrer\n\n flow = Flow.from_client_config(\n CLIENT_CONFIG,\n scopes=SCOPES\n )\n\n flow.redirect_uri = url_for('yt.yt_oauth2callback', _external=True)\n\n auth_url, state = flow.authorization_url(prompt='consent')\n\n session[\"yt_state\"] = state\n\n return redirect(auth_url)", "title": "" }, { "docid": "1b78a714e651f2f5424a66ef731eab9d", "score": "0.60549295", "text": "async def authorization_successful(req, resp):\n params = {\n \"client_id\": os.getenv('STRAVA_CLIENT_ID'),\n \"client_secret\": os.getenv('STRAVA_CLIENT_SECRET'),\n \"code\": req.params.get('code'),\n \"grant_type\": \"authorization_code\"\n }\n r = requests.post(\"https://www.strava.com/oauth/token\", params)\n response = json.loads(r.text)\n logger.info(f\"authorization succesful {response}\")\n load_athlete(response)\n load_activities(response)\n app_url = os.getenv('APP_URL', 'http://localhost:5042')\n api.redirect(resp, location=f\"{app_url}/{response['athlete']['id']}\")", "title": "" }, { "docid": "cd7ddca6ab8d162d4c0e261ff288f481", "score": "0.6053504", "text": "def authorize_redirect(self, callback_uri=None, extra_params=None,\r\n http_client=None, callback=None):\r\n if callback_uri and getattr(self, \"_OAUTH_NO_CALLBACKS\", False):\r\n raise Exception(\"This service does not support oauth_callback\")\r\n if http_client is None:\r\n http_client = self.get_auth_http_client()\r\n if getattr(self, \"_OAUTH_VERSION\", \"1.0a\") == \"1.0a\":\r\n http_client.fetch(\r\n self._oauth_request_token_url(callback_uri=callback_uri,\r\n extra_params=extra_params),\r\n self.async_callback(\r\n self._on_request_token,\r\n self._OAUTH_AUTHORIZE_URL,\r\n callback_uri,\r\n callback))\r\n else:\r\n http_client.fetch(\r\n self._oauth_request_token_url(),\r\n 
self.async_callback(\r\n                self._on_request_token, self._OAUTH_AUTHORIZE_URL,\r\n                callback_uri,\r\n                callback))", "title": "" }, { "docid": "5ce78df6d0b361985334f549b388ec63", "score": "0.6044337", "text": "def get(self, req):\n        code = req.GET.get(\"code\")\n        token = self._token_exchange(code)\n\n        info = self._get_info(token)\n\n        req.session[\"is_logged\"] = True\n        req.session[\"token\"] = token\n        req.session['username'] = info['username']\n\n        return HttpResponseRedirect(\"/\")", "title": "" }, { "docid": "9b349baf9fd864221e745238c94d69c1", "score": "0.6040058", "text": "def test_valid_redirect_oauth(client, oauth_client, idp):\n    response = client.get(\"/login/{}?redirect={}\".format(idp, oauth_client.url))\n    assert response.status_code == 302", "title": "" }, { "docid": "fe6c4f1cf3a204d234ab4735ff1a2095", "score": "0.6040045", "text": "def call_back():\n    # Recreate the auth variable with the addiional request token.\n    auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\n    verifier = request.args.get('oauth_verifier')\n    token = session.pop('request_token')\n    auth.request_token = token\n\n    # Passes in the 'verifier' query parameter that 'proves' the user \n    # has successfuly passed Twitter's authorization. \n    try:\n        auth.get_access_token(verifier)\n    except tweepy.TweepError:\n        print('Error! Failed to get access token.')\n\n    # Storing the access token in the session object. This is what\n    # gives the user access to Twitter's treasure trove.\n    session['access_key'] = auth.access_token\n    session['access_secret'] = auth.access_token_secret\n    session.permanent = True \n    return redirect(url_for('dashboard'))", "title": "" }, { "docid": "dc1f6c26ecf690c7354227cc1063717b", "score": "0.6009275", "text": "def get_code(self):\n        return self.auth_url", "title": "" }, { "docid": "72b5494db12adb0100ca5c95f26aa45b", "score": "0.6002281", "text": "async def authorize(self) -> bool:\n        raise NotImplementedError", "title": "" }, { "docid": "7d109a1dcfed610e9d8990462fe6d77a", "score": "0.59968895", "text": "def get(self, request, *args, **kwargs):\n        code = request.GET.get(\"code\", None)\n        state = request.GET.get(\"state\", None)\n\n        if code is None:\n            logger.info(\"Redirecting call to main page. No code provided.\")\n            return HttpResponseRedirect(os.environ[\"FRONTEND_BASE_URL\"])\n\n        if state is None:\n            logger.info(\"Redirecting call to main page. 
No state provided.\")\n return HttpResponseRedirect(os.environ[\"FRONTEND_BASE_URL\"])\n\n # get the validation keys to confirm generated nonce and state\n nonce_and_state = get_nonce_and_state(request.session)\n nonce_validator = nonce_and_state.get(\"nonce\", \"not_nonce\")\n state_validator = nonce_and_state.get(\"state\", \"not_state\")\n\n # build out the query string parameters\n # and full URL path for OIDC token endpoint\n token_params = generate_token_endpoint_parameters(code)\n token_endpoint = os.environ[\"OIDC_OP_TOKEN_ENDPOINT\"] + \"?\" + token_params\n token_response = requests.post(token_endpoint)\n\n if token_response.status_code != 200:\n return Response(\n {\n \"error\": (\n \"Invalid Validation Code Or OpenID Connect Authenticator \"\n \"Down!\"\n )\n },\n status=status.HTTP_400_BAD_REQUEST,\n )\n\n token_data = token_response.json()\n id_token = token_data.get(\"id_token\")\n\n decoded_payload = self.decode_payload(id_token)\n if decoded_payload == {\"error\": \"The token is expired.\"}:\n return Response(decoded_payload, status=status.HTTP_401_UNAUTHORIZED)\n\n decoded_nonce = decoded_payload[\"nonce\"]\n\n if not validate_nonce_and_state(\n decoded_nonce, state, nonce_validator, state_validator\n ):\n msg = \"Could not validate nonce and state\"\n raise SuspiciousOperation(msg)\n\n if not decoded_payload[\"email_verified\"]:\n return Response(\n {\"error\": \"Unverified email!\"}, status=status.HTTP_400_BAD_REQUEST\n )\n\n try:\n user = self.handle_user(request, id_token, decoded_payload)\n return response_redirect(user, id_token)\n\n except Exception as e:\n logger.exception(f\"Error attempting to login/register user: {e} at...\")\n return Response(\n {\n \"error\": (\n \"Email verfied, but experienced internal issue \"\n \"with login/registration.\"\n )\n },\n status=status.HTTP_400_BAD_REQUEST,\n )", "title": "" }, { "docid": "430840ff616b97c033b147e6abafbaba", "score": "0.59937596", "text": "def xero_auth_start(request):\n # calculate where MS will send user on success\n acceptance_url = request.build_absolute_uri(\n reverse('xero-auth-accept',\n current_app=request.resolver_match.namespace))\n\n # validate the 'next' parameter, we don't want an open proxy...\n next_page = _validate_next(request.GET.get('next'))\n # save state for later\n xerostate = XeroAuthFlowState.start_flow(acceptance_url, next_page)\n # send user to MS\n return redirect(xerostate.auth_url)", "title": "" }, { "docid": "6a6b46c90d71bbae23f74fb27ba682c2", "score": "0.5993504", "text": "def login():\n session.pop('redirect', None)\n if request.args.get('r'):\n session['redirect'] = request.args.get('r')\n return oauth.google.authorize_redirect(url_for('auth', _external=True))", "title": "" }, { "docid": "1d332722b72a38ebb14f95bd14c91a3b", "score": "0.5982788", "text": "def authorize_request(params, headers, auth=None):", "title": "" }, { "docid": "c353719ff3bdde783b59fb26bfa7699d", "score": "0.5971687", "text": "def login(self):\n if 'user' in g:\n return {'redirect': '/', 'authenticated': True}\n\n provider_cfg = get_google_provider_cfg()\n authorization_endpoint = provider_cfg['authorization_endpoint']\n\n request_uri = client.prepare_request_uri(\n authorization_endpoint,\n redirect_uri=request.base_url + \"callback\",\n scope=['openid', 'email', 'profile']\n )\n\n return {'redirect': request_uri}", "title": "" }, { "docid": "8ca50dd0de3a312d1105087fd541e23f", "score": "0.5956493", "text": "def auth():\n token = oauth.google.authorize_access_token()\n user = 
oauth.google.parse_id_token(token)\n user_hash = sha1(creds.backend.user_salt + user['email'])\n session['user_hash'] = user_hash\n app.logger.info(f\"User {user_hash} logged in\")\n return redirect(session.pop('redirect', None) or '/')", "title": "" }, { "docid": "50b1b60750d8f0a9487e7aa66e95cf3a", "score": "0.59391314", "text": "def authorize(simulation_type, oauth_type):\n oauth_next = '/{}#{}'.format(simulation_type, flask.request.args.get('next', ''))\n if oauth_type == _ANONYMOUS_OAUTH_TYPE:\n _update_session(_ANONYMOUS)\n cookie.clear_user()\n return server.javascript_redirect(oauth_next)\n state = util.random_base62()\n cookie.set_value(_COOKIE_NONCE, state)\n cookie.set_value(_COOKIE_NEXT, oauth_next)\n callback = cfg.github_callback_uri\n if not callback:\n from sirepo import uri_router\n callback = uri_router.uri_for_api(\n 'oauthAuthorized',\n dict(oauth_type=oauth_type),\n )\n return _oauth_client(oauth_type).authorize(\n callback=callback,\n state=state,\n )", "title": "" }, { "docid": "f47427bad7d68e66d05dc1627dea3c2b", "score": "0.59299195", "text": "def login():\n # the redirect URI, as a complete URI (not relative path)\n redirect_uri = url_for('login', _external=True)\n\n auth_client = load_app_client()\n auth_client.oauth2_start_flow(redirect_uri, \n requested_scopes='openid email profile urn:globus:auth:scope:auth.globus.org:view_identity_set')\n\n # If there's no \"code\" query string parameter, we're in this route\n # starting a Globus Auth login flow.\n # Redirect out to Globus Auth\n if 'code' not in request.args:\n auth_uri = auth_client.oauth2_get_authorize_url()\n return redirect(auth_uri)\n # If we do have a \"code\" param, we're coming back from Globus Auth\n # and can start the process of exchanging an auth code for a token.\n else:\n code = request.args.get('code')\n tokens_response = auth_client.oauth2_exchange_code_for_tokens(code)\n ids = tokens_response.decode_id_token(auth_client)\n session.update(\n tokens=tokens_response.by_resource_server,\n id_token=ids,\n username=ids['sub'],\n realname=ids['name'],\n is_authenticated=True\n )\n return redirect(url_for('index'))", "title": "" }, { "docid": "5537c576d294c4eb3f40fdf488faaf20", "score": "0.5925161", "text": "def authorize_redirect(self, redirect_uri=None, client_id=None,\r\n client_secret=None, extra_params=None,\r\n callback=None, scope=None, response_type=\"code\"):\r\n args = {\r\n \"redirect_uri\": redirect_uri,\r\n \"client_id\": client_id,\r\n \"response_type\": response_type\r\n }\r\n if extra_params:\r\n args.update(extra_params)\r\n if scope:\r\n args['scope'] = ' '.join(scope)\r\n self.redirect(\r\n url_concat(self._OAUTH_AUTHORIZE_URL, args))\r\n callback()", "title": "" }, { "docid": "23f7dfbe14b0bb12912a371e6eeddfaf", "score": "0.5919921", "text": "def get(self, request, code): # pylint: disable=unused-argument\n settings = models.SiteSettings.get()\n if request.user.is_authenticated or not settings.require_confirm_email:\n return redirect(\"/\")\n\n # look up the user associated with this code\n try:\n user = models.User.objects.get(confirmation_code=code)\n except models.User.DoesNotExist:\n return TemplateResponse(\n request, \"confirm_email/confirm_email.html\", {\"valid\": False}\n )\n # update the user\n user.is_active = True\n user.deactivation_reason = None\n user.save(broadcast=False, update_fields=[\"is_active\", \"deactivation_reason\"])\n # direct the user to log in\n return redirect(\"login\", confirmed=\"confirmed\")", "title": "" }, { "docid": 
"e2ef59d7236df523c2321c0ca3fcfc1d", "score": "0.59126455", "text": "def _VerifyAuthRedirectResponse(response):\r\n self.assertEqual(response.code, 302)\r\n self.assertTrue(response.headers['location'].startswith('/auth'))\r\n self.assertEqual(self._tester.GetCookieFromResponse(response), '')", "title": "" }, { "docid": "0a47c1adc8db52ae2a46abf3c4dd0144", "score": "0.59111184", "text": "def authorized_callback(oauth_type):\n oc = _oauth_client(oauth_type)\n resp = oc.authorized_response()\n if not resp:\n util.raise_forbidden('missing oauth response')\n state = _remove_cookie_key(_COOKIE_NONCE)\n if state != flask.request.args.get('state', ''):\n util.raise_forbidden(\n 'mismatch oauth state: {} != {}',\n state,\n flask.request.args.get('state'),\n )\n # fields: id, login, name\n user_data = oc.get('user', token=(resp['access_token'], '')).data\n user = _update_database(user_data, oauth_type)\n _update_session(_LOGGED_IN, user.user_name)\n return server.javascript_redirect(_remove_cookie_key(_COOKIE_NEXT))", "title": "" }, { "docid": "0abf115de779acc9f4161d4d49d25517", "score": "0.5904413", "text": "def _do_authenticate(self, http_client):", "title": "" }, { "docid": "2e35241323e67e1359759eac5e99213f", "score": "0.58997816", "text": "def success_redirect_endpoint(request):\n return _make_response(200)", "title": "" }, { "docid": "5c37731ca6caf24b800a6f4758619e7e", "score": "0.5898995", "text": "def listen_for_token_redirect(self):\n subprocess.call([\"open\", self.sp_oauth.get_authorize_url()])\n webserver = Web_Server()\n self.url = webserver.run()", "title": "" }, { "docid": "d51ff92a8b8b7390061e4798549af25c", "score": "0.5891893", "text": "def dropbox_auth_start(request):\n authorize_url = get_dropbox_auth_flow(request.session).start()\n return HttpResponseRedirect(authorize_url)", "title": "" }, { "docid": "6cc820538952955dac8f883c4ef2da78", "score": "0.5887156", "text": "def checkAuthorization(self):\n get_credential_details_url = f'https://{self.access_hostname}/-/client-api/active-grants/implicit'\n get_credential_details_url = self.formUrl(get_credential_details_url)\n\n credential_details_response = self.session.get(get_credential_details_url)\n return credential_details_response", "title": "" }, { "docid": "c5a9d6e0c9087aac5f99771c8c811253", "score": "0.58564806", "text": "def authorizeRequire(fn):\n @wraps(fn)\n def wrapper(*args, **kwds):\n user = session.get('SID', None)\n if user:\n return fn(*args, **kwds)\n else:\n return redirect(url_for('Login3'))\n return wrapper", "title": "" }, { "docid": "a268109ddb2840a104fcb5c513454386", "score": "0.58454776", "text": "def login():\n LOGGER.debug(\"login\")\n return redirect(build_auth_url())", "title": "" }, { "docid": "c75ffae01f72d633db7c53869b54b958", "score": "0.58430433", "text": "def authorize_redirect(self, redirect_uri=None, client_id=None,\r\n client_secret=None, extra_params=None,\r\n callback=None):\r\n args = {\r\n \"redirect_uri\": redirect_uri,\r\n \"client_id\": client_id\r\n }\r\n if extra_params:\r\n args.update(extra_params)\r\n self.redirect(\r\n url_concat(self._OAUTH_AUTHORIZE_URL, args))\r\n callback()", "title": "" }, { "docid": "a23105fb3d71b3ec360d8e5b9b52ffbb", "score": "0.58418", "text": "def onAuthenticate(self, signature, extra):\n print(\"onAuthenticate: {} {}\".format(signature, extra))\n\n ## if there is a pending auth, and the signature provided by client matches ..\n if self._pending_auth:\n if signature == auth.compute_totp(self._pending_auth.secret) or \\\n signature == 
auth.compute_totp(self._pending_auth.secret, 1) or \\\n signature == auth.compute_totp(self._pending_auth.secret, -1):\n ## accept the client\n return types.Accept(authid = self._pending_auth.authid,\n authrole = self._pending_auth.authrole,\n authmethod = self._pending_auth.authmethod,\n authprovider = self._pending_auth.authprovider)\n\n ## deny client\n return types.Deny()", "title": "" }, { "docid": "f9275c004ff37e996b7a2832ce716185", "score": "0.58397", "text": "def login():\n\treturn twitter.authorize(callback=url_for('oauthorized', next=request.args.get('next') or request.referrer or None))", "title": "" } ]
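Taken together, the negative passages in the record above all implement the same OAuth authorization-redirect step: build the provider's authorize URL with client_id, redirect_uri, response_type and a random state, send the user there, then compare the state again in the callback before exchanging the code. The sketch below restates that common shape using only the Python standard library; it is illustrative only, and AUTHORIZE_URL plus every parameter value are placeholders, not taken from any passage.

import secrets
from urllib.parse import urlencode

AUTHORIZE_URL = "https://provider.example/oauth/authorize"  # hypothetical endpoint, not from the dataset

def build_authorize_redirect(client_id, redirect_uri, scope="openid"):
    # Unguessable state token; the callback must compare it with the copy
    # kept in the session before trusting the authorization code.
    state = secrets.token_urlsafe(16)
    query = urlencode({
        "client_id": client_id,
        "redirect_uri": redirect_uri,
        "response_type": "code",
        "scope": scope,
        "state": state,
    })
    return AUTHORIZE_URL + "?" + query, state

A login view would store the returned state in the session and redirect to the returned URL; the callback view then rejects any request whose state query parameter does not match the stored value.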
b31ebce4c7201d1504501250d1b982c1
Receives the day and time in a list and returns it in a datetime form if it's valid
[ { "docid": "20900a8652daabd363d5bdd7a7c569e4", "score": "0.55915296", "text": "def check_dates(self, date):\n\n if len(date[0]) == 4: #Fyrsta stakið er árið, það verður að koma á formi fjögurra stafa því annars bætir datetime 0 við og úr verður algjört bull\n try:\n valid_date = datetime.datetime(int(date[0]), int(date[1]), int(date[2]), int(date[3]), int(date[4]), int(date[5])).isoformat()\n return valid_date\n except ValueError:\n return False\n else:\n return False", "title": "" } ]
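The positive passage above validates a [year, month, day, hour, minute, second] list by requiring a four-digit year and then letting the datetime constructor raise ValueError for anything invalid. Below is a minimal self-contained restatement of that idea; it is illustrative only, and the function name is invented here, not part of the dataset.

import datetime

def list_to_datetime(parts):
    # Expect six string fields: year, month, day, hour, minute, second.
    # Require a four-digit year, since a short year such as "24" would
    # otherwise be accepted as year 0024 and silently build the wrong date.
    if len(parts) != 6 or len(parts[0]) != 4:
        return False
    try:
        # int() and the datetime constructor both raise ValueError on bad input.
        return datetime.datetime(*(int(p) for p in parts)).isoformat()
    except ValueError:
        return False

# list_to_datetime(["2024", "2", "30", "12", "0", "0"]) -> False (there is no Feb 30)
# list_to_datetime(["2024", "2", "29", "12", "0", "0"]) -> "2024-02-29T12:00:00"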
[ { "docid": "da6dddcc9c81bc45e1377ae2ea2591ac", "score": "0.6805444", "text": "def check_time(self, date, unavailable_time_list): \n \n date_time = \":\".join(date[3:])\n new_list = []\n for unavailable_time_ob in unavailable_time_list:\n new_list.append(unavailable_time_ob.departure_time[11:])\n if date_time not in new_list:\n try:\n valid_time = datetime.datetime(int(date[0]), int(date[1]), int(date[2]), int(date[3]), int(date[4]), 0).isoformat()\n return valid_time\n except ValueError:\n return False\n else:\n return False", "title": "" }, { "docid": "d059232525544f451cf66e9a8c64b445", "score": "0.65078264", "text": "def make_datetime(lst):\n date_str = lst.split('_')[4]\n return datetime.strptime(date_str, '%H-%M-%S')", "title": "" }, { "docid": "7bf89ce51a3ad4a517834d09e57b9f33", "score": "0.6426541", "text": "def get_datetime(timestring): \n time_patterns = ['%Y-%m-%d %H:%M:%S.%f', '%Y-%m-%d %H:%M:%S']\n for pattern in time_patterns:\n try:\n return datetime.datetime.strptime(timestring, pattern)\n except:\n pass\n\n print(\"Date is not in expected format:\", timestring)\n \n sys.exit(0)", "title": "" }, { "docid": "8ea38c05413ee8478408fd8a6e4a2926", "score": "0.63908434", "text": "def datetime_check(ctx, param, value):\n if value is None:\n if param.name == 'start_time':\n raise click.BadParameter('You must provide a start time.')\n elif param.name == 'end_time':\n raise click.BadParameter('You must provide an end time.')\n else:\n raise click.BadParameter(f'I\\'m being called for {param.name} which is wrong.')\n try:\n # Dummy conversion. Just checking syntax now. Real conversion happens in main.\n denver_dt = pendulum.from_format(value, 'DD-MM-YYYY:HH:mm:ss')\n return value\n except:\n if param.name == 'start_time':\n raise click.BadParameter('Start datetime is not in the correct format.')\n elif param.name == 'end_time':\n raise click.BadParameter('End datetime is not in the correct format.')\n else:\n raise click.BadParameter(f'I\\'m being called for {param.name} which is wrong.')", "title": "" }, { "docid": "88eb3c14939973233de04347042949b3", "score": "0.60838884", "text": "def convert_time_format(t_list, dformat=0):\n save = []\n byear = 0\n for ent in t_list:\n out = Chandra.Time.DateTime(ent).date\n atemp = re.split(':', out)\n year = int(atemp[0])\n if byear == 0:\n byear = year\n if mcf.is_leapyear(byear):\n base = 366\n else:\n base = 365\n\n yday = float(atemp[1])\n hh = float(atemp[2])\n mm = float(atemp[3])\n ss = float(atemp[4])\n yday += hh / 24.0 + mm / 1440.0 + ss / 84600.0\n#\n#--- for the case that the time is in yday; assume that the range is always \n#--- smaller than two years\n#\n if dformat == 0:\n if year > byear:\n yday += base\n\n save.append(yday)\n#\n#--- for the case that the time is in fractional year\n#\n else:\n if year > byear:\n if mcf.is_leapyear(byear):\n base = 366\n else:\n base = 365\n\n byear = year\n\n fyear = year + yday / base\n save.append(fyear)\n\n return [save, byear]", "title": "" }, { "docid": "339f5c001e77c117fd8e60cecff194e5", "score": "0.6071859", "text": "def find_valid_times(self):\n tokens = VALID_TIME.findall(self.unixtext)\n if not tokens:\n self.warnings.append(\"failed to find VALID...\")\n return None, None\n day1 = int(tokens[0][0][:2])\n hour1 = int(tokens[0][0][2:4])\n min1 = int(tokens[0][0][4:])\n day2 = int(tokens[0][1][:2])\n hour2 = int(tokens[0][1][2:4])\n min2 = int(tokens[0][1][4:])\n issue = self.valid.replace(day=day1, hour=hour1, minute=min1)\n expire = self.valid.replace(day=day2, hour=hour2, 
minute=min2)\n if day1 < self.valid.day and day1 == 1:\n issue = self.valid + timedelta(days=25)\n issue = issue.replace(day=day1, hour=hour1, minute=min1)\n if day2 < self.valid.day and day2 == 1:\n expire = self.valid + timedelta(days=25)\n expire = expire.replace(day=day2, hour=hour2, minute=min2)\n\n return (\n issue.replace(tzinfo=timezone.utc),\n expire.replace(tzinfo=timezone.utc),\n )", "title": "" }, { "docid": "ebf16d2f738db9de72723459306ab068", "score": "0.6036512", "text": "async def validate_time_input(date: str, time: str) -> bool:\n try:\n dt = parse(f\"{date} {time}\", dayfirst=True)\n except ParserError:\n return False\n\n if dt < datetime.today():\n return False\n else:\n return True", "title": "" }, { "docid": "c3d516c84498fdd225bc48f0975d9cbc", "score": "0.60169977", "text": "def day_to_time_tuple (day_string) :\n for pat in _day_patterns :\n match = pat.match (day_string)\n if match :\n return Time_Tuple (** match.groupdict ())\n raise ValueError (day_string)", "title": "" }, { "docid": "e7c6bb71dbcbc62632ed01eba93e9a8a", "score": "0.5928072", "text": "def strings2datetime(date, time):\n y, m, d = map(int, date.split('/'))\n H, M, S = map(int, time.split(':'))\n dt = datetime.datetime(y, m, d, H, M, S)\n return dt", "title": "" }, { "docid": "bfcb7a0e6bf0a5c7f06c3f949e3a27a8", "score": "0.59162146", "text": "def convert_datetimes(timestamp_data):\n error_indices = []\n for i in range(0, len(timestamp_data)):\n try:\n datetime_obj = parser.parse(timestamp_data[i])\n datetime_obj = datetime_obj + timedelta(hours=3)\n timestamp_data[i] = datetime_obj.isoformat()\n except parser.ParserError:\n sys.stderr.write('Warning: Invalid character in date field.\\n')\n error_indices.append(i)\n\n return error_indices", "title": "" }, { "docid": "5214b3e65b0b1b254cf03114881e240c", "score": "0.5712888", "text": "def valid_time(value):\n\n if not isinstance(value, str):\n raise argparse.ArgumentTypeError(\n f\"Input must be string type, you have specified '{value}' which is of type {type(value)}\"\n )\n\n fmt = \"%Y-%m-%d\"\n\n try:\n dt_object = datetime.datetime.strptime(value, fmt)\n except ValueError:\n console.print(f\"[red]Unable to convert {value} to correct date format\")\n console.print_exception()\n raise ValueError\n\n return dt_object", "title": "" }, { "docid": "8c51f67205127f7409737640c7f57405", "score": "0.56761235", "text": "def _check_date_format(self, date):\n # Initialize time for given value of date\n time = None\n try:\n # Check for the time is in valid format or not\n time = datetime.strptime(date, GC_DATE_FORMAT)\n except Exception as e:\n self.debug_print(f\"Invalid date string received. Error occurred while checking date format. 
Error: {str(e)}\")\n return False, None\n return True, time", "title": "" }, { "docid": "bb714dba2a047e3a9d3522cbb613026d", "score": "0.56392944", "text": "def get_astropytime(dates, times=None):\n # Process dates\n if dates is None or dates is False:\n return None\n if isinstance(dates, list) or isinstance(times, (list, ndarray)):\n if isinstance(times, ndarray):\n times = list(times)\n if not isinstance(times, list):\n times = [times] * len(dates)\n if not isinstance(dates, list):\n dates = [dates] * len(times)\n if len(dates) != len(times):\n raise ValueError(\"dates/times list lengths must match.\")\n return_Time = []\n if len(dates) > 1000:\n print(\"Converting {} time entries - could take a moment.\".format(len(dates)))\n for _date, _time in zip(dates, times):\n return_Time.append(get_astropytime(_date, _time))\n return Time(return_Time)\n if isinstance(dates, str):\n if dates.lower() == 'none':\n return None\n if dates == '<':\n return Time('2000-01-01', scale='utc')\n if dates == '>':\n return Time.now() + TimeDelta(1000, format='jd')\n if dates.lower() == 'now' or dates.lower() == 'current':\n return Time.now()\n if isinstance(dates, Time):\n return_Time = dates\n elif isinstance(dates, datetime):\n return_Time = Time(dates, format='datetime')\n else:\n try:\n dates = float(dates)\n if dates > 1000000000.0:\n return_Time = Time(dates, format='gps')\n elif dates > 2400000.0 and dates < 2500000.0:\n return_Time = Time(dates, format='jd')\n else:\n raise ValueError(f'Invalid format: date as a number should be gps time '\n f'or julian date, not {dates}.')\n except ValueError:\n dates = dates.replace('/', '-')\n try:\n return_Time = Time(dates, scale='utc')\n except ValueError:\n raise ValueError(\n f'Invalid format: YYYY[/-]M[/-]D [HH:MM:SS], not {dates}')\n # add on times\n if times is None or abs(times) < 1E-6:\n return return_Time\n try:\n times = float(times)\n return return_Time + TimeDelta(times * 3600.0, format='sec')\n except ValueError:\n pass\n sign_of_times = 1.0\n if times[0] == '-':\n sign_of_times = -1.0\n times = times[1:]\n add_time = 0.0\n for i, d in enumerate(times.split(':')):\n add_time += (float(d)) * 3600.0 / (60.0**i)\n add_time *= sign_of_times\n return return_Time + TimeDelta(add_time, format='sec')", "title": "" }, { "docid": "a5a27e736d2674cfcbbecdf565b1da11", "score": "0.56303024", "text": "def _convert_time(time_data):\n if isinstance(time_data, list) and len(time_data) == 2:\n if time_data[0] == 1:\n return int(time_data[1])\n if time_data[0] == 2:\n return datetime.fromtimestamp(time_data[1])\n if time_data[0] == 3:\n return datetime.fromtimestamp(time_data[1][0])\n return None", "title": "" }, { "docid": "8f68aca0e833951a5473428daa5a038f", "score": "0.5615581", "text": "def _str_to_datetime(val):\r\n tmp_date = datetime.date(int(val[:4]), int(val[4:6]), int(val[6:8]))\r\n\r\n time_str = val[8:]\r\n assert len(time_str) in [1, 3, 4]\r\n if len(time_str) == 1:\r\n # midnight - only one number\r\n return datetime.datetime.combine(tmp_date, datetime.time(int(time_str)))\r\n elif len(time_str) == 3:\r\n # hmm format\r\n return datetime.datetime.combine(tmp_date, datetime.time(int(time_str[:1]), int(time_str[1:])))\r\n elif len(time_str) == 4:\r\n # hhmm format\r\n return datetime.datetime.combine(tmp_date, datetime.time(int(time_str[:2]), int(time_str[2:])))", "title": "" }, { "docid": "8205c036abde12a9f18f151cfa0b9b6b", "score": "0.5615326", "text": "def parseTimeStr(timeStr):\n if len(timeStr.split('-')) == 3:\n dt = datetime.datetime.strptime(timeStr, 
'%Y-%m-%d')\n elif len(timeStr.split('-')) == 4:\n dt = datetime.datetime.strptime(timeStr, '%Y-%m-%d-%H')\n elif len(timeStr.split('-')) == 5:\n dt = datetime.datetime.strptime(timeStr, '%Y-%m-%d-%H-%M')\n elif len(timeStr.split('-')) == 6:\n dt = datetime.datetime.strptime(timeStr, '%Y-%m-%d-%H-%M-%S')\n else:\n raise Exception('Could not parse date string:', timeStr)\n return dt", "title": "" }, { "docid": "a2384eefa174ad7279d606fb4e3db464", "score": "0.55970067", "text": "def createDatetime(yr, mo, dy, hr):\n import datetime as dt\n\n datetime = []\n for i in range(len(yr)):\n time = dt.datetime(yr[i], mo[i], dy[i], hr[i])\n datetime.append(time)\n\n return datetime", "title": "" }, { "docid": "3dc406aa87cdfb70366f9d9dd23b898c", "score": "0.5587867", "text": "def parse_non_naive_dates(datetimes: typing.Sequence[str], *args, **kwargs) -> typing.Sequence[datetime]:\n data = list()\n\n for date_string in datetimes:\n date_and_time = parse_date_string(str(date_string))\n\n if date_and_time.tzinfo is None:\n date_and_time = date_and_time.replace(tzinfo=timezone.utc)\n\n data.append(date_and_time)\n\n return data", "title": "" }, { "docid": "09408914e149a50a29b10a6f2dba6f3c", "score": "0.55782795", "text": "def _validate_time_other_format(self, action_result, value):\n # Checking date format for 'start_time' format\n check, time = self._check_date_format(value)\n if not check:\n try:\n # Check date format for '<digit><d/h/m/s>' format\n if self._check_timerange(value.lower()):\n # Derive time period using time range\n ret_val, start_date, end_date = self._derive_time_period(action_result, value.lower())\n if phantom.is_fail(ret_val):\n return action_result.set_status(phantom.APP_ERROR, GC_ON_POLL_INVALID_TIME_ERROR), None\n\n # Return time period\n return phantom.APP_SUCCESS, [start_date, end_date]\n else:\n # Given time range value not matched with any of the possible format of date\n return action_result.set_status(phantom.APP_ERROR, GC_ON_POLL_INVALID_TIME_ERROR), None\n except OverflowError:\n return action_result.set_status(phantom.APP_ERROR, f\"{GC_UTC_SINCE_TIME_ERROR} {GC_ON_POLL_INVALID_TIME_ERROR}\"), None\n except Exception as e:\n return action_result.set_status(phantom.APP_ERROR, f\"{GC_ON_POLL_INVALID_TIME_ERROR} Error: {str(e)}\"), None\n\n # Derive end time\n ret_val, end_time = self._derive_end_time(action_result, time)\n if phantom.is_fail(ret_val):\n return action_result.get_status(), None\n\n return phantom.APP_SUCCESS, [time.strftime(GC_DATE_FORMAT), end_time.strftime(GC_DATE_FORMAT)]", "title": "" }, { "docid": "2316abfa2dcf3796443ee7e9849b6307", "score": "0.5570764", "text": "def time2datetime(date):\n assert isinstance(date, (time.struct_time, tuple, list))\n date = datetime.datetime(*date[:7])\n return date", "title": "" }, { "docid": "0bdcc72678819f15840939ef8e5e8672", "score": "0.55421054", "text": "def _parse_listed_date(self, data):\n return datetime.strptime(data, '%Y/%m/%d')", "title": "" }, { "docid": "702aedeb53a1218bbc4cae4d24256c77", "score": "0.5529178", "text": "def convert_time(time):\n base_time = dt.datetime(1900,1,1,0,0,0)\n return [base_time + dt.timedelta(days=d) for d in time]", "title": "" }, { "docid": "5c2de9825252ae6fb6ed8b7e9b3bbba1", "score": "0.55242705", "text": "def string_to_date(string):\n for char in ' {}[]': #Clean out any extra characters\n string=string.replace(char,'')\n \n try: \n (M,D,Y)=string.split('/') #Split along / dividers\n gettime=datetime(year=int(Y),month=int(M),day=int(D)) #Convert to datetime \n return gettime \n \n 
except: #If something went wrong, you input a bad data format.\n return None", "title": "" }, { "docid": "4ff6f287b13b96f0ad3a4ef602a7ffe7", "score": "0.5522919", "text": "def test_day_list(self):\n\n start_date = datetime(2019, 10, 1, 1)\n end_date = datetime(2019, 10, 1, 5)\n\n day_list = utils.daylist(start_date, end_date)\n\n self.assertTrue(len(day_list) == 1)\n self.assertTrue(day_list[0] == start_date.date())\n\n start_date = datetime(2017, 7, 2, 1)\n end_date = datetime(2017, 7, 10, 5)\n\n day_list = utils.daylist(start_date, end_date)\n\n self.assertTrue(len(day_list) == 9)\n self.assertTrue(day_list[0] == start_date.date())\n self.assertTrue(day_list[-1] == end_date.date())", "title": "" }, { "docid": "0e9c37eb26c72c72317a266d6cdf1b6c", "score": "0.5510502", "text": "def get_datetime(date_string):\n #if timestampe is found in string, its a datetime else, just date\n if date_string.find('T') > 0:\n return parse_datetime(date_string)\n return parse_date(date_string)", "title": "" }, { "docid": "da90b8e586a40b45393267200d106c5b", "score": "0.550845", "text": "def try_parsing_date(xs: str) -> Optional[datetime]:\n # https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior\n for fmt in (\"%d %b %Y: %H:%M\", \"%d %b %Y : %H:%M\", \"%d %b %y %H:%M\"):\n try:\n return datetime.strptime(xs, fmt).replace(tzinfo=BRISBANE_TIME_ZONE)\n except ValueError:\n pass\n\n return None", "title": "" }, { "docid": "e0d4113dceb270b4973fb4afa44199a7", "score": "0.5495461", "text": "def __convert_ecb_time_format_to_datetime(self, time_string):\n time_string_split = time_string.split(\"-\")\n if len(time_string_split) == 1:\n return parse(time_string + \"-12-31\")\n elif len(time_string_split) == 2:\n return self.__format_two_parts_date(time_string_split)\n else:\n return parse(time_string)", "title": "" }, { "docid": "314bebf3f7a4720f4ec7f378d7ee93ef", "score": "0.54859185", "text": "def get_datetime(time):\n if \":\" == time[-3:-2]: # Fix for the colon in the TZ information\n time = time[:-3]+time[-2:]\n return datetime.strptime(time, FORMAT_STRING)", "title": "" }, { "docid": "42bd6ed84a3f9dcc83e4fd6388628a53", "score": "0.547628", "text": "def _parse_windates(v):\n if isinstance(v,(list,tuple)):\n out=list(v)\n for i in range(len(v)):\n out[i] =_parse_windates(v[i])\n else:\n out=v\n if isinstance(out, _pywintypes.TimeType):\n out = _datetime.datetime(v.year, v.month, v.day, v.hour, v.minute, v.second)\n return out", "title": "" }, { "docid": "f95b62b4a9a52a00b73d6681052c6828", "score": "0.5473703", "text": "def check_dttime(self, start, end):\r\n\r\n # Convert to timedelta to make checking easier\r\n t1 = timedelta(hours = start.hour, minutes = start.minute)\r\n t2 = timedelta(hours = end.hour, minutes = end.minute)\r\n\r\n # Check to make sure times are in correct order\r\n if t1 > t2:\r\n print(\"\\n<Error>:\\n\\tEnd time cannot be earlier than start time\\n\")\r\n # Check to make sure times don't exceed limit\r\n elif ( (t2 - t1) > timedelta(hours = 4, minutes = 14)): \r\n print(\"\\n<Error>:\\n\\tTotal duration cannot exceed 4 hrs and 14 min\\n\")\r\n else:\r\n # Inputs were valid \r\n return self.to_bits_conv(start, end)\r\n\r\n # Inputs were invalid\r\n return None", "title": "" }, { "docid": "d4865c262210bd1a29f501355fe81e1d", "score": "0.5473241", "text": "def transform_time(data):\n date = datetime.fromtimestamp(data['departure_time'])\n\n departure_timestamp = data['departure_time']\n minute = date.minute\n hour = date.hour\n day = date.isoweekday()\n week_of_year = 
date.isocalendar()[1]\n month = date.month\n year = date.year\n\n if day in [6, 7]:\n is_weekday = False\n else:\n is_weekday = True\n\n return (\n departure_timestamp,\n minute,\n hour,\n day,\n week_of_year,\n month,\n year,\n is_weekday\n )", "title": "" }, { "docid": "da1b6df77fd82bdf31a645cc4985d5c9", "score": "0.54722255", "text": "def __parseISODate(self,dval):\n daytime = dval.strip().strip(\"'\").split('T')\n ymd = daytime[0].split('-')\n if len(daytime) == 2:\n hms = daytime[1].split(':')\n else: hms = []\n return ymd+hms", "title": "" }, { "docid": "38040c9f4d47e539ffd1012cf02c75fd", "score": "0.5458682", "text": "def _read_datetime(self, value):\n date_formats = self.date_formats\n for date_format in date_formats:\n try:\n return datetime.datetime.strptime(value, date_format)\n except (TypeError, ValueError):\n pass\n\n return None", "title": "" }, { "docid": "52202ad5ec2431cc07a619064c3d645b", "score": "0.5457634", "text": "def convert_time(t):\n\n expression = r'\\d+'\n pattern = re.compile(expression)\n match = pattern.findall(t)\n if match:\n year, month, day, hour, minute, second = match[0:6]\n t = datetime.datetime(int(year),\n int(month),\n int(day),\n int(hour),\n int(minute),\n int(round(float(second), 0)))\n return t", "title": "" }, { "docid": "b6bfa46993279ecd85b05e6a010ab3b3", "score": "0.5452837", "text": "def make_datetime(dates):\n \n import operator\n from datetime import datetime\n from datetime import timedelta\n \n dates = list(dates)\n \n sep = 'T'\n \n just_dates = [date.split(sep, 1)[0] if type(date) == str else np.nan for date in dates]\n just_times = [date.split(sep, 1)[1] if type(date) == str else np.nan for date in dates]\n \n # Datetime object for just the dates:\n just_dates = [datetime.strptime(date, '%Y-%m-%d') if type(date) == str else np.nan for date in just_dates]\n \n # Convert times to datetime and also universalize offsets.\n ops = {'+' : operator.sub, '-' : operator.add}\n \n for i, time in enumerate(just_times):\n if type(time) == str:\n time, op, zone = time[:8], time[8], time[9:]\n time = datetime.strptime(time,\"%H:%M:%S\")\n res1 = ops[op](time, timedelta(hours = int(zone[:2])))\n just_times[i] = ops[op](res1, timedelta(minutes = int(zone[3:])))\n \n # Combine results into datetime object:\n \n return [datetime.combine(datetime.date(one), datetime.time(two)) if type(one) == datetime else np.nan\n for one, two in zip(just_dates, just_times)]", "title": "" }, { "docid": "b945e1b471d41050bf774b3630887f15", "score": "0.5442804", "text": "def _getTime(self, date, time):\n if(date is None or time is None or len(date) == 0 or len(time) == 0):\n return None\n \n return datetime.strptime( date + 'T' + time, self.timeformat )", "title": "" }, { "docid": "a91521140406dd8e1e6073a4fb9be3ef", "score": "0.54389864", "text": "def check_datetime(self, text: str) -> datetime:\n try:\n res = dateutil.parser.parse(text)\n return res\n except:\n try:\n res = datetime.utcfromtimestamp(int(text))\n return res\n except ValueError:\n pass", "title": "" }, { "docid": "0b4423646c7d2df06fc3073874f6a0f2", "score": "0.54373586", "text": "def prep_datetime(self,dt):\n import datetime\n if not type(dt) is datetime.datetime:\n import datetime\n dt=datetime.datetime(dt)\n return dt.year, dt.weekday(), dt.month, dt.day, dt.day > 4, (dt.hour*3600) + (dt.minute)*60 + dt.second", "title": "" }, { "docid": "8627d69d4c2c9dc3f07d1783880eb524", "score": "0.54174554", "text": "def date2datetime(day):\n if hasattr(day, \"tzinfo\"):\n dtime = datetime.time(tzinfo=day.tzinfo)\n 
else:\n dtime = datetime.time()\n dt = datetime.datetime.combine(day, dtime)\n return dt", "title": "" }, { "docid": "9fc2f0aa397504d942ff4164e72d30a6", "score": "0.54169744", "text": "def twitch_time_to_datetime(twitch_time):\n for ch in [\"-\", \"T\", \"Z\", \":\"]:\n twitch_time = twitch_time.replace(ch, \"\")\n\n return datetime.strptime(twitch_time, \"%Y%m%d%H%M%S\")", "title": "" }, { "docid": "d69db531383d2b356ce67b250aec3a00", "score": "0.54143786", "text": "def processTime(line):\n line[10] = float(line[10])\n line[11] = float(line[11])\n t = line[9]\n try:\n t = datetime.datetime.strptime(t, '%Y-%m-%d %H:%M:%S')\n except:\n t = None\n if t != None:\n # clean the data\n t = str(t).split(' ')\n if t[0] != '2018-04-01':\n t[0] = '2018-04-01'\n t = t[0] + ' ' + t[1]\n t = t = datetime.datetime.strptime(t, '%Y-%m-%d %H:%M:%S')\n line[9] = t\n return line", "title": "" }, { "docid": "03559aad00736b85f980b9819797b18d", "score": "0.5413542", "text": "def which_date(start_date, time):\n\n import datetime\n\n date = datetime.datetime.strptime(start_date, '%Y/%m/%d')\n delta = time.split(' ')\n\n if delta[1] == 'days':\n return datetime.datetime.strftime(date + datetime.timedelta(days=int(delta[0])), '%Y/%m/%d')\n else:\n return datetime.datetime.strftime(date + datetime.timedelta(days=int(delta[0])*7), '%Y/%m/%d')", "title": "" }, { "docid": "e4cce8638eec8226f1b94c615f3072aa", "score": "0.5412996", "text": "def parse_date_with_timerange(date_with_timerange_raw):\n words = date_with_timerange_raw.split(\" \")\n date_raw = words[0]\n if len(words) < 2:\n return None, None\n else:\n for word in words[1:]:\n if parse_date(date_raw + \" \" + word):\n date_raw += \" \" + word\n else:\n # confirms that there is only 1 item in list after date (i.e. the timerange)\n if len(words) != words.index(word) + 1:\n return None, None\n time_range = words[-1]\n break\n\n time_div = time_range.find(\"-\")\n if not time_div:\n return None, None\n\n start_time_raw = time_range[:time_div]\n end_time_raw = time_range[time_div + 1:]\n start_time = parse_time(start_time_raw)\n end_time = parse_time(end_time_raw)\n date = parse_date(date_raw)\n\n start_datetime = datetime.datetime.combine(date, start_time)\n end_datetime = datetime.datetime.combine(date, end_time)\n return start_datetime, end_datetime", "title": "" }, { "docid": "b14eb7d011780128b89db44d08366b1d", "score": "0.54088116", "text": "def _parse_start_datetime(self, item):\n if 'cancel' in item[2].lower():\n return ''\n\n if not item[1].strip():\n if 'stakeholder' in item[0].lower():\n time = '8:30 a.m.'\n if 'performance oversight' in item[0].lower():\n time = '9:00 a.m.'\n else:\n time = '9:30 a.m.'\n else:\n time = item[1]\n date = ('{year} {date}'.format(year=self.event_year, date=item[2]))\n\n time_string = '{0} {1}'.format(date, time)\n return (parse(time_string))", "title": "" }, { "docid": "f3a238bed16bca55815d7d0a93b6ad77", "score": "0.54009736", "text": "def classify_time(time):\n hour = datetime.datetime.strptime(time, \"%H:%M\").time().hour\n return get_part_of_day(hour)", "title": "" }, { "docid": "40e4a4194612307d594509cc96149fdc", "score": "0.5400025", "text": "def call_time():\n t0 = datetime.datetime.fromtimestamp(time.time())\n YY, MM = t0.year, str(t0.month).zfill(2)\n DD = str(t0.day).zfill(2)\n hh, mm = str(t0.hour).zfill(2), str(t0.minute).zfill(2)\n ss = str(t0.second).zfill(2)\n wa_date = '{}-{}-{}-'.format(DD, MM, YY)\n wa_time = '{}{}h{}m'.format(wa_date, hh, mm)\n t_return = [wa_date, wa_time]\n return t_return", "title": "" }, 
{ "docid": "9cd2457914f35e1b57d243fed3cc5ded", "score": "0.53967965", "text": "def makedatetime(s):\n return datetime.datetime(*parse_datetime(s))", "title": "" }, { "docid": "703885b5f39c2c8165a20973a47cb5f9", "score": "0.5392787", "text": "def int_to_daytime(daytime):\n if not 0 <= daytime <= 3:\n return \"\"\n return [\"Night\", \"Morning\", \"Afternoon\", \"Evening\"][daytime]", "title": "" }, { "docid": "721226ae6175d5c400914881a6283008", "score": "0.5371358", "text": "def datetime(item):", "title": "" }, { "docid": "105269a60baf8bc87551569dfd3ed37f", "score": "0.5355542", "text": "def str_to_datetime(time):\n year = int(time[:4])\n month = int(time[4:6])\n try:\n day = int(time[6:8])\n except:\n day = 1\n try:\n hour = int(time[8:10])\n except Exception:\n hour = 0\n try:\n minute = int(time[10:12])\n except Exception:\n minute = 0\n return datetime(year, month, day, hour, minute)", "title": "" }, { "docid": "2b781adaf759a78b7643aba78434730c", "score": "0.5355499", "text": "def process_datetime(self, cols_of_datetime):\n pass", "title": "" }, { "docid": "1ad5f6698b1e888b39102fde1dbf84c9", "score": "0.53473675", "text": "def input_list_to_time_list(input_list):\n time_list = []\n for x,input in enumerate(input_list):\n if input != 0 and input != '0':\n time_24 = x // 2\n minutes = 0\n if x % 2 == 1:\n minutes = 30\n time_list.append(time(hour=time_24, minute=minutes))\n return time_list", "title": "" }, { "docid": "603d26f34f279261e095f86ada4e1945", "score": "0.5334922", "text": "def _verify_time_format(time):\n if not fullmatch('\\d{2}-\\d{2} \\d{2}:\\d{2}', time):\n raise RuntimeError('Incorrect Time Format!')", "title": "" }, { "docid": "01e8ef15b14146a4ee627eb0ce4e7ffb", "score": "0.5329818", "text": "def parse(self, argument: str) -> datetime.datetime:\n \n argument = argument.lower()\n self.now = datetime.datetime.utcnow()\n \n try:\n date = Date.parse(argument)\n return datetime.datetime(date.year, date.month, date.day, 0, 0, 0)\n except (error.ParserError) as e:\n pass\n\n try:\n time = Time.parse(argument)\n return datetime.datetime(self.now.year, self.now.month, self.now.day, time.hour, time.minute, time.second)\n except (error.ParserError) as e:\n pass\n\n match = re.fullmatch(regex.Regex.TOMORROW_AT_HOUR, argument)\n if (match):\n # tomorrow at 0\n # tomorrow at 0am\n # tomorrow at 0 am\n # tomorrow at 0pm\n # tomorrow at 0 pm\n # tomorrow at 00\n # tomorrow at 00am\n # tomorrow at 00 am\n # tomorrow at 00pm\n # tomorrow at 00 pm\n # until tomorrow at 0\n # until tomorrow at 0am\n # until tomorrow at 0 am\n # until tomorrow at 0pm\n # until tomorrow at 0 pm\n # until tomorrow at 00\n # until tomorrow at 00am\n # until tomorrow at 00 am\n # until tomorrow at 00pm\n # until tomorrow at 00 pm\n\n hour = int(match.group(\"hour\"))\n\n meridies = match.group(\"meridies\")\n if (meridies == \"pm\"):\n hour += 12\n\n if (hour not in range(0, 24)):\n raise error.ParserError(self, \"hour '{0}' is out of range\".format(hour))\n\n tomorrow = self.now + datetime.timedelta(days=1)\n return datetime.datetime(tomorrow.year, tomorrow.month, tomorrow.day, hour, 0, 0)\n\n match = re.fullmatch(regex.Regex.TOMORROW_AT_TIME, argument)\n if (match):\n # tomorrow at 00:00\n # tomorrow at 00:00:00\n # until tomorrow at 00:00\n # until tomorrow at 00:00:00\n\n hour = int(match.group(\"hour\"))\n minute = int(match.group(\"minute\"))\n\n second = match.group(\"second\")\n if (second):\n second = int(second)\n else:\n second = 0\n\n if (hour not in range(0, 24)):\n raise error.ParserError(self, 
\"hour '{0}' is out of range\".format(hour))\n elif (minute not in range(0, 60)):\n raise error.ParserError(self, \"minute '{0}' is out of range\".format(minute))\n elif (second not in range(0, 60)):\n raise error.ParserError(self, \"second '{0}' is out of range\".format(second))\n \n tomorrow = self.now + datetime.timedelta(days=1)\n return datetime.datetime(tomorrow.year, tomorrow.month, tomorrow.day, hour, minute, second)\n\n match = re.fullmatch(regex.Regex.US_DATE_TIME, argument)\n if (match):\n # 12/31/00 00:00\n # 12/31/00 at 00:00\n # 12/31/00 00:00:00\n # 12/31/00 at 00:00:00\n # 12/31/0000 00:00\n # 12/31/0000 at 00:00\n # 12/31/0000 00:00:00\n # 12/31/0000 at 00:00:00\n # 12-31-00 00:00\n # 12-31-00 at 00:00\n # 12-31-00 00:00:00\n # 12-31-00 at 00:00:00\n # 12-31-0000 00:00\n # 12-31-0000 at 00:00\n # 12-31-0000 00:00:00\n # 12-31-0000 at 00:00:00\n # on 12/31/00 00:00\n # on 12/31/00 at 00:00\n # on 12/31/00 00:00:00\n # on 12/31/00 at 00:00:00\n # on 12/31/0000 00:00\n # on 12/31/0000 at 00:00\n # on 12/31/0000 00:00:00\n # on 12/31/0000 at 00:00:00\n # on 12-31-00 00:00\n # on 12-31-00 at 00:00\n # on 12-31-00 00:00:00\n # on 12-31-00 at 00:00:00\n # on 12-31-0000 00:00\n # on 12-31-0000 at 00:00\n # on 12-31-0000 00:00:00\n # on 12-31-0000 at 00:00:00\n # until 12/31/00 00:00\n # until 12/31/00 at 00:00\n # until 12/31/00 00:00:00\n # until 12/31/00 at 00:00:00\n # until 12/31/0000 00:00\n # until 12/31/0000 at 00:00\n # until 12/31/0000 00:00:00\n # until 12/31/0000 at 00:00:00\n # until 12-31-00 00:00\n # until 12-31-00 at 00:00\n # until 12-31-00 00:00:00\n # until 12-31-00 at 00:00:00\n # until 12-31-0000 00:00\n # until 12-31-0000 at 00:00\n # until 12-31-0000 00:00:00\n # until 12-31-0000 at 00:00:00\n\n month = int(match.group(\"month\"))\n day = int(match.group(\"day\"))\n\n year = match.group(\"year\")\n if (len(year) == 2):\n year = str(self.now.year)[:2] + year\n\n year = int(year)\n\n hour = int(match.group(\"hour\"))\n minute = int(match.group(\"minute\"))\n\n second = match.group(\"second\")\n if (second):\n second = int(second)\n else:\n second = 0\n \n return datetime.datetime(year, month, day, hour, minute, second)\n\n match = re.fullmatch(regex.Regex.EU_DATE_TIME, argument)\n if (match):\n # 31/12/00 00:00\n # 31/12/00 at 00:00\n # 31/12/00 00:00:00\n # 31/12/00 at 00:00:00\n # 31/12/0000 00:00\n # 31/12/0000 at 00:00\n # 31/12/0000 00:00:00\n # 31/12/0000 at 00:00:00\n # 31-12-00 00:00\n # 31-12-00 at 00:00\n # 31-12-00 00:00:00\n # 31-12-00 at 00:00:00\n # 31-12-0000 00:00\n # 31-12-0000 at 00:00\n # 31-12-0000 00:00:00\n # 31-12-0000 at 00:00:00\n # on 31/12/00 00:00\n # on 31/12/00 at 00:00\n # on 31/12/00 00:00:00\n # on 31/12/00 at 00:00:00\n # on 31/12/0000 00:00\n # on 31/12/0000 at 00:00\n # on 31/12/0000 00:00:00\n # on 31/12/0000 at 00:00:00\n # on 31-12-00 00:00\n # on 31-12-00 at 00:00\n # on 31-12-00 00:00:00\n # on 31-12-00 at 00:00:00\n # on 31-12-0000 00:00\n # on 31-12-0000 at 00:00\n # on 31-12-0000 00:00:00\n # on 31-12-0000 at 00:00:00\n # until 31/12/00 00:00\n # until 31/12/00 at 00:00\n # until 31/12/00 00:00:00\n # until 31/12/00 at 00:00:00\n # until 31/12/0000 00:00\n # until 31/12/0000 at 00:00\n # until 31/12/0000 00:00:00\n # until 31/12/0000 at 00:00:00\n # until 31-12-00 00:00\n # until 31-12-00 at 00:00\n # until 31-12-00 00:00:00\n # until 31-12-00 at 00:00:00\n # until 31-12-0000 00:00\n # until 31-12-0000 at 00:00\n # until 31-12-0000 00:00:00\n # until 31-12-0000 at 00:00:00\n\n day = int(match.group(\"day\"))\n month = 
int(match.group(\"month\"))\n\n year = match.group(\"year\")\n if (len(year) == 2):\n year = str(self.now.year)[:2] + year\n\n year = int(year)\n\n if (not year):\n raise error.ParserError(self, \"year '{0}' is out of range\".format(year))\n\n hour = int(match.group(\"hour\"))\n minute = int(match.group(\"minute\"))\n\n second = match.group(\"second\")\n if (second):\n second = int(second)\n else:\n second = 0\n\n if (hour not in range(0, 24)):\n raise error.ParserError(self, \"hour '{0}' is out of range\".format(hour))\n elif (minute not in range(0, 60)):\n raise error.ParserError(self, \"minute '{0}' is out of range\".format(minute))\n elif (second not in range(0, 60)):\n raise error.ParserError(self, \"second '{0}' is out of range\".format(second))\n \n try:\n return datetime.datetime(year, month, day, hour, minute, second)\n except (ValueError) as e:\n raise error.ParserError(self, \"day '{0}' is out of range\".format(day))\n\n match = re.fullmatch(regex.Regex.ISO8601, argument)\n if (match):\n year = int(match.group(\"year\"))\n month = int(match.group(\"month\"))\n day = int(match.group(\"day\"))\n hour = int(match.group(\"hour\"))\n minute = int(match.group(\"minute\"))\n\n if (not year):\n raise error.ParserError(None, \"year '{0}' is out of range\".format(year))\n elif (hour not in range(0, 24)):\n raise error.ParserError(None, \"hour '{0}' is out of range\".format(hour))\n elif (minute not in range(0, 60)):\n raise error.ParserError(None, \"minute '{0}' is out of range\".format(minute))\n \n try:\n return datetime.datetime(year, month, day, hour, minute, 0)\n except (ValueError) as e:\n raise error.ParserError(None, \"day '{0}' is out of range\".format(day))\n\n match = re.fullmatch(regex.Regex.ANY_HUMANIZED_TIME, argument)\n if (match):\n new = datetime.timedelta()\n\n seconds = match.group(\"seconds\")\n if (seconds):\n seconds = int(seconds)\n if (seconds):\n new += datetime.timedelta(seconds=seconds)\n\n minutes = match.group(\"minutes\")\n if (minutes):\n minutes = int(minutes)\n if (minutes):\n new += datetime.timedelta(minutes=minutes)\n\n hours = match.group(\"hours\")\n if (hours):\n hours = int(hours)\n if (hours):\n new += datetime.timedelta(hours=hours)\n\n days = match.group(\"days\")\n if (days):\n days = int(days)\n if (days):\n new += datetime.timedelta(days=days)\n\n weeks = match.group(\"weeks\")\n if (weeks):\n weeks = int(weeks)\n if (weeks):\n new += datetime.timedelta(weeks=weeks)\n\n if (new > datetime.timedelta()):\n return self.now + new\n\n raise error.ParserError(self, \"couldn't parse datetime from '{0}'\".format(argument))", "title": "" }, { "docid": "067490a6fad73f3285f9f344254e6d2a", "score": "0.53178936", "text": "def parse_date_time(date_str, time_str):\r\n\r\n day,month,year = [int(x) for x in date_str.split('/')]\r\n hour,minute,second = [int(x) for x in time_str.split(':')]\r\n return datetime.datetime(year+2000,month,day,hour,minute,second)", "title": "" }, { "docid": "c6a8dfc7d8a6daac8c16f2156d73d17d", "score": "0.53131485", "text": "def validate_time(time, context=None):\n error = None\n\n # Check that there is a valid date string\n pattern = \"([0-1][0-9]|[2][0-3])[0-5][0-9]\"\n fmt_time = re.search(pattern, time)\n extra = re.sub(pattern, '', time)\n\n if fmt_time is None or len(extra.strip()) > 0:\n error = 'I can\\'t understand what you mean by {}.'.format(time)\n\n return error", "title": "" }, { "docid": "5362835c0c23f64ed7ff0a6a57bc60c8", "score": "0.53126556", "text": "def validate_date(d):\n try:\n datetime.strptime(d, 
'%Y-%m-%dT%H:%M:%S')\n except ValueError:\n new_d = dateutil.parser.parse(d)\n d = new_d.isoformat()\n print('new formated date {}'.format(d))\n return d", "title": "" }, { "docid": "7d22ef2521250c657d6707c87f094de9", "score": "0.53090525", "text": "def str2datetime(str_time):\n try:\n return datetime.strptime(str_time,'%Y-%m-%d %H:%M:%S.%f')\n except:\n return datetime.strptime(str_time,'%Y-%m-%d %H:%M:%S')", "title": "" }, { "docid": "f4dbd12607938e6cba98374b54260095", "score": "0.52888674", "text": "def is_same_day(timestr1, timestr2):\n td1 = datetime.datetime.utcfromtimestamp(timestr1/1000)\n td2 = datetime.datetime.utcfromtimestamp(timestr2/1000)\n return td1.day == td2.day", "title": "" }, { "docid": "9345d614965cb89447aff7f190bb806a", "score": "0.5286932", "text": "def _parse_date(d: str):\n return datetime(int(d[:4]), int(d[4:6]), int(d[6:8]))", "title": "" }, { "docid": "28abe7061b38065142b4949f94e147e8", "score": "0.52813", "text": "def get_datetime(**params):\r\n ...", "title": "" }, { "docid": "5fde9b82249b8049df761d86b404910f", "score": "0.5276263", "text": "def _process_dates(self, args):\n \n dfrom = args.get('from', None)\n duntil = args.get('until', None)\n \n now = datetime.datetime.utcnow()\n \n if dfrom:\n dfrom = time_utils.convert_date_str_to_datetime(dfrom)\n else:\n #set default for from: create datetime now - 1 hours\n dfrom = now + datetime.timedelta(hours=-1)\n \n if duntil:\n #set default for until: create datetime now \n duntil = time_utils.convert_date_str_to_datetime(duntil)\n else:\n #create datetime now \n duntil = now \n \n \n #check that from is anterior to until\n if dfrom >= duntil:\n raise Exception(\"from date (%s) cannot be posterior to until date (%s)\" %(args.get('from', None), args.get('until', None)))\n \n return (dfrom, duntil)", "title": "" }, { "docid": "5df091e290ae3d839b2b9129e4d072e4", "score": "0.5253768", "text": "def smart_datetime(s, format='%Y-%m-%d', fallback=None):\n try:\n return datetime.strptime(s, format)\n except (ValueError, TypeError):\n return fallback", "title": "" }, { "docid": "12de98994ddf6d768f4840a13ffea037", "score": "0.52450424", "text": "def test_validation(self):\n self.assertIsNone(self.time_obj.check_time_format(\"23:52\"))\n self.assertIsNone(self.time_obj.check_time_format(\"00:05\"))\n self.assertIsNone(self.time_obj.check_time_format(\"5:45PM\"))\n self.assertIsNone(self.time_obj.check_time_format(\"1:05a.m.\"))\n self.assertIsNone(self.time_obj.check_time_format(\"5:45PM\"))\n self.assertIsNone(self.time_obj.check_time_format(\"5:45P.M.\"))\n self.assertIsNone(self.time_obj.check_time_format(\"1:05a.m.\"))\n self.assertIsNone(self.time_obj.check_time_format(\"5:45PM\"))\n with self.assertRaises(SyntaxError):\n self.time_obj.check_time_format(\"13:05a.m.\")\n with self.assertRaises(SyntaxError):\n self.time_obj.check_time_format(\"00:30am\")\n with self.assertRaises(SyntaxError):\n self.time_obj.check_time_format(\"0:30am\")\n with self.assertRaises(SyntaxError):\n self.time_obj.check_time_format(\"noon\")", "title": "" }, { "docid": "8f1688fbc570c17e01bc55ef16e35c56", "score": "0.52401835", "text": "def dtm_whole_day(event):\n # like 2021-05-31\n event_str = event['start'].get('dateTime', event['start'].get('date'))\n date_obj = date(int(event_str[0:4]), int(event_str[5:7]),\n int(event_str[8:10]))\n time_obj = time(1, 1)\n return datetime.combine(date_obj, time_obj)", "title": "" }, { "docid": "01ae61bfcd93688fab84559278f73324", "score": "0.5228634", "text": "def composeADate(date, time):\n return 
datetime.datetime(date.year, date.month, date.day, time.hour, time.minute)", "title": "" }, { "docid": "d5190aa2c174239790d0e6d7a6a376b1", "score": "0.52284557", "text": "def parse(self, argument: str) -> datetime.time:\n \n argument = argument.lower()\n self.now = datetime.datetime.utcnow()\n \n match = re.fullmatch(regex.Regex.DIGITS, argument)\n if (match):\n # 0+\n\n minutes = int(match.group(\"digits\"))\n if (minutes):\n new = self.now + datetime.timedelta(minutes=minutes)\n return datetime.time(new.hour, new.minute, new.second)\n\n match = re.fullmatch(regex.Regex.HOUR, argument)\n if (match):\n # 0\n # 0am\n # 0 am\n # 0pm\n # 0 pm\n # 00\n # 00am\n # 00 am\n # 00pm\n # 00 pm\n # at 0\n # at 0am\n # at 0 am\n # at 0pm\n # at 0 pm\n # at 00\n # at 00am\n # at 00 am\n # at 00pm\n # at 00 pm\n # until 0\n # until 0am\n # until 0 am\n # until 0pm\n # until 0 pm\n # until 00\n # until 00am\n # until 00 am\n # until 00pm\n # until 00 pm\n\n hour = int(match.group(\"hour\"))\n\n meridies = match.group(\"meridies\")\n if (meridies == \"pm\"):\n hour += 12\n\n if (hour not in range(0, 24)):\n raise error.ParserError(self, \"hour '{0}' is out of range\".format(hour))\n\n return datetime.time(hour, 0, 0)\n\n match = re.fullmatch(regex.Regex.TODAY_AT_HOUR, argument)\n if (match):\n # today at 0\n # today at 0am\n # today at 0 am\n # today at 0pm\n # today at 0 pm\n # today at 00\n # today at 00am\n # today at 00 am\n # today at 00pm\n # today at 00 pm\n # until today at 0\n # until today at 0am\n # until today at 0 am\n # until today at 0pm\n # until today at 0 pm\n # until today at 00\n # until today at 00am\n # until today at 00 am\n # until today at 00pm\n # until today at 00 pm\n\n hour = int(match.group(\"hour\"))\n\n meridies = match.group(\"meridies\")\n if (meridies == \"pm\"):\n hour += 12\n\n if (hour not in range(0, 24)):\n raise error.ParserError(self, \"hour '{0}' is out of range\".format(hour))\n\n return datetime.time(hour, 0, 0)\n\n match = re.fullmatch(regex.Regex.TIME, argument)\n if (match):\n # 00:00\n # 00:00:00\n # at 00:00\n # at 00:00:00\n # until 00:00\n # until 00:00:00\n\n hour = int(match.group(\"hour\"))\n minute = int(match.group(\"minute\"))\n\n second = match.group(\"second\")\n if (second):\n second = int(second)\n else:\n second = 0\n\n if (hour not in range(0, 24)):\n raise error.ParserError(self, \"hour '{0}' is out of range\".format(hour))\n elif (minute not in range(0, 60)):\n raise error.ParserError(self, \"minute '{0}' is out of range\".format(minute))\n elif (second not in range(0, 60)):\n raise error.ParserError(self, \"second '{0}' is out of range\".format(second))\n\n return datetime.time(hour, minute, second)\n\n match = re.fullmatch(regex.Regex.TODAY_AT_TIME, argument)\n if (match):\n # today at 00:00\n # today at 00:00:00\n # until today at 00:00\n # until today at 00:00:00\n\n hour = int(match.group(\"hour\"))\n minute = int(match.group(\"minute\"))\n\n second = match.group(\"second\")\n if (second):\n second = int(second)\n else:\n second = 0\n\n if (hour not in range(0, 24)):\n raise error.ParserError(self, \"hour '{0}' is out of range\".format(hour))\n elif (minute not in range(0, 60)):\n raise error.ParserError(self, \"minute '{0}' is out of range\".format(minute))\n elif (second not in range(0, 60)):\n raise error.ParserError(self, \"second '{0}' is out of range\".format(second))\n\n return datetime.time(hour, minute, second)\n\n raise error.ParserError(self, \"couldn't parse time from '{0}'\".format(argument))", "title": "" }, { "docid": 
"d5b68ab6d663ab7eb5802ae1af74fd50", "score": "0.52278376", "text": "def parse_time(content):\n matches = datefinder.find_dates(content)\n l = list(matches)\n date = str(l[0])\n return date[:-3]", "title": "" }, { "docid": "528655bc208dae0910818b8954c9caac", "score": "0.52215064", "text": "def ParseDateTime( datestring, usa_format=False ):\n\n #DEBUG:\n #print \"DATE: \"\n #print datestring\n #print \"\\n\"\n\n if usa_format:\n # swap day and month if both are numeric\n datestring = re.sub( r'(\\d{1,2})([-/])(\\d{1,2})([-/])(\\d{2,4})', r'\\3\\2\\1\\4\\5', datestring )\n\n\n for c in datecrackers:\n m = c.search( datestring )\n if not m:\n continue\n\n #DEBUG:\n #print \"MONTH: \"\n #print m.group( 'month' )\n #print \"\\n\"\n\n day = int( m.group( 'day' ) )\n month = MonthNumber( m.group( 'month' ) )\n year = int( m.group( 'year' ) )\n if year < 100:\n year = year+2000\n\n hour = GetGroup(m,'hour')\n if not hour:\n return datetime( year,month,day )\n hour = int( hour )\n\n # convert to 24 hour time\n # if no am/pm, assume 24hr\n if GetGroup(m,'pm') and hour>=1 and hour <=11:\n hour = hour + 12\n if GetGroup(m,'am') and hour==12:\n hour = hour - 12\n\n # if hour present, min will be too\n min = int( m.group( 'min' ) )\n\n # sec might be missing\n sec = GetGroup( m,'sec' )\n if not sec:\n return datetime( year,month,day,hour,min )\n sec = int( sec )\n\n return datetime( year,month,day,hour,min,sec )\n\n raise Exception, (\"Can't extract date from '%s'\" %(datestring) )", "title": "" }, { "docid": "1cc4dffc24e5bc7ff69796f507cba6eb", "score": "0.521511", "text": "def format_time(start_time, end_time, start_date, is_24_hour_time=False, overnight=False):\n if is_24_hour_time:\n start = datetime.strptime(start_time, \"%H%M\")\n start_time = start.strftime(\"%I:%M%p\")\n end = datetime.strptime(end_time, \"%H%M\")\n end_time = end.strftime(\"%I:%M%p\")\n\n else:\n start = datetime.strptime(start_time, \"%H:%M%p\")\n start_time = start.strftime(\"%I:%M\") + start_time[-2:].upper()\n end = datetime.strptime(end_time, \"%I:%M%p\")\n end_time = end.strftime(\"%I:%M\") + end_time[-2:].upper()\n\n end_date = start_date\n if overnight or (end.strftime(\"%p\") == \"AM\" and end < start):\n year, month, day = start_date.split(\"-\")\n next_day = date(int(year), int(month), int(day)) + timedelta(days=1)\n end_date = next_day.isoformat()\n\n new_start = \"{}:{}\".format(start_date, start_time)\n new_end = \"{}:{}\".format(end_date, end_time)\n\n return [new_start, new_end]", "title": "" }, { "docid": "db1189656c8afec930d40bcbd20b38e4", "score": "0.52139056", "text": "def test_datetime_validation(self):\n\n class LogEntry(Document):\n time = DateTimeField()\n\n log = LogEntry()\n log.time = dt.datetime.now()\n log.validate()\n\n log.time = dt.date.today()\n log.validate()\n\n log.time = dt.datetime.now().isoformat(\" \")\n log.validate()\n\n log.time = \"2019-05-16 21:42:57.897847\"\n log.validate()\n\n if dateutil:\n log.time = dt.datetime.now().isoformat(\"T\")\n log.validate()\n\n log.time = -1\n with pytest.raises(ValidationError):\n log.validate()\n log.time = \"ABC\"\n with pytest.raises(ValidationError):\n log.validate()\n log.time = \"2019-05-16 21:GARBAGE:12\"\n with pytest.raises(ValidationError):\n log.validate()\n log.time = \"2019-05-16 21:42:57.GARBAGE\"\n with pytest.raises(ValidationError):\n log.validate()\n log.time = \"2019-05-16 21:42:57.123.456\"\n with pytest.raises(ValidationError):\n log.validate()", "title": "" }, { "docid": "037ec9f3dd105764c311a67bcfb6c96f", "score": "0.5213268", 
"text": "def parse_date(txt):\n date = None\n clock = None\n\n for word in txt.split(' '):\n if date is None:\n try:\n date = datetime.strptime(word, \"%d-%m-%Y\")\n continue\n except ValueError:\n pass\n\n try:\n date = datetime.strptime(word, \"%d.%m.%Y\")\n continue\n except ValueError:\n pass\n if clock is None:\n try:\n clock = datetime.strptime(word, \"%H:%M\")\n continue\n except ValueError:\n pass\n\n if date is not None and clock is not None:\n return {'h': clock.hour,\n 'm': clock.minute,\n 'D': date.day,\n 'M': date.month,\n 'Y': date.year}\n return None", "title": "" }, { "docid": "610dd5cf1682001afb10961ecaa3a270", "score": "0.52087754", "text": "def allstrptime(self):\n for p in self.pCond():\n p['ts']['date'] = [datetime.datetime.strptime(t,\"%Y-%m-%d %H:%M\") for t in p['ts']['date']]\n if 'tsRaw' in p.keys():\n p['tsRaw']['date'] = [datetime.datetime.strptime(t,\"%Y-%m-%d %H:%M\") for i in p['ts']['date']]\n print('Converted all datetime to dt-objects')", "title": "" }, { "docid": "8cf03b77361d9bcb2849500f385f8179", "score": "0.52058345", "text": "def add_freetime_caps(day_list, day, starttime, endtime, buffer, min_time, free_cal):\n if len(day_list) > 0:\n if starttime < day_list[0][0]:\n new_range = parse_range((starttime, day_list[0][0]), buffer, min_time, endtime)\n if new_range is not None:\n free_cal.add_time(new_range, day)\n if endtime > day_list[-1][1]:\n new_range = parse_range((day_list[-1][1], endtime), buffer, min_time, endtime)\n if new_range is not None:\n free_cal.add_time(new_range, day)\n else:\n new_range = parse_range((starttime, endtime), buffer, min_time, endtime)\n if new_range is not None:\n free_cal.add_time(new_range, day)", "title": "" }, { "docid": "09c8f5b0a09d0e94855e13400f9858a9", "score": "0.52048147", "text": "def check_time_format(time):\n full_time_format = \"[\\d]{4}-[\\d]{2}-[\\d]{2}T[\\d]{2}:[\\d]{2}:[\\d]{2}Z\"\n date_format = \"[\\d]{4}-[\\d]{2}-[\\d]{2}\"\n \n if re.match(full_time_format, time):\n return \"full_format\"\n elif re.match(date_format, time):\n return \"date_only\"\n else:\n raise ValueError(\"Incorrect time format: \", time)", "title": "" }, { "docid": "51efe5de594312bbf4c1743dfe608b16", "score": "0.52013683", "text": "def matchtime(self, t):\n return ((t.minute in self.minutes) and\n (t.hour in self.hours) and\n (t.day in self.monthdays) and\n (t.month in self.months) and\n ((t.isoweekday() % 7) in self.weekdays))", "title": "" }, { "docid": "2f2f1f994cb0d9c6742e48d8c8263c86", "score": "0.5200053", "text": "def test_define_cycletime_format(self):\n cycletime = \"201711220100\"\n dt = datetime(2017, 11, 22, 1, 0)\n result = cycletime_to_datetime(cycletime, cycletime_format=\"%Y%m%d%H%M\")\n self.assertEqual(result, dt)", "title": "" }, { "docid": "61d2a23b7172918cee322e262c153b6e", "score": "0.5197719", "text": "def strpdatetime(value, frmt='date'):\n if frmt == 'date':\n from_func = DateFrom\n else:\n from_func = DateTimeFrom\n input_value = value\n try:\n value = from_func(value)\n except RangeError, exc:\n raise InvalidDateError(input_value, exc)\n except ValueError:\n raise InvalidDateError(input_value)\n try:\n assert value == today()\n except AssertionError:\n pass\n else:\n raise InvalidDateError(input_value)\n return value", "title": "" }, { "docid": "f5dd25b6ff54428d7f7d2b56e1c23735", "score": "0.5193574", "text": "def dump_datetime(value):\n if value is None:\n return None\n return [value.strftime(\"%Y-%m-%d\"), value.strftime(\"%H:%M:%S\")]", "title": "" }, { "docid": "f5dd25b6ff54428d7f7d2b56e1c23735", 
"score": "0.5193574", "text": "def dump_datetime(value):\n if value is None:\n return None\n return [value.strftime(\"%Y-%m-%d\"), value.strftime(\"%H:%M:%S\")]", "title": "" }, { "docid": "2853a55210410b459f387f23494a520b", "score": "0.5179158", "text": "def parse_dt_string(s):\n intervals = []\n # https://dateutil.readthedocs.io/en/stable/parser.html\n parser_info = dtp.parserinfo(dayfirst=True)\n try:\n groups = s.split('.')\n for group in groups:\n if group in ['']:\n continue\n k, v = group.split(':', 1)\n name = k.strip()\n interval_strings = v.split(',')\n for interval_str in interval_strings:\n if interval_str in ['']:\n continue\n if interval_str.find('+') > 0:\n # '+' was found implying relative end-time was specified.\n interval_parts = interval_str.split('+')\n datetime_str = interval_parts[0].strip()\n dur_str = interval_parts[1].strip()\n dur = parse_dur(dur_str) # timedelta\n start_dt_str = datetime_str\n if start_dt_str.find(':') < 0:\n raise ValueError(FORMAT_MSG)\n start_dt = dtp.parse(start_dt_str, parserinfo=parser_info)\n\n # auto set year\n if dt.today().month > start_dt.month:\n # user likely referring to next year\n start_dt = start_dt.replace(year=dt.today().year+1)\n else:\n start_dt = start_dt.replace(year=dt.today().year)\n end_dt = start_dt + dur\n elif interval_str.find('-') > 0:\n # '-' was found implying absolute end-time was specified.\n interval_parts = interval_str.split('-')\n start_dt_str = interval_parts[0].strip()\n if start_dt_str.find(':') < 0:\n raise ValueError(FORMAT_MSG)\n start_dt = dtp.parse(start_dt_str, parserinfo=parser_info)\n\n # determine is interval's end was specified as datetime or time.\n end_str = interval_parts[1].strip()\n if end_str.find(' ') > 0:\n # date was specified. e.g. '2 feb 13:00'\n end_dt_str = end_str\n end_dt = dtp.parse(end_dt_str, parserinfo=parser_info)\n else:\n # only time was specified.\n if end_str.find(':') < 0:\n raise ValueError(FORMAT_MSG)\n tmp_dt = dtp.parse(end_str, parserinfo=parser_info) # for time.\n end_dt = start_dt # for base dt info, which will be replaced.\n if tmp_dt.hour < start_dt.hour:\n # end-time refers to next day.\n end_dt += timedelta(days=1)\n else:\n end_dt = end_dt.replace(hour=tmp_dt.hour, minute=tmp_dt.minute)\n\n # auto set year\n if dt.today().month > start_dt.month:\n # user likely referring to next year\n start_dt = start_dt.replace(year=dt.today().year + 1)\n end_dt = end_dt.replace(year=dt.today().year + 1)\n else:\n start_dt = start_dt.replace(year=dt.today().year)\n end_dt = end_dt.replace(year=dt.today().year)\n else:\n interval_parts = interval_str.split()\n timeslot = interval_parts[-1].strip().lower()\n start_dt_str = interval_parts[0] + ' ' + interval_parts[1]\n start_dt = dtp.parse(start_dt_str, parserinfo=parser_info)\n start_dt = start_dt.replace(year=dt.today().year)\n if timeslot not in GENERAL_TIMESLOTS:\n raise ValueError(FORMAT_MSG)\n elif timeslot == 'breakfast':\n start_dt = start_dt.replace(hour=8)\n delta = timedelta(hours=2,minutes=30)\n elif timeslot == 'brunch':\n start_dt = start_dt.replace(hour=11)\n delta = timedelta(hours=2,minutes=30)\n elif timeslot == 'lunch':\n start_dt = start_dt.replace(hour=12)\n delta = timedelta(hours=2,minutes=30)\n elif timeslot == 'dinner':\n start_dt = start_dt.replace(hour=18)\n delta = timedelta(hours=2,minutes=30)\n elif timeslot == 'supper':\n start_dt = start_dt.replace(hour=21)\n delta = timedelta(hours=2,minutes=30)\n elif timeslot == 'morning':\n start_dt = start_dt.replace(hour=8)\n delta = timedelta(hours=4)\n 
elif timeslot == 'afternoon':\n start_dt = start_dt.replace(hour=12)\n delta = timedelta(hours=6)\n elif timeslot == 'night':\n start_dt = start_dt.replace(hour=19)\n delta = timedelta(hours=5)\n end_dt = start_dt + delta\n\n # auto set year\n if dt.today().month > start_dt.month:\n # user likely referring to next year\n start_dt = start_dt.replace(year=dt.today().year + 1)\n end_dt = end_dt.replace(year=dt.today().year + 1)\n else:\n start_dt = start_dt.replace(year=dt.today().year)\n end_dt = end_dt.replace(year=dt.today().year)\n\n interval = Interval(start_dt, end_dt, name)\n print(interval) # debugging statement\n print()\n intervals.append(interval)\n except IndexError:\n raise IndexError(FORMAT_MSG)\n\n return intervals", "title": "" }, { "docid": "8ec6eec68b941f7af30b4be27d34d027", "score": "0.5178086", "text": "def load_time(str_time):\n return datetime.datetime.strptime(str_time, \"%Y-%m-%d %H:%M:%S\")", "title": "" }, { "docid": "2daaf005b92da44798c6b38d670ce765", "score": "0.5174988", "text": "def _datetime_standardize(self, package):\n # conversion keys\n _keys = ['pst', 'pet', 'dst', 'det']\n for key in package:\n if key in _keys:\n value = package.get(key)\n try:\n if not isinstance(value, datetime.datetime):\n value = datetime.datetime.strptime(value[:-6], '%Y-%m-%dT%H:%M:%S.%f')\n except:\n raise ValueError(\"time data does not match format Y-m-dTH:M:S\")\n package[key] = value\n return package", "title": "" }, { "docid": "7c26166540998f9586ad9c698e6b0098", "score": "0.5159381", "text": "def str_to_datetime(time_string):\n try:\n return make_aware(datetime.strptime(time_string, API_TIME_FORMAT), tz.UTC)\n except ValueError as e:\n if \"does not match format\" in str(e):\n log(\"does not match format\")\n log(str(e))\n return abort(400)\n raise # not best practice but I'm okay with a potential 500 error alerting us to new cases", "title": "" }, { "docid": "cbea83c9becdf010417b9d6b360e4610", "score": "0.51592904", "text": "def respones_to_datetime(response, formate):\n return datetime.datetime.strptime(response, formate)", "title": "" }, { "docid": "4223a60224885939e30973483ae895fb", "score": "0.5156785", "text": "def test_event_date_time(self, client, workshop):\n response = client.get(reverse(\"events:upcoming\"))\n # start/end dates (same month/day)\n assertContains(response, date(localtime(workshop.start_time), \"F j\"))\n assertContains(response, date(localtime(workshop.end_time), \"j\"))\n # start/end times (same day)\n assertContains(response, date(localtime(workshop.start_time), \"g:i\"))\n assertContains(response, date(localtime(workshop.end_time), \"g:i A\"))", "title": "" }, { "docid": "866841e72159859dfd0f095f54efd5e6", "score": "0.5147876", "text": "def parse(self, argument: str) -> datetime.date:\n \n argument = argument.lower()\n self.now = datetime.datetime.utcnow()\n\n match = re.fullmatch(regex.Regex.US_DATE, argument)\n if (match):\n # 12/31/00\n # 12/31/0000\n # 12-31-00\n # 12-31-0000\n # on 12/31/00\n # on 12/31/0000\n # on 12-31-00\n # on 12-31-0000\n # until 12/31/00\n # until 12/31/0000\n # until 12-31-00\n # until 12-31-0000\n\n month = int(match.group(\"month\"))\n day = int(match.group(\"day\"))\n\n year = match.group(\"year\")\n if (len(year) == 2):\n year = str(self.now.year)[:2] + year\n\n year = int(year)\n\n if (not year):\n raise error.ParserError(self, \"year '{0}' is out of range\".format(year))\n\n try:\n return datetime.date(year, month, day)\n except (ValueError) as e:\n raise error.ParserError(self, \"day '{0}' is out of 
range\".format(day))\n\n match = re.fullmatch(regex.Regex.EU_DATE, argument)\n if (match):\n # 31/12/00\n # 31/12/0000\n # 31-12-00\n # 31-12-0000\n # on 31/12/00\n # on 31/12/0000\n # on 31-12-00\n # on 31-12-0000\n # until 31/12/00\n # until 31/12/0000\n # until 31-12-00\n # until 31-12-0000\n\n day = int(match.group(\"day\"))\n month = int(match.group(\"month\"))\n\n year = match.group(\"year\")\n if (len(year) == 2):\n year = str(self.now.year)[:2] + year\n\n year = int(year)\n\n if (not year):\n raise error.ParserError(self, \"year '{0}' is out of range\".format(year))\n\n try:\n return datetime.date(year, month, day)\n except (ValueError) as e:\n raise error.ParserError(self, \"day '{0}' is out of range\".format(day))\n\n match = re.fullmatch(regex.Regex.DAYS, argument)\n if (match):\n # 1d\n # in 1d\n # for 1d\n # 1 day\n # in 1 day\n # for 1 day\n # 2 days\n # in 2 days\n # for 2 days\n\n days = int(match.group(days))\n if (days):\n new = self.now + datetime.timedelta(days=days)\n return datetime.date(new.year, new.month, new.day)\n\n if (argument == \"yesterday\"):\n new = self.now - datetime.timedelta(days=1)\n return datetime.date(new.year, new.month, new.day)\n elif (argument == \"today\"):\n return datetime.date(self.now.year, self.now.month, self.now.day)\n elif (argument == \"tomorrow\"):\n new = self.now + datetime.timedelta(days=1)\n return datetime.date(new.year, new.month, new.day)\n\n raise error.ParserError(self, \"couldn't parse date from '{0}'\".format(argument))", "title": "" }, { "docid": "d25b0c71019e21951c368e34fca4d78b", "score": "0.5146917", "text": "def parse(self, day: datetime.date) -> Dict[datetime.time, Show]:\n pass", "title": "" }, { "docid": "d3f02bb619575273966c997da555a09d", "score": "0.5137569", "text": "def _check_date(date):\n\n if isinstance(date, six.string_types):\n try:\n date = dateutil.parser.parse(date)\n except ValueError:\n msg = \"Start date '{0}' not recognized as a valid date\".format(date)\n raise ValueError(msg)\n\n if isinstance(date, datetime.date):\n date = datetime.datetime.strftime(date, '%Y-%m-%dT%H:%M:%S')\n\n return date", "title": "" }, { "docid": "ea2c473a2ded675a2ce9f31cdfcc89e2", "score": "0.51373047", "text": "def dateparse(x):\n if not pd.isnull(x):\n return pd.datetime.strptime(x, '%Y-%m-%d %H:%M:%S')\n else:\n return x", "title": "" }, { "docid": "37d9078fe50c8008f6d8d4666aa91cf3", "score": "0.5133595", "text": "def get_update_time_components(update_time):\n if isinstance(update_time, datetime.datetime):\n hour = \"{:d}\".format(update_time.hour)\n min = \"{:02d}\".format(update_time.minute)\n time_of_day = update_time.strftime('%p')\n else:\n hour = \"12\"\n min = \"00\"\n time_of_day = \"PM\"\n\n return hour, min, time_of_day", "title": "" }, { "docid": "271a6cea6dd0fc203e676759dea4172e", "score": "0.5133154", "text": "def load_datetime():", "title": "" }, { "docid": "149779ee17e2dbbb1c0d15b5e852ed12", "score": "0.5127833", "text": "def _parse_datetime_string(val):\n dt = None\n lenval = len(val)\n fmt = {19: \"%Y-%m-%d %H:%M:%S\", 10: \"%Y-%m-%d\"}.get(lenval)\n if fmt is None:\n # Invalid date\n raise exc.InvalidDateTimeString(\"The supplied value '%s' does not \"\n \"match either of the formats 'YYYY-MM-DD HH:MM:SS' or \"\n \"'YYYY-MM-DD'.\" % val)\n return datetime.datetime.strptime(val, fmt)", "title": "" }, { "docid": "30cb10d086a3bc1061366de8f4088b85", "score": "0.5125864", "text": "def test_basic(self):\n result = iris_time_to_datetime(self.cube.coord(\"time\"))\n self.assertIsInstance(result, list)\n for 
item in result:\n self.assertIsInstance(item, datetime)\n self.assertEqual(result[0], datetime(2017, 2, 17, 6, 0))", "title": "" }, { "docid": "d8cc1bced0fc807ef04e6cc216327379", "score": "0.5125057", "text": "def not_valid_time(user_input):\n\t\n\ttry:\n\t\tuser_input = datetime.datetime.strptime(user_input,'%H:%M')\n\texcept:\n\t\treturn True\n\telse:\n\t\treturn False", "title": "" }, { "docid": "a986909ff53fcd5e3c8dfa14e0fa94d3", "score": "0.5122409", "text": "def parseForTime(self):\n # break down the words\n words = self.taskObj_str.split(\" \")\n # create a dictionary that represents the\n # appropriate date of next \n time_dict = {\n \"today\": datetime.datetime.today(),\n \"tomorrow\": datetime.datetime.now() + datetime.timedelta(days=1),\n \"next week\": datetime.datetime.now() + datetime.timedelta(days=7),\n \"yesterday\": datetime.datetime.now() - datetime.timedelta(days=1)\n }\n\n # records the numerical representation of each \n # day of the week\n days_dict = {\n \"mon\":0,\n \"Mon\":0,\n \"tues\":1,\n \"tue\":1,\n \"Tues\":1,\n \"Tue\":1,\n \"Wedn\":2,\n \"wedn\":2,\n \"Thurs\":3,\n \"thurs\":3,\n \"fri\":4,\n \"Fri\":4,\n \"Satur\":5,\n \"satur\":5,\n \"Sund\":6,\n \"sund\":6\n }\n\n next_week = False # user asking to schedule for next week or last week \n # iterate through each word\n duedate = None\n for i in range(len(words)):\n\n if words[i] == \"next\":\n next_week = True\n # check if the duedate for task is sunday \n # iterate through the next 7 days to see which one corresponds\n # to the due date of the task \n for day_key in days_dict.keys():\n if day_key in words[i]:\n for day in range(7):\n new_day = datetime.datetime.now()+datetime.timedelta(days=day)\n if (new_day.weekday() == days_dict[day_key]):\n if next_week: # if next <day of the week> scale by a week\n duedate = new_day + datetime.timedelta(days=7)\n else: # else its this week\n duedate = new_day\n break\n \n if duedate is None and words[i] in time_dict.keys():\n duedate = time_dict[words[i]]\n \n return duedate", "title": "" }, { "docid": "143e67c8ce30e79375f308c90b175b99", "score": "0.5115949", "text": "def _reduce_datetimes(row):\n\n row = list(row)\n\n for i, val in enumerate(row):\n if hasattr(val, \"strftime\"):\n row[i] = val.strftime(\"%Y-%m-%d %H:%M:%S\")\n elif hasattr(val, 'isoformat'):\n row[i] = val.isoformat()\n return tuple(row)", "title": "" }, { "docid": "3843ebd13ec812c8308c17eab8a1f2ef", "score": "0.51128066", "text": "def check_iaad_time(self, time):\n\n try:\n valid_time = datetime.datetime(2019, 1, 1, int(time[0]), int(time[1]), 0).isoformat()\n return valid_time[-8:]\n except ValueError:\n return False", "title": "" }, { "docid": "54d9ac27a76cf965870657dd7f4296c7", "score": "0.51125735", "text": "def _try_parse_datetime(time_string):\n out = dateparser.parse(time_string)\n if out:\n return out\n else:\n raise Exception('Could not parse datetime from string: {}'.format(time_string))", "title": "" }, { "docid": "54d9ac27a76cf965870657dd7f4296c7", "score": "0.51125735", "text": "def _try_parse_datetime(time_string):\n out = dateparser.parse(time_string)\n if out:\n return out\n else:\n raise Exception('Could not parse datetime from string: {}'.format(time_string))", "title": "" } ]
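Taken together, the date/time passages above reduce to a single pattern: try a sequence of known formats in order and fail loudly when none match. A minimal runnable sketch of that pattern in Python (the format list and the function name are illustrative assumptions, not drawn from any one passage):

import datetime

# Candidate formats, most specific first; extend as needed (assumed list).
_FORMATS = ("%Y-%m-%d %H:%M:%S", "%Y-%m-%dT%H:%M:%S", "%Y-%m-%d", "%H:%M")

def parse_datetime(text):
    """Try each known format in turn; raise ValueError if none match."""
    for fmt in _FORMATS:
        try:
            return datetime.datetime.strptime(text.strip(), fmt)
        except ValueError:
            continue  # wrong format, try the next one
    raise ValueError("couldn't parse datetime from %r" % (text,))

strptime also validates field ranges (e.g. minute 61 raises ValueError), which is why several of the passages above lean on it rather than checking each field against a regex by hand.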
c24768a77439507119a3a6354b108b18
delete collection of NetNamespace. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response.
[ { "docid": "0f33948f2729be0efc3d225f85e1b7c0", "score": "0.6659838", "text": "def delete_netnamespaces_with_http_info(self, **kwargs):\n\n all_params = ['pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method delete_netnamespaces\" % key\n )\n params[key] = val\n del params['kwargs']\n\n resource_path = '/oapi/v1/netnamespaces'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = []\n\n return self.api_client.call_api(resource_path, 'DELETE',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='UnversionedStatus',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'))", "title": "" } ]
[ { "docid": "423a97b7c0770c96dbfc718f268eedc6", "score": "0.7025836", "text": "def delete_netnamespaces(self, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.delete_netnamespaces_with_http_info(**kwargs)\n else:\n (data) = self.delete_netnamespaces_with_http_info(**kwargs)\n return data", "title": "" }, { "docid": "2a2a03f197caea58358b3779258c700a", "score": "0.68234956", "text": "def delete_netnamespace(self, body, name, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.delete_netnamespace_with_http_info(body, name, **kwargs)\n else:\n (data) = self.delete_netnamespace_with_http_info(body, name, **kwargs)\n return data", "title": "" }, { "docid": "ca95f2f75f3db266b4eebf6e2f9e7500", "score": "0.62351376", "text": "def delete_netnamespace_with_http_info(self, body, name, **kwargs):\n\n all_params = ['body', 'name', 'pretty']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method delete_netnamespace\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `delete_netnamespace`\")\n # verify the required parameter 'name' is set\n if ('name' not in params) or (params['name'] is None):\n raise ValueError(\"Missing the required parameter `name` when calling `delete_netnamespace`\")\n\n resource_path = '/oapi/v1/netnamespaces/{name}'.replace('{format}', 'json')\n path_params = {}\n if 'name' in params:\n path_params['name'] = params['name']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = []\n\n return self.api_client.call_api(resource_path, 'DELETE',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='UnversionedStatus',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'))", "title": "" }, { "docid": "73283c69dd5e57ea9f0460befeefb224", "score": "0.61065453", "text": "def delete_collection(self, bucket_name=None, **kwargs):\n response = self.doDelete('{0}/{1}'.format(COLLECTIONS_API, bucket_name), params=kwargs,\n headers=DEFAULT_HEADERS)\n if response.status_code != OK:\n self.parse_response_as_json(response)\n\n return response", "title": "" }, { "docid": "b07d58871ed0f40f4e779fa5554546f7", "score": "0.608011", "text": "def delete(self, namespace):\n r = requests.post(\"https://%s:%d/d/%s\" % (self.con.host, self.con.port, namespace), headers={\"content-type\": \"application/json\",\"Authorization\": self.con.apikey},\n verify=self.con.verify)\n if r.status_code == 200:\n return True\n return False", "title": "" 
}, { "docid": "929792a65b02a343b66ad69ad2046966", "score": "0.6010301", "text": "def delete_collection(self, collection):\n url = f'{self.byoc_url}/collections/{self._parse_id(collection)}'\n return self.client.get_json(url=url, request_type=RequestType.DELETE, use_session=True)", "title": "" }, { "docid": "a0784ee356ddfa02290d1f4753a0a30c", "score": "0.5935651", "text": "def collection_delete(login_manager: LoginManager, *, collection_id: uuid.UUID) -> None:\n gcs_client = login_manager.get_gcs_client(collection_id=collection_id)\n res = gcs_client.delete_collection(collection_id)\n display(res, text_mode=TextMode.text_raw, response_key=\"code\")", "title": "" }, { "docid": "81fecd184db1515120c3b27340efa2dc", "score": "0.5732344", "text": "def remove_netns(name, **kwargs):\n try:\n netns.remove(name, libc=priv_linux.get_cdll())\n except OSError as e:\n if e.errno != errno.ENOENT:\n raise\n LOG.debug(\"Namespace %s deleted.\", name)", "title": "" }, { "docid": "b3dcce7e4d89fa27efe993bceb690f0e", "score": "0.5688481", "text": "def delete_collection(self, collection_type: str, name: str) -> None:\n collection = self.get_collection(collection_type, name)\n self._automodel_request(f\"collection/{collection.data.id}\", \"delete\", payload={\"meta\": {}}, full_return=True)", "title": "" }, { "docid": "b496758e5481fe53a5f31807735bbfab", "score": "0.5611074", "text": "def delete(self,\n ns_group_id,\n force=None,\n ):\n return self._invoke('delete',\n {\n 'ns_group_id': ns_group_id,\n 'force': force,\n })", "title": "" }, { "docid": "f0e9d8cb725f31a42b6e09051ae245ff", "score": "0.55513597", "text": "def delete_nets(self, netlist):\n if type(netlist) is str:\n netlist = [netlist]\n nets_deleted = []\n for net in netlist:\n try:\n edb_net = self._edb.Cell.Net.FindByName(self._active_layout, net)\n if edb_net is not None:\n edb_net.Delete()\n nets_deleted.append(net)\n self._messenger.add_info_message(\"Net {} Deleted\".format(net))\n except:\n pass\n\n return nets_deleted", "title": "" }, { "docid": "93ad97028088d2a2c023c89be37f0f48", "score": "0.5343871", "text": "def delete_collection(self, collection_id, **kwargs):\n\n if collection_id is None:\n raise ValueError('collection_id must be provided')\n\n headers = {}\n if 'headers' in kwargs:\n headers.update(kwargs.get('headers'))\n sdk_headers = get_sdk_headers('watson_vision_combined', 'V4',\n 'delete_collection')\n headers.update(sdk_headers)\n\n params = {'version': self.version}\n\n url = '/v4/collections/{0}'.format(\n *self._encode_path_vars(collection_id))\n request = self.prepare_request(method='DELETE',\n url=url,\n headers=headers,\n params=params,\n accept_json=True)\n response = self.send(request)\n return response", "title": "" }, { "docid": "fa97c83a53b61b77ae41b6763332eb0c", "score": "0.53396374", "text": "def delete(self, name):\n CRUDService.get_instance().delete_namespace(name)\n return KubeApiResponseDto('Resource deleted.'), HttpStatusCode.Accepted.value", "title": "" }, { "docid": "c22769e4ab241db637466288279e5715", "score": "0.53163457", "text": "def delete(self, params):\n ngs = NetworkGroup.get_by_ids(params.network)\n for network_group in ngs:\n network_group.delete()\n\n self.serializer.print_to_output(\n {},\n \"Network groups with IDS {0} have been deleted.\".format(\n ','.join(params.network))\n )", "title": "" }, { "docid": "5132bfc7cfca8026b19aed6cc1002e30", "score": "0.5288787", "text": "def delete_collection(self, collection):\n sql = '''DELETE FROM collections WHERE coll_name=?'''\n with closing(self.conn.cursor()) 
as c:\n c.execute(sql, (collection.name,))\n self.conn.commit()\n print(collection.name, \"was successfully deleted.\")", "title": "" }, { "docid": "a2cb78b6d4fa313f4246574f73046532", "score": "0.527623", "text": "def delete_network(self, net_id, created_items=None):\n self.logger.debug('delete_network: {}'.format(net_id))\n try:\n self.fos_api.network.remove_network(net_id)\n except fimapi.FIMNotFoundException as fnfe:\n raise vimconn.VimConnNotFoundException(\n \"Network {} not found at VIM (already deleted?). Error {}\".format(net_id, fnfe))\n except Exception as e:\n raise vimconn.VimConnException(\"Cannot delete network {} from VIM. Error {}\".format(net_id, e))\n return net_id", "title": "" }, { "docid": "1ef0d47a09c8f9609039a020ac62d726", "score": "0.5268201", "text": "def delete(self,\n ns_service_group_id,\n force=None,\n ):\n return self._invoke('delete',\n {\n 'ns_service_group_id': ns_service_group_id,\n 'force': force,\n })", "title": "" }, { "docid": "6c114d4031b7eaa1234d8ef304a1a4d4", "score": "0.5114616", "text": "def delete_network(self, future, network_uuid, callback):\n pass", "title": "" }, { "docid": "c07d236bc63504fb0aec8a205614910f", "score": "0.51092094", "text": "def testDeleteNamespace(self):\n namespaces = NamespaceAPI(self.user)\n namespaces.create([(u'username/name', u'A namespace.')])\n self.store.commit()\n\n with login(u'username', self.user.objectID, self.transact) as session:\n yield self.facade.deleteNamespace(session, u'username/name')\n\n self.store.rollback()\n self.assertEqual({}, namespaces.get([u'username/name']))", "title": "" }, { "docid": "018f4db651e1e57ae4bb712038943536", "score": "0.5102822", "text": "def collection_remove(collection):\n\n try:\n table = database.get_collection(collection)\n except KeyError:\n return '''\n Not found.\n The collection \\\"{}\\\" is not found. Please check again the name of the collection. \n You can use /show_collections to find all collections in the database.\n '''.format(collection), 404\n\n # Full request body version\n if 'Content-Type' in request.headers and request.headers['Content-Type'] == 'application/json':\n query = request.json\n if not query:\n return \"Find query can't be empty. Please use _all to query all data.\", 400\n # Query string version\n else:\n query = dict(request.args)\n if not query:\n return \"Find query can't be empty. 
Please use _all to query all data.\", 400\n for k in query:\n if isinstance(query[k], str) and query[k].isnumeric():\n query[k] = eval(query[k])\n rtn = table.remove(query)\n if rtn['successful']:\n return jsonify(rtn['doc_id'])\n else:\n return rtn['message'], 406", "title": "" }, { "docid": "13eff58ff557c216e7831c1341fa122a", "score": "0.50797707", "text": "def ex_delete_network(self, network, force=None):\n\n args = {\"id\": network.id, \"forced\": force}\n\n self._async_request(command=\"deleteNetwork\", params=args, method=\"GET\")\n return True", "title": "" }, { "docid": "7c2d376081fca09c27bce58f7cf03f6c", "score": "0.5055226", "text": "def delete_collection(self, userid, collection):", "title": "" }, { "docid": "7be4bffda853772f1d3b1aaea522a07d", "score": "0.5046856", "text": "def empty_nsd_collection():\n nsd_coll.delete_many({})", "title": "" }, { "docid": "636d3867f7eda358f1d42c0cc1d86678", "score": "0.5000439", "text": "def delete(self, params=None):\n self.logger.debug('Deleting %s with parameters: %s'\n % (self.type_name, params))\n res = self.client.delete_network_acl(**params)\n self.logger.debug('Response: %s' % res)\n return res", "title": "" }, { "docid": "76c60ffa4d34e741374b10bfa5803ce2", "score": "0.49762136", "text": "def delete_ip_rule(namespace, **kwargs):\n try:\n with get_iproute(namespace) as ip:\n ip.rule('del', **kwargs)\n except OSError as e:\n if e.errno == errno.ENOENT:\n raise NetworkNamespaceNotFound(netns_name=namespace)\n raise", "title": "" }, { "docid": "5e4cde30c7fca79644056621cef0a047", "score": "0.49700975", "text": "def test_delete_network_groupnet(self):\n pass", "title": "" }, { "docid": "11bc4622a64ddb36eb12755e8e8d4a3a", "score": "0.49681637", "text": "def test_delete_node_v1alpha1_collection_runtime_class(self):\n pass", "title": "" }, { "docid": "3ac1997fb2b32680f16c770f28ecaf4c", "score": "0.49538225", "text": "def _delete(self, uri):\n resp, resp_body = self.api.method_delete(uri)\n if resp not in range(200, 300):\n raise exceptions.DomainDeletionFailed(\"(%s) %s\" % (resp,\n resp_body['message']))", "title": "" }, { "docid": "d0a674d1b1e611e3d265605934ffdd5f", "score": "0.49466026", "text": "def delete_namespaced_policie(self, body, namespace, name, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.delete_namespaced_policie_with_http_info(body, namespace, name, **kwargs)\n else:\n (data) = self.delete_namespaced_policie_with_http_info(body, namespace, name, **kwargs)\n return data", "title": "" }, { "docid": "87645277bc670b8a121ead116d1588dc", "score": "0.4934415", "text": "def delete_network_interfaces(\n self,\n references=None, # type: List[models.ReferenceType]\n authorization=None, # type: str\n x_request_id=None, # type: str\n names=None, # type: List[str]\n async_req=False, # type: bool\n _return_http_data_only=False, # type: bool\n _preload_content=True, # type: bool\n _request_timeout=None, # type: Optional[int]\n ):\n # type: (...) 
-> None\n kwargs = dict(\n authorization=authorization,\n x_request_id=x_request_id,\n names=names,\n async_req=async_req,\n _return_http_data_only=_return_http_data_only,\n _preload_content=_preload_content,\n _request_timeout=_request_timeout,\n )\n kwargs = {k: v for k, v in kwargs.items() if v is not None}\n endpoint = self._network_interfaces_api.api210_network_interfaces_delete_with_http_info\n _process_references(references, ['names'], kwargs)\n return self._call_api(endpoint, kwargs)", "title": "" }, { "docid": "9fb6ba7dd527b350ab49431c82232838", "score": "0.4929661", "text": "def DeleteCluster(self, request, timeout, metadata=None, with_call=False, protocol_options=None):\n raise NotImplementedError()", "title": "" }, { "docid": "65754434a7070890ec958f9eb268edb0", "score": "0.49112335", "text": "def delete(self,\n ns_service_id,\n force=None,\n ):\n return self._invoke('delete',\n {\n 'ns_service_id': ns_service_id,\n 'force': force,\n })", "title": "" }, { "docid": "120668033149887e942e396fa00dd90b", "score": "0.48914415", "text": "def delete_namespace(self, path, recurse=False, force=False,\n verbose=False):\n absPath = self.abs_tag_path(path)\n fullPath = u'/namespaces' + absPath\n if fullPath.endswith(u'/'):\n fullPath = fullPath[:-1]\n status, result = self.call('DELETE', fullPath)\n if verbose:\n if status == STATUS.NO_CONTENT:\n Print(u'Removed namespace %s' % absPath)\n else:\n Print(u'Failed to remove namespace %s (%d)' % (absPath, status))\n return status", "title": "" }, { "docid": "5648ae61d06da28b6867d193363a99ab", "score": "0.48774707", "text": "def delete_collection(self):\n collection = self.get_collection(render_to_response=False)\n in_clause = tuple(map(self.mapper, collection))\n stmt = delete(self.context.entity.__table__).where(\n self.context.entity.__table__.c.id_error.in_(in_clause))\n self.context.session.begin()\n self.context.session.execute(stmt)\n self.context.session.commit()\n self.context.session.close()\n\n return Response('OK')", "title": "" }, { "docid": "dc2f82fae447ecb738f256d601e67fda", "score": "0.48636612", "text": "def delete_documents(self, doc_path, q=None):\n assert len(doc_path.split(\".\")) == 2\n\n rest_url = \"https://api.mongolab.com/api/1/databases/%s/collections/%s\" % tuple(doc_path.split(\".\"))\n # send_kwargs.\n send_kwargs = {}\n send_kwargs.update(MongolabRestWrapper.HTTP_API_KEY)\n send_kwargs['q'] = json.dumps(q) if q else None\n # send_kwargs['m'] = True\n\n # Specifying an empty list in the body is equivalent to deleting the documents\n response = requests.put(rest_url, params=send_kwargs, headers=self.HTTP_HEADERS,\n timeout=self.HTTP_TIMIOUT, data=\"[]\")\n logging.debug(\"(delete_documents)Request URL: %s\" % response.url)\n # { \"n\" : 0 , \"removed\" : 1}\n logging.debug(\"(delete_documents)Response Text: %s\" % response.text)\n # print(\"Request URL: %s\" % response.url)\n return response.json()[\"removed\"]", "title": "" }, { "docid": "30b2edaf06f0bd2ec33826b9273cee38", "score": "0.48593616", "text": "def delete_namespaced_imagestream(self, body, namespace, name, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.delete_namespaced_imagestream_with_http_info(body, namespace, name, **kwargs)\n else:\n (data) = self.delete_namespaced_imagestream_with_http_info(body, namespace, name, **kwargs)\n return data", "title": "" }, { "docid": "eed794e48d33671554e7fc7c2d312cb8", "score": "0.4851142", "text": "def delete(self,\n transport_node_collection_id,\n ):\n return 
self._invoke('delete',\n {\n 'transport_node_collection_id': transport_node_collection_id,\n })", "title": "" }, { "docid": "6d1a704a9141b3ec9416583215cb6848", "score": "0.4848332", "text": "def gnc_commodity_table_delete_namespace(*args):\n return _gnucash_core_c.gnc_commodity_table_delete_namespace(*args)", "title": "" }, { "docid": "9eb6bd475957fc0f0f684db4fb653612", "score": "0.48268515", "text": "def delete_array_connections(\n self,\n references=None, # type: List[models.ReferenceType]\n authorization=None, # type: str\n x_request_id=None, # type: str\n names=None, # type: List[str]\n async_req=False, # type: bool\n _return_http_data_only=False, # type: bool\n _preload_content=True, # type: bool\n _request_timeout=None, # type: Optional[int]\n ):\n # type: (...) -> None\n kwargs = dict(\n authorization=authorization,\n x_request_id=x_request_id,\n names=names,\n async_req=async_req,\n _return_http_data_only=_return_http_data_only,\n _preload_content=_preload_content,\n _request_timeout=_request_timeout,\n )\n kwargs = {k: v for k, v in kwargs.items() if v is not None}\n endpoint = self._array_connections_api.api210_array_connections_delete_with_http_info\n _process_references(references, ['names'], kwargs)\n return self._call_api(endpoint, kwargs)", "title": "" }, { "docid": "7724c71b89ea657381ba47dcbc70a221", "score": "0.4825178", "text": "def delete_items(self, condition=None, db_name=None, collection_name=None):\r\n if condition is None:\r\n raise Exception(\"Need to condition\")\r\n if db_name is not None:\r\n self._db = self._client[db_name]\r\n if collection_name is not None:\r\n self._collection = self._db[collection_name]\r\n return self._collection.delete_many(condition)", "title": "" }, { "docid": "0b513446dac829846f98676bcf53cb2c", "score": "0.48164892", "text": "def delete_policies_nfs(\n self,\n references=None, # type: List[models.ReferenceType]\n authorization=None, # type: str\n x_request_id=None, # type: str\n ids=None, # type: List[str]\n names=None, # type: List[str]\n async_req=False, # type: bool\n _return_http_data_only=False, # type: bool\n _preload_content=True, # type: bool\n _request_timeout=None, # type: Optional[int]\n ):\n # type: (...) 
-> None\n kwargs = dict(\n authorization=authorization,\n x_request_id=x_request_id,\n ids=ids,\n names=names,\n async_req=async_req,\n _return_http_data_only=_return_http_data_only,\n _preload_content=_preload_content,\n _request_timeout=_request_timeout,\n )\n kwargs = {k: v for k, v in kwargs.items() if v is not None}\n endpoint = self._policies_api.api210_policies_nfs_delete_with_http_info\n _process_references(references, ['ids', 'names'], kwargs)\n return self._call_api(endpoint, kwargs)", "title": "" }, { "docid": "655ba943bef289845909c03fc317d7ee", "score": "0.4813149", "text": "def delete_namespaced_imagestreams(self, namespace, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.delete_namespaced_imagestreams_with_http_info(namespace, **kwargs)\n else:\n (data) = self.delete_namespaced_imagestreams_with_http_info(namespace, **kwargs)\n return data", "title": "" }, { "docid": "22a8ed4cfc57128359b77b56bdc24eaf", "score": "0.48011118", "text": "def delete(self, url, description=None, ok_codes=None):\r\n return self.method('delete', url, None, description, ok_codes)", "title": "" }, { "docid": "eb1ba2abd55de146cd3934f849ca2eb3", "score": "0.48010704", "text": "def delete_clusternetworks(self, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.delete_clusternetworks_with_http_info(**kwargs)\n else:\n (data) = self.delete_clusternetworks_with_http_info(**kwargs)\n return data", "title": "" }, { "docid": "fa40fe83988b22a034ea1d51f6aaf2d1", "score": "0.47996196", "text": "def zone_del(name):\n client = Client()\n\n fetch_and_delete(\n client.NetZones, {'name': name}, 'network zone not found.')\n click.echo('Item successfully deleted.')", "title": "" }, { "docid": "0b10056d0c70225aca940f805cb9b364", "score": "0.47905776", "text": "def delete(self,\n ns_profile_id,\n force=None,\n ):\n return self._invoke('delete',\n {\n 'ns_profile_id': ns_profile_id,\n 'force': force,\n })", "title": "" }, { "docid": "d228e45f5588b4b97d566f0720ca72f4", "score": "0.47879112", "text": "def list_netnamespaces_with_http_info(self, **kwargs):\n\n all_params = ['pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method list_netnamespaces\" % key\n )\n params[key] = val\n del params['kwargs']\n\n resource_path = '/oapi/v1/netnamespaces'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n 
header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n return self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1NetNamespaceList',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'))", "title": "" }, { "docid": "5f3d24d48d327a0a45c52a649670be64", "score": "0.47723743", "text": "def delete_collection(request, path):\n coll = Collection.find(path)\n if not coll:\n raise Http404\n \n if not coll.user_can(request.user, \"delete\"):\n raise PermissionDenied\n \n if request.method == \"POST\":\n parent_coll = Collection.find(coll.path)\n if parent_coll:\n parent_path = parent_coll.container\n else:\n # Just in case\n parent_path = \"\"\n coll.delete(username=request.user.name)\n messages.add_message(\n request,\n messages.INFO,\n u\"The collection '{}' has been deleted\".format(coll.name),\n )\n return redirect(\"archive:view\", path=parent_path)\n \n return render(request, \"archive/delete.html\", {\"collection\": coll})", "title": "" }, { "docid": "c449a3fe4860e6106310ce3d0d393195", "score": "0.47709566", "text": "def delete_subnets(\n self,\n references=None, # type: List[models.ReferenceType]\n authorization=None, # type: str\n x_request_id=None, # type: str\n names=None, # type: List[str]\n async_req=False, # type: bool\n _return_http_data_only=False, # type: bool\n _preload_content=True, # type: bool\n _request_timeout=None, # type: Optional[int]\n ):\n # type: (...) -> None\n kwargs = dict(\n authorization=authorization,\n x_request_id=x_request_id,\n names=names,\n async_req=async_req,\n _return_http_data_only=_return_http_data_only,\n _preload_content=_preload_content,\n _request_timeout=_request_timeout,\n )\n kwargs = {k: v for k, v in kwargs.items() if v is not None}\n endpoint = self._subnets_api.api210_subnets_delete_with_http_info\n _process_references(references, ['names'], kwargs)\n return self._call_api(endpoint, kwargs)", "title": "" }, { "docid": "af2e8a8faecd3173dd61e283473903df", "score": "0.4743913", "text": "def obj_delete_list(self, request=None, **kwargs):\n self.get_collection().remove()", "title": "" }, { "docid": "0b553592f094a9ff0f3995636f4007c4", "score": "0.47309074", "text": "def delete_network_and_subnet(conn, os_network):\n name = os_network.name\n logprint(\"Deleting network \\\"{0}\\\"\".format(name))\n s3p.delete_network(conn, os_network)\n logprint(\"Network \\\"{0}\\\" Successfully deleted\".format(name))", "title": "" }, { "docid": "8d2ae8fafc3a636834600e6a691d6cef", "score": "0.47308275", "text": "def _Delete(self):\n delete_cmd = [AZURE_PATH,\n 'network',\n 'vnet',\n 'delete',\n '--quiet',\n self.name]\n vm_util.IssueCommand(delete_cmd)", "title": "" }, { "docid": "0c0a18c3931289bd06a608ebb2a518ff", "score": "0.47276238", "text": "def delete_clusternetwork(self, body, name, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.delete_clusternetwork_with_http_info(body, name, **kwargs)\n else:\n (data) = self.delete_clusternetwork_with_http_info(body, name, **kwargs)\n return data", "title": "" }, { "docid": "418bff54a11dbfea60f7cf702be33b99", "score": "0.47131997", "text": "def Delete(self, request, global_params=None):\n config = self.GetMethodConfig('Delete')\n return self._RunMethod(\n config, request, 
global_params=global_params)", "title": "" }, { "docid": "418bff54a11dbfea60f7cf702be33b99", "score": "0.47131997", "text": "def Delete(self, request, global_params=None):\n config = self.GetMethodConfig('Delete')\n return self._RunMethod(\n config, request, global_params=global_params)", "title": "" }, { "docid": "418bff54a11dbfea60f7cf702be33b99", "score": "0.47131997", "text": "def Delete(self, request, global_params=None):\n config = self.GetMethodConfig('Delete')\n return self._RunMethod(\n config, request, global_params=global_params)", "title": "" }, { "docid": "418bff54a11dbfea60f7cf702be33b99", "score": "0.47131997", "text": "def Delete(self, request, global_params=None):\n config = self.GetMethodConfig('Delete')\n return self._RunMethod(\n config, request, global_params=global_params)", "title": "" }, { "docid": "418bff54a11dbfea60f7cf702be33b99", "score": "0.47131997", "text": "def Delete(self, request, global_params=None):\n config = self.GetMethodConfig('Delete')\n return self._RunMethod(\n config, request, global_params=global_params)", "title": "" }, { "docid": "418bff54a11dbfea60f7cf702be33b99", "score": "0.47131997", "text": "def Delete(self, request, global_params=None):\n config = self.GetMethodConfig('Delete')\n return self._RunMethod(\n config, request, global_params=global_params)", "title": "" }, { "docid": "418bff54a11dbfea60f7cf702be33b99", "score": "0.47131997", "text": "def Delete(self, request, global_params=None):\n config = self.GetMethodConfig('Delete')\n return self._RunMethod(\n config, request, global_params=global_params)", "title": "" }, { "docid": "418bff54a11dbfea60f7cf702be33b99", "score": "0.47131997", "text": "def Delete(self, request, global_params=None):\n config = self.GetMethodConfig('Delete')\n return self._RunMethod(\n config, request, global_params=global_params)", "title": "" }, { "docid": "418bff54a11dbfea60f7cf702be33b99", "score": "0.47131997", "text": "def Delete(self, request, global_params=None):\n config = self.GetMethodConfig('Delete')\n return self._RunMethod(\n config, request, global_params=global_params)", "title": "" }, { "docid": "418bff54a11dbfea60f7cf702be33b99", "score": "0.47131997", "text": "def Delete(self, request, global_params=None):\n config = self.GetMethodConfig('Delete')\n return self._RunMethod(\n config, request, global_params=global_params)", "title": "" }, { "docid": "418bff54a11dbfea60f7cf702be33b99", "score": "0.47131997", "text": "def Delete(self, request, global_params=None):\n config = self.GetMethodConfig('Delete')\n return self._RunMethod(\n config, request, global_params=global_params)", "title": "" }, { "docid": "418bff54a11dbfea60f7cf702be33b99", "score": "0.47131997", "text": "def Delete(self, request, global_params=None):\n config = self.GetMethodConfig('Delete')\n return self._RunMethod(\n config, request, global_params=global_params)", "title": "" }, { "docid": "418bff54a11dbfea60f7cf702be33b99", "score": "0.47131997", "text": "def Delete(self, request, global_params=None):\n config = self.GetMethodConfig('Delete')\n return self._RunMethod(\n config, request, global_params=global_params)", "title": "" }, { "docid": "418bff54a11dbfea60f7cf702be33b99", "score": "0.47131997", "text": "def Delete(self, request, global_params=None):\n config = self.GetMethodConfig('Delete')\n return self._RunMethod(\n config, request, global_params=global_params)", "title": "" }, { "docid": "418bff54a11dbfea60f7cf702be33b99", "score": "0.47131997", "text": "def Delete(self, request, global_params=None):\n config = 
self.GetMethodConfig('Delete')\n return self._RunMethod(\n config, request, global_params=global_params)", "title": "" }, { "docid": "418bff54a11dbfea60f7cf702be33b99", "score": "0.47131997", "text": "def Delete(self, request, global_params=None):\n config = self.GetMethodConfig('Delete')\n return self._RunMethod(\n config, request, global_params=global_params)", "title": "" }, { "docid": "418bff54a11dbfea60f7cf702be33b99", "score": "0.47131997", "text": "def Delete(self, request, global_params=None):\n config = self.GetMethodConfig('Delete')\n return self._RunMethod(\n config, request, global_params=global_params)", "title": "" }, { "docid": "418bff54a11dbfea60f7cf702be33b99", "score": "0.47131997", "text": "def Delete(self, request, global_params=None):\n config = self.GetMethodConfig('Delete')\n return self._RunMethod(\n config, request, global_params=global_params)", "title": "" }, { "docid": "418bff54a11dbfea60f7cf702be33b99", "score": "0.47131997", "text": "def Delete(self, request, global_params=None):\n config = self.GetMethodConfig('Delete')\n return self._RunMethod(\n config, request, global_params=global_params)", "title": "" }, { "docid": "418bff54a11dbfea60f7cf702be33b99", "score": "0.47131997", "text": "def Delete(self, request, global_params=None):\n config = self.GetMethodConfig('Delete')\n return self._RunMethod(\n config, request, global_params=global_params)", "title": "" }, { "docid": "418bff54a11dbfea60f7cf702be33b99", "score": "0.47131997", "text": "def Delete(self, request, global_params=None):\n config = self.GetMethodConfig('Delete')\n return self._RunMethod(\n config, request, global_params=global_params)", "title": "" }, { "docid": "418bff54a11dbfea60f7cf702be33b99", "score": "0.47131997", "text": "def Delete(self, request, global_params=None):\n config = self.GetMethodConfig('Delete')\n return self._RunMethod(\n config, request, global_params=global_params)", "title": "" }, { "docid": "418bff54a11dbfea60f7cf702be33b99", "score": "0.47131997", "text": "def Delete(self, request, global_params=None):\n config = self.GetMethodConfig('Delete')\n return self._RunMethod(\n config, request, global_params=global_params)", "title": "" }, { "docid": "418bff54a11dbfea60f7cf702be33b99", "score": "0.47131997", "text": "def Delete(self, request, global_params=None):\n config = self.GetMethodConfig('Delete')\n return self._RunMethod(\n config, request, global_params=global_params)", "title": "" }, { "docid": "418bff54a11dbfea60f7cf702be33b99", "score": "0.47131997", "text": "def Delete(self, request, global_params=None):\n config = self.GetMethodConfig('Delete')\n return self._RunMethod(\n config, request, global_params=global_params)", "title": "" }, { "docid": "1544b9a1c0e80c99f252a07f0caf097f", "score": "0.47101364", "text": "def remove_nsd_by_id(id):\n output = nsd_coll.remove({\"_id\": ObjectId(id)})\n # print(output)", "title": "" }, { "docid": "bd7f8e3cf5f22d818731bd65b07f82f0", "score": "0.4709134", "text": "def delete(collectionName: str, documentName: str):\n db.collection(collectionName).document(documentName).delete()", "title": "" }, { "docid": "43558829baa6ba1ebe8718fee6e3ba4c", "score": "0.47031385", "text": "def DeleteCallSet(self, request, timeout, metadata=None, with_call=False, protocol_options=None):\n raise NotImplementedError()", "title": "" }, { "docid": "d3bd8a3ca90e28bc845e032c0e22c7ce", "score": "0.47015694", "text": "def delete(self):\n response = self.http_request(self._url, 'DELETE')\n if response.status != 204:\n self.raise_http_error(response)", 
"title": "" }, { "docid": "f139340ebfee56587f9ece99bd7772ea", "score": "0.46999764", "text": "def delete(self,**kwargs):\n session = Session()\n del_network = session.query(self.model).filter_by\\\n (**kwargs).first()\n if del_network:\n session.delete(del_network)\n session.commit()\n\n session.close() \n return \"200\"", "title": "" }, { "docid": "e7dbd1b3c6ca57ea9cda261a86d47bca", "score": "0.4698791", "text": "def test_delete_namespace_by_id(self):\n pass", "title": "" }, { "docid": "834eb71ca89618b87cdd29efb7a19088", "score": "0.4698221", "text": "def delete(self):\n return http_request(methods.DELETE, self.url)", "title": "" }, { "docid": "3ed7a18982f64dbd8d4d070224333af2", "score": "0.46960884", "text": "def delete_policies_nfs_client_rules(\n self,\n references=None, # type: List[models.ReferenceType]\n policies=None, # type: List[models.ReferenceType]\n authorization=None, # type: str\n x_request_id=None, # type: str\n names=None, # type: List[str]\n policy_ids=None, # type: List[str]\n policy_names=None, # type: List[str]\n async_req=False, # type: bool\n _return_http_data_only=False, # type: bool\n _preload_content=True, # type: bool\n _request_timeout=None, # type: Optional[int]\n ):\n # type: (...) -> None\n kwargs = dict(\n authorization=authorization,\n x_request_id=x_request_id,\n names=names,\n policy_ids=policy_ids,\n policy_names=policy_names,\n async_req=async_req,\n _return_http_data_only=_return_http_data_only,\n _preload_content=_preload_content,\n _request_timeout=_request_timeout,\n )\n kwargs = {k: v for k, v in kwargs.items() if v is not None}\n endpoint = self._policies_api.api210_policies_nfs_client_rules_delete_with_http_info\n _process_references(references, ['names'], kwargs)\n _process_references(policies, ['policy_ids', 'policy_names'], kwargs)\n return self._call_api(endpoint, kwargs)", "title": "" }, { "docid": "f894d57f1f3bf342d59f24f69604c5d1", "score": "0.46884155", "text": "def delete_network(self, ofc_tenant_id, ofc_network_id):\n pass", "title": "" }, { "docid": "7ceae52e6dd3107b81d57609cf7690a6", "score": "0.46855015", "text": "def testDeleteNamespaceWithData(self):\n namespaces = NamespaceAPI(self.user)\n namespaces.create([(u'username/parent', u'A parent namespace.')])\n namespaces.create([(u'username/parent/child', u'A child namespace.')])\n self.store.commit()\n\n with login(u'username', self.user.objectID, self.transact) as session:\n deferred = self.facade.deleteNamespace(session, u'username/parent')\n yield self.assertFailure(deferred, TNamespaceNotEmpty)", "title": "" }, { "docid": "1e39379454be4f05adcdf3677856591b", "score": "0.46835598", "text": "def delete_clusterpolicies_with_http_info(self, **kwargs):\n\n all_params = ['pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method delete_clusterpolicies\" % key\n )\n params[key] = val\n del params['kwargs']\n\n resource_path = '/oapi/v1/clusterpolicies'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n 
query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = []\n\n return self.api_client.call_api(resource_path, 'DELETE',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='UnversionedStatus',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'))", "title": "" }, { "docid": "a4245de80d92330e5bbb6f5b98610273", "score": "0.46813124", "text": "def remove_all_networks(request):\n dcs = getattr(request.node.cls, \"remove_dcs_networks\", list())\n\n def fin():\n \"\"\"\n Remove all networks from Data-Centers\n \"\"\"\n results = [\n hl_networks.remove_all_networks(datacenter=dc) for dc in dcs\n ]\n assert all(results)\n request.addfinalizer(fin)", "title": "" }, { "docid": "fbedf1d1a04ffe22a86981a1650b2d57", "score": "0.46773592", "text": "def UndeleteCluster(self, request, timeout, metadata=None, with_call=False, protocol_options=None):\n raise NotImplementedError()", "title": "" }, { "docid": "b07b7578dea8eb0524b053cef932fa31", "score": "0.4675672", "text": "def test_delete_ovn_network(self):\n assert enp_conf.PROVIDER_CLS.delete_network_by_name(\n network=self.auto_sync_net\n )\n self.remove_ovn_networks_from_provider.pop(self.auto_sync_net)\n assert helper.wait_for_auto_sync(networks=[self.auto_sync_net])", "title": "" }, { "docid": "c272d2d971fcc02449b13231442ca550", "score": "0.46642482", "text": "def delete_object(headers, tenant, namespace, _object):\n\n tenant_url = \"/api/Tenants/{}\".format(tenant)\n api_url = \"/Namespaces/{0}/{1}\".format(namespace, _object)\n url = OCS_URL + tenant_url + api_url\n\n response = requests.delete(url, headers=headers)\n\n print('--- Deleted {} -----------------------------------------'.format(api_url))\n\n print('\\nExit code: |{0}| \\n\\nText: |{1}| \\n\\nUrl: |{2}| '.format(\n response.status_code,\n response.text,\n response.url,\n ))\n\n return response.text", "title": "" }, { "docid": "474728d6215c96853fbcd7915b7a8adb", "score": "0.46638745", "text": "def delete(self, session, _id, dry_run=False):\n if dry_run or session[\"force\"]: # delete completely\n return BaseTopic.delete(self, session, _id, dry_run)\n else: # if not sent to kafka\n v = BaseTopic.delete(self, session, _id, dry_run=True)\n self.db.set_one(\"sdns\", {\"_id\": _id}, {\"_admin.to_delete\": True}) # TODO change status\n self._send_msg(\"delete\", {\"_id\": _id})\n return v # TODO indicate an offline operation to return 202 ACCEPTED", "title": "" }, { "docid": "ae607b8aa575219693244e557fae5e2e", "score": "0.4657349", "text": "def dropCollection(collection_name):\n\n\ttry:\n\t\tclient = MongoClient(host=\"localhost\", port=27017)\n\t\tdb = client[\"Library_System\"]\n\t\tmycol = db[collection_name]\n\t\tmycol.drop()\n\t\tprint('Deleted Successfully')\n\n\texcept Exception as 
e:\n\n\t\traise Exception(\"Error Occurred in dropCollection function: \" + str(e))", "title": "" }, { "docid": "5fa70c6c8e3f543af01cc85979c9976c", "score": "0.46551102", "text": "def list_netnamespaces(self, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.list_netnamespaces_with_http_info(**kwargs)\n else:\n (data) = self.list_netnamespaces_with_http_info(**kwargs)\n return data", "title": "" }, { "docid": "0dcbe1cccf79cfeffe38e0bea8f6ad56", "score": "0.4653646", "text": "def clear(nitro):\n __nsacls = NSAcls()\n return __nsacls.perform_operation(nitro, \"clear\")", "title": "" }, { "docid": "8aed8fd8534a79adbc00f205bb4f0d85", "score": "0.46523225", "text": "def delete(nitro, policybinding):\n __policybinding = SystemGlobalAuthTacacsPolicyBinding()\n __policybinding.set_policyname(policybinding.get_policyname())\n nsresponse = __policybinding.delete_resource(nitro)\n return nsresponse", "title": "" }, { "docid": "52d82676ed87767e02a98f0ea68ca0c9", "score": "0.4646356", "text": "def delete(self, xml, commit=True):\n \n if xml:\n xml = \"\\n<delete>\\n\" + xml + \"</delete>\\n\"\n \n results=[]\n \n results.append(self._update_request(\"delete\", xml))\n \n if commit:\n results.append(self.commit())\n \n return results", "title": "" }, { "docid": "1e8b971f2b134f269cf11039b356fc0e", "score": "0.4646019", "text": "def DeleteCluster(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "title": "" } ]
c93753c8afb08a545112b056bc0f28f0
pool using a 2x2 input and use the max algorithm
[ { "docid": "2fa41d3bc1cf9b83baf0525d8a4a14d3", "score": "0.78795815", "text": "def max_pool_2x2(x):\n return tf.nn.max_pool(x, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')", "title": "" } ]
[ { "docid": "2eae5b63f04a74e2ec4ec6ebce404bd6", "score": "0.8062191", "text": "def maxpool2x2(x):\n # YOUR CODE HERE\n x_h, x_w, x_dep = x.shape\n win_size = 2\n stride = 2\n step_y = (x_h - win_size) // stride + 1\n step_x = (x_w - win_size) // stride + 1\n tmp = np.zeros((step_y, step_x, x_dep))\n for i in range(step_y):\n for j in range(step_x):\n tmp[i, j] = np.max(x[stride * i:stride * i + win_size, stride * j:stride * j + win_size], axis=(0, 1))\n # result = zero_padding(tmp, x.shape)\n result = tmp\n return result", "title": "" }, { "docid": "6b42edbcf3c3233246c54cb91ca4b57b", "score": "0.8009194", "text": "def max_pool(x, pool_param):\n\n hw2_utils.exercise(\n andrew_username=\"mbarman\", # <<< set your andrew username here\n seed=42\n )\n\n out = None\n\n ###########################################################################\n # Your code starts here\n ###########################################################################\n # TODO: 4.3 MaxPooling Implementation\n\n h = int((x.shape[2] - pool_param['pool_height'])/pool_param ['stride']) + 1\n w = int((x.shape[3] - pool_param['pool_width'])/pool_param ['stride']) + 1\n out = np.zeros((x.shape[0],x.shape[1],h,w))\n\n for c in range(x.shape[0]):\n for d in range(x.shape[1]):\n p =0\n for i in range(out.shape[2]):\n q = 0\n for j in range(out.shape[3]):\n out[c,d,i,j] = np.max(x[c,d,p:p+pool_param['pool_height'],q:q+pool_param['pool_width']])\n q = q + pool_param['stride']\n p = p + pool_param['stride']\n\n ###########################################################################\n # END OF YOUR CODE\n ###########################################################################\n\n return out", "title": "" }, { "docid": "94779241eb84b682b2c5777bb1cfcc4e", "score": "0.7937214", "text": "def max_pool_2x2(self,x):\n return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1], padding='SAME')", "title": "" }, { "docid": "f7e8ebf3c8adee149edbb8324fc6faf0", "score": "0.78943413", "text": "def max_pool_2x2(x):\n return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],strides=[1, 2, 2, 1], padding='SAME')", "title": "" }, { "docid": "074ccf8b19edf96870bcbb911d7710ce", "score": "0.78600746", "text": "def max_pool_2x2(x):\n return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1], padding='SAME')", "title": "" }, { "docid": "169b53e5115496718934557f214452fc", "score": "0.7844076", "text": "def max_pool_2x2(x):\n return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1], padding='SAME')", "title": "" }, { "docid": "012aa5a69786a6ef80ed8cf1a4b9fca4", "score": "0.781227", "text": "def max_pool_2x2(x):\n return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1], padding='SAME')", "title": "" }, { "docid": "012aa5a69786a6ef80ed8cf1a4b9fca4", "score": "0.781227", "text": "def max_pool_2x2(x):\n return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1], padding='SAME')", "title": "" }, { "docid": "bf137ad10aee31053da624f9be023644", "score": "0.78080547", "text": "def max_pool_2x2(x, name=\"max_pool\"):\n return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1], padding=\"SAME\", name=name)", "title": "" }, { "docid": "7f7c279d5e48acbbc003b6d54ed2d3e0", "score": "0.77849793", "text": "def max_pool_2x2(x):\n return tf.nn.max_pool(x, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')", "title": "" }, { "docid": "77cb81ac290537456db90d9f9c5b6186", "score": "0.7780656", "text": "def max_pool_2x2(x, name = 'max-pool-2x2'):\n return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1], 
padding='SAME', name = name)", "title": "" }, { "docid": "46dd036e22a6e60384905d6475a80bba", "score": "0.7766947", "text": "def max_pool_2x2(x):\n return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')", "title": "" }, { "docid": "46dd036e22a6e60384905d6475a80bba", "score": "0.7766947", "text": "def max_pool_2x2(x):\n return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')", "title": "" }, { "docid": "46dd036e22a6e60384905d6475a80bba", "score": "0.7766947", "text": "def max_pool_2x2(x):\n return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')", "title": "" }, { "docid": "3a40b981d413979797b7d92c4869c1cf", "score": "0.7746169", "text": "def max_pool_2x2(x):\n return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1], padding='SAME')", "title": "" }, { "docid": "3a40b981d413979797b7d92c4869c1cf", "score": "0.7746169", "text": "def max_pool_2x2(x):\n return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1], padding='SAME')", "title": "" }, { "docid": "3a40b981d413979797b7d92c4869c1cf", "score": "0.7746169", "text": "def max_pool_2x2(x):\n return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1], padding='SAME')", "title": "" }, { "docid": "3a40b981d413979797b7d92c4869c1cf", "score": "0.7746169", "text": "def max_pool_2x2(x):\n return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1], padding='SAME')", "title": "" }, { "docid": "3a40b981d413979797b7d92c4869c1cf", "score": "0.7746169", "text": "def max_pool_2x2(x):\n return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1], padding='SAME')", "title": "" }, { "docid": "978d8e8f582f6d4e72c64e75d3305c89", "score": "0.7631825", "text": "def my_max_pool_2d(sym_input, pool_shape = (2,2)):\n\n s = None\n for i in range(pool_shape[1]):\n t = sym_input[:,:,:,i::pool_shape[1]]\n if s is None:\n s = t\n else:\n s = T.maximum(s, t)\n temp = s\n s = None\n for i in range(pool_shape[0]):\n t = temp[:,:,i::pool_shape[0],:]\n if s is None:\n s = t\n else:\n s = T.maximum(s, t)\n sym_ret = s\n return sym_ret", "title": "" }, { "docid": "7901b56eded131d7d26680fa9a007d19", "score": "0.75443363", "text": "def maxpool2d(x):\n return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides = [1, 2, 2, 1], padding = 'SAME')", "title": "" }, { "docid": "ef9575d52667b102fb09375c511ec58c", "score": "0.73333466", "text": "def max_pool_forward_naive(x, pool_param):\n out = None\n \n # ================================================================ #\n # YOUR CODE HERE:\n # Implement the max pooling forward pass.\n # ================================================================ #\n\n # read off the shapes\n N, C, H, W = x.shape\n ph = pool_param['pool_height']\n pw = pool_param['pool_width']\n stride = pool_param['stride']\n # compute the outputs given the formula\n h_out = int((H-ph)/stride+1)\n w_out = int((W-pw)/stride+1)\n out = np.zeros((N,C,h_out,w_out))\n # take maxes across the height and width regions, meaning that we can keep the filters & num_points constant\n # ie don't have to iter across them\n for i in range(h_out):\n for j in range(w_out):\n height_slice, width_slice = slice(i * stride, i * stride + ph), slice(j * stride, j * stride + pw)\n # only take max across the height and width axes, so 2 and 3 (0 and 1) are num points and channels\n out[:,:,i,j] = np.amax(x[:,:,height_slice, width_slice], axis = (2,3))\n\n\n # ================================================================ #\n # END YOUR CODE HERE\n # 
================================================================ # \n cache = (x, pool_param)\n return out, cache", "title": "" }, { "docid": "c59074c81a679ad40aba119e3d315401", "score": "0.7312753", "text": "def max_pool(self, x, pool_size=(2, 2)):\n self.pool_layers += 1\n x = MaxPooling2D(pool_size=pool_size, name=\"max_pool_\" + str(self.conv_layers))(x)\n return x", "title": "" }, { "docid": "5066f48998899a9fd71e57f931f35cce", "score": "0.7312584", "text": "def max_pool(x, input_repr='regular'):\n with tf.name_scope(\"max_pool\"):\n x = tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n return scaleandshift(x, 0.77, -1, input_repr=input_repr)", "title": "" }, { "docid": "06f09785ea1657e57dd94c8af5b2fc18", "score": "0.7296146", "text": "def max_pool(self, x, pool_size=(2, 2)):\n self.pool_layers += 1\n x = TimeDistributed(\n MaxPooling2D(pool_size=pool_size, name=\"max_pool_\" + str(self.conv_layers))\n )(x)\n return x", "title": "" }, { "docid": "d92aed680822c815128de146b7abef19", "score": "0.72692835", "text": "def get_max_pool(input):\n return tf.nn.max_pool(input, ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1], padding='SAME')", "title": "" }, { "docid": "a4c32330366ec61cd3420a48c4c23132", "score": "0.72664833", "text": "def forward_max_pooling_naive(x, pool_param):\n out = None\n #############################################################################\n # TODO: Implement the forward pass for a max pooling layer                  #\n #############################################################################\n N, C, H, W = x.shape\n ph = pool_param['pool_height']\n pw = pool_param['pool_width']\n S = pool_param['stride']\n Hd = int((H - ph) / S + 1)\n Wd = int((W - pw) / S + 1)\n out = np.zeros((N, C, Hd, Wd))\n\n for i in range(N):\n for j in range(C):\n for k in range(Hd):\n for l in range(Wd):\n out[i, j, k, l] = np.max(x[i, j, k * S: k * S + ph, l * S: l * S + pw])\n cache = (x, pool_param)\n #############################################################################\n # END OF YOUR CODE                                                          #\n #############################################################################\n return out, cache", "title": "" }, { "docid": "4dc742c97e36c7ae9a7fbaa11d648e37", "score": "0.72600603", "text": "def max_pool_along_second_axis(sym_input, pool_factor):\n s = None\n for i in range(pool_factor):\n t = sym_input[:,i::pool_factor]\n if s is None:\n s = t\n else:\n s = T.maximum(s, t)\n return s", "title": "" }, { "docid": "10d96c49b2b4a57c7cc3dce053f9ca11", "score": "0.7197504", "text": "def max_pool(self, inputs, name, stride=2):\r\n\t\treturn tf.nn.max_pool(\r\n\t\t\tinputs,\r\n\t\t\tksize=[1, stride, stride, 1],\r\n\t\t\tstrides=[1, stride, stride, 1],\r\n\t\t\tpadding='SAME',\r\n\t\t\tname=name)", "title": "" }, { "docid": "30e19846d0be4705f57a7f5e36cce0b9", "score": "0.71886295", "text": "def MaxPool(inp, name='MaxPool'):\n with tf.variable_scope(name):\n out = tf.nn.max_pool(inp, [1, 2, 2, 1], [1, 2, 2, 1], padding='VALID')\n return out", "title": "" }, { "docid": "61ad5e41f393cb0a8aab86e28ca37124", "score": "0.7182704", "text": "def max_pool_4x4(self,x):\n return tf.nn.max_pool(x, ksize=[1, 4, 4, 1],\n strides=[1, 4, 4, 1], padding='SAME')", "title": "" }, { "docid": 
"8d5fe6833a0f292ff1840a970f29de28", "score": "0.7148217", "text": "def _max_pool_2x2(value, name, is_training):\r\n with tf.variable_scope(name) as scope1:\r\n if not is_training:\r\n scope1.reuse_variables()\r\n return tf.nn.max_pool(value, ksize=[1, 2, 2, 1],\r\n strides=[1, 2, 2, 1], padding='SAME', name=name)", "title": "" }, { "docid": "920dad41a58ee9dee48a4fff21542dca", "score": "0.71073025", "text": "def test_pool2d():\n verify_pool2d([1, 16, 32, 32], [2, 2], [2, 2], [1, 1], [0, 0, 0, 0], \"avg\", False, True)\n verify_pool2d([1, 16, 31, 31], [3, 3], [3, 3], [1, 1], [1, 2, 1, 2], \"avg\", False, True)\n verify_pool2d([1, 16, 32, 32], [2, 2], [2, 2], [1, 1], [1, 2, 1, 2], \"avg\", False, False)\n verify_pool2d([1, 16, 31, 31], [4, 4], [4, 4], [1, 1], [3, 3, 3, 3], \"avg\", False, False)\n verify_pool2d([1, 16, 31, 31], [4, 4], [4, 4], [1, 1], [0, 0, 0, 0], \"avg\", False, False)\n verify_pool2d([1, 16, 32, 32], [2, 3], [2, 2], [1, 1], [0, 0, 0, 0], \"max\", False)\n verify_pool2d([1, 16, 31, 31], [3, 3], [3, 3], [1, 1], [2, 1, 2, 1], \"max\", False)\n verify_pool2d([1, 16, 31, 31], [3, 3], [3, 3], [1, 1], [2, 1, 2, 1], \"max\", True)\n\n verify_pool2d([1, 16, 31, 31], [3, 3], [3, 3], [1, 1], [2, 1, 0, 3], \"avg\", False, True)\n verify_pool2d([1, 16, 32, 32], [2, 3], [2, 2], [1, 1], [0, 3, 2, 1], \"avg\", False, False)\n verify_pool2d([1, 16, 31, 31], [3, 3], [3, 3], [1, 1], [1, 0, 3, 2], \"max\", False)\n verify_pool2d([1, 16, 31, 31], [3, 3], [3, 3], [1, 1], [3, 2, 1, 0], \"max\", True)\n\n # Test non-1 dilations\n verify_pool2d([1, 16, 31, 31], [3, 3], [3, 3], [2, 1], [2, 1, 0, 3], \"avg\", False, True)\n verify_pool2d([1, 16, 32, 32], [2, 3], [2, 2], [2, 3], [0, 3, 2, 1], \"avg\", False, False)\n verify_pool2d([1, 16, 31, 31], [3, 3], [3, 3], [3, 3], [1, 0, 3, 2], \"max\", False)\n verify_pool2d([1, 16, 31, 31], [3, 3], [3, 3], [2, 2], [3, 2, 1, 0], \"max\", True)\n # Test channel last\n verify_pool2d(\n [1, 32, 32, 16], [2, 2], [2, 2], [1, 1], [0, 0, 0, 0], \"avg\", False, True, layout=\"NHWC\"\n )\n verify_pool2d(\n [1, 31, 31, 16], [3, 3], [3, 3], [1, 1], [1, 2, 1, 2], \"avg\", False, True, layout=\"NHWC\"\n )\n verify_pool2d(\n [1, 32, 32, 16], [2, 2], [2, 2], [1, 1], [1, 2, 1, 2], \"avg\", False, False, layout=\"NHWC\"\n )\n verify_pool2d(\n [1, 31, 31, 16], [4, 4], [4, 4], [1, 1], [3, 3, 3, 3], \"avg\", False, False, layout=\"NHWC\"\n )\n verify_pool2d(\n [1, 31, 31, 16], [4, 4], [4, 4], [1, 1], [0, 0, 0, 0], \"avg\", False, False, layout=\"NHWC\"\n )\n verify_pool2d(\n [1, 32, 32, 16], [2, 3], [2, 2], [1, 1], [0, 0, 0, 0], \"max\", False, layout=\"NHWC\"\n )\n verify_pool2d(\n [1, 31, 31, 16], [3, 3], [3, 3], [1, 1], [2, 1, 2, 1], \"max\", False, layout=\"NHWC\"\n )\n verify_pool2d([1, 31, 31, 16], [3, 3], [3, 3], [1, 1], [2, 1, 2, 1], \"max\", True, layout=\"NHWC\")\n\n verify_pool2d(\n [1, 31, 31, 16], [3, 3], [3, 3], [1, 1], [2, 1, 0, 3], \"avg\", False, True, layout=\"NHWC\"\n )\n verify_pool2d(\n [1, 32, 32, 16], [2, 3], [2, 2], [1, 1], [0, 3, 2, 1], \"avg\", False, False, layout=\"NHWC\"\n )\n verify_pool2d(\n [1, 31, 31, 16], [3, 3], [3, 3], [1, 1], [1, 0, 3, 2], \"max\", False, layout=\"NHWC\"\n )\n verify_pool2d([1, 31, 31, 16], [3, 3], [3, 3], [1, 1], [3, 2, 1, 0], \"max\", True, layout=\"NHWC\")\n verify_pool2d(\n [1, 31, 31, 16], [3, 3], [3, 3], [2, 1], [2, 1, 0, 3], \"avg\", False, True, layout=\"NHWC\"\n )\n verify_pool2d(\n [1, 32, 32, 16], [2, 3], [2, 2], [2, 3], [0, 3, 2, 1], \"avg\", False, False, layout=\"NHWC\"\n )\n verify_pool2d(\n [1, 31, 31, 16], 
[3, 3], [3, 3], [3, 3], [1, 0, 3, 2], \"max\", False, layout=\"NHWC\"\n )\n verify_pool2d([1, 31, 31, 16], [3, 3], [3, 3], [2, 2], [3, 2, 1, 0], \"max\", True, layout=\"NHWC\")", "title": "" }, { "docid": "8c9df1b2fa10555c86e6181f88b7ff66", "score": "0.71021104", "text": "def _max_pool(self, input, name, debug):\n pool = tf.nn.max_pool(input, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],\n padding='SAME', name=name)\n\n if debug:\n pool = tf.Print(pool, [tf.shape(pool)],\n message='Shape of %s' % name,\n summarize=4, first_n=1)\n return pool", "title": "" }, { "docid": "079779cd9cff8ab7781e0a5861e47ce8", "score": "0.7093704", "text": "def max_pool_2x2(self):\n x = self\n x = x[:, :, ::2, :].max_same_error(x[:, :, 1::2, :])\n x = x[:, ::2, :, :].max_same_error(x[:, 1::2, :, :])\n return x", "title": "" }, { "docid": "615536cc000fe1dc3afc9fa712067373", "score": "0.70828784", "text": "def max_pool_backward_naive(dout, cache):\n dx = None\n #############################################################################\n # TODO: Implement the max pooling backward pass #\n #############################################################################\n # Extracting Params\n x, pool_param = cache\n N, C, H, W = x.shape\n N, C, H_out, W_out = dout.shape\n pool_height = pool_param['pool_height']\n pool_width = pool_param['pool_width']\n stride = pool_param['stride']\n\n # Pre allocation\n dx = np.zeros_like(x)\n\n # Iterate through dout\n pointer_height = 0\n for heigh_idx in xrange(H_out):\n pointer_width = 0\n for width_idx in xrange(W_out):\n d_out_reshaped = dout[:, :, heigh_idx, width_idx].reshape(N*C) # Shape: (N*C)\n # Finding the maximum in x.reshaped along 1\n max_args = x[:, :, pointer_height:pointer_height+pool_height, pointer_width:pointer_width+pool_width].reshape(N*C, -1).argmax(axis=1)\n # Inserting the numbers in dx\n dx_reshaped = np.zeros((N*C, pool_height*pool_height))\n dx_reshaped[range(N*C), max_args] = d_out_reshaped\n dx[:, :, pointer_height:pointer_height+pool_height, pointer_width:pointer_width+pool_width] = dx_reshaped.reshape(N , C, pool_height, pool_width)\n # Adding the strides\n pointer_width += stride\n pointer_height += stride\n\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n return dx", "title": "" }, { "docid": "f21faa4867e63114b8742415f3d3cad3", "score": "0.7067959", "text": "def max_pool(x, kernel_size):\n return tf.nn.max_pool(x, ksize=[1, kernel_size[0], kernel_size[1], 1], strides=[1, kernel_size[0], kernel_size[1], 1], padding='SAME')", "title": "" }, { "docid": "11c511d1f0ed72680b4cfc5527f38f8c", "score": "0.70527047", "text": "def max_pool_forward(x, pool_param):\n out = None\n ###########################################################################\n # TODO: Implement the max-pooling forward pass #\n ###########################################################################\n N, C, H, W = x.shape\n pool_height = pool_param['pool_height']\n pool_width = pool_param['pool_width']\n stride = pool_param['stride']\n H_out = int(1 + (H - pool_height) / stride)\n W_out = int(1 + (W - pool_width) / stride)\n\n # Naive implementation with for loops\n\n #out = np.zeros((N, C, H_out, W_out))\n # for k in range(N):\n # for c in range(C):\n # for i in range(H_out):\n # for j in range(W_out):\n # out[k, c, i, j] = np.max(x[k, c, i * stride:i * stride + pool_height, j * stride:j * stride + pool_width])\n\n X_reshaped = x.reshape(N * C, 1, 
H, W)\n X_col = im2col_indices(X_reshaped, pool_height, pool_width, padding=0, stride=stride)\n max_idx = np.argmax(X_col, axis=0)\n out = X_col[max_idx, range(max_idx.size)]\n out = out.reshape(H_out, W_out, N, C)\n out = out.transpose(2, 3, 0, 1)\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n cache = (x, pool_param, X_col, max_idx)\n return out, cache", "title": "" }, { "docid": "59cdf4abc59051efb717422a7bfdb131", "score": "0.7011144", "text": "def max_pool_forward_naive(x, pool_param):\n out = None\n #############################################################################\n # TODO: Implement the max pooling forward pass #\n #############################################################################\n # Extracting params\n HH = pool_param['pool_height']\n WW = pool_param['pool_width']\n stride = pool_param['stride']\n N, C, H, W = x.shape\n pad = 0 # For pooling, there is no padding\n H_out = 1 + (H + 2 * pad - HH) / stride\n W_out = 1 + (W + 2 * pad - WW) / stride\n\n # Iterating\n pointer_height, pointer_width, out = 0, 0, np.zeros((N, C, H_out, W_out))\n for heigh_idx in xrange(H_out):\n pointer_width = 0\n for width_idx in xrange(W_out):\n out[:, :, heigh_idx, width_idx] = x[:, :, pointer_height:pointer_height+HH, pointer_width:pointer_width+WW].max((2,3))\n pointer_width += stride\n pointer_height += stride\n\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n cache = (x, pool_param)\n return out, cache", "title": "" }, { "docid": "3274cae404222671b01f5f77fcef2348", "score": "0.7007448", "text": "def max_pool_5x5(self,x):\n return tf.nn.max_pool(x, ksize=[1, 5, 5, 1],\n strides=[1, 5, 5, 1], padding='SAME')", "title": "" }, { "docid": "7c944ecc1a02c8c28e04a6f51b0b1052", "score": "0.7001338", "text": "def my_max_pool_3d(sym_input, pool_shape = (2,2,2)):\n\n s = None\n if pool_shape[2]>1:\n for i in range(pool_shape[2]):\n t = sym_input[:,:,:,:,i::pool_shape[2]]\n if s is None:\n s = t\n else:\n s = T.maximum(s, t)\n else:\n s = sym_input\n if pool_shape[0]>1:\n temp = s\n s = None\n for i in range(pool_shape[0]):\n t = temp[:,i::pool_shape[0],:,:,:]\n if s is None:\n s = t\n else:\n s = T.maximum(s, t)\n\n if pool_shape[1]>1:\n temp = s\n s = None\n for i in range(pool_shape[1]):\n t = temp[:,:,:,i::pool_shape[0],:]\n if s is None:\n s = t\n else:\n s = T.maximum(s, t)\n sym_ret = s\n return sym_ret", "title": "" }, { "docid": "38832672dea60019e3dc92ca67983c91", "score": "0.69579625", "text": "def max_pool(x, filter_height, filter_width, stride_y, stride_x, name,\n padding='SAME'):\n return tf.nn.max_pool(x, ksize=[1, filter_height, filter_width, 1],\n strides=[1, stride_y, stride_x, 1],\n padding=padding, name=name)", "title": "" }, { "docid": "5646d735113b9d471cd2b17971d2a961", "score": "0.6949981", "text": "def max_pool_forward_naive(x, pool_param):\n out = None\n #############################################################################\n # TODO: Implement the max pooling forward pass #\n #############################################################################\n pool_height = pool_param[\"pool_height\"]\n pool_width = pool_param[\"pool_width\"]\n stride = pool_param[\"stride\"]\n N,F,H,W=x.shape\n\n H_new = (H-pool_height)/stride +1\n W_new = (W-pool_width)/stride +1\n out = 
np.zeros(np.product([N,F,H_new,W_new])).reshape([N,F,H_new,W_new])\n\n for i in xrange(N):\n for j in xrange(F):\n for k in xrange(H_new):\n hs = k * stride\n for l in xrange(W_new):\n ws = l * stride\n\n value = np.max(x[i,j,hs:hs+pool_height,ws:ws+pool_width])\n\n out[i,j,k,l]=value\n\n cache = (x, pool_param)\n return out, cache", "title": "" }, { "docid": "887e1051d8d32f187a206be0f78f7d48", "score": "0.69416213", "text": "def max_pool(bottom, ks=2, stride=2, pad=0):\n pool = caffe.layers.Pooling(bottom, pool=caffe.params.Pooling.MAX, kernel_size=ks, stride=stride, pad=pad)\n return pool", "title": "" }, { "docid": "abb0a2a3b1cb2d9e0b7e1ee6b89fde4f", "score": "0.69391924", "text": "def max_pool(x, pool_size, stride, name, padding='VALID'):\n net = tf.layers.max_pooling2d(inputs=x, pool_size=pool_size, strides=stride,\n padding=padding, name=name)\n print('{}: {}'.format(name, net.get_shape()))\n return net", "title": "" }, { "docid": "9d49b0bee660bf3ce75ebe321deedc2c", "score": "0.6937827", "text": "def pool(input, size):\n return tf.nn.max_pool(\n input, \n ksize = [1, size, size, 1], \n strides = [1, size, size, 1], \n padding = 'SAME'\n )", "title": "" }, { "docid": "9033080531c8f78dbdb6e6db6d0e6018", "score": "0.69180465", "text": "def max_pool(self, block_size):\n arr_0to1_reduced = ia.max_pool(self.arr_0to1, block_size)\n return HeatmapsOnImage.from_0to1(arr_0to1_reduced, shape=self.shape, min_value=self.min_value,\n max_value=self.max_value)", "title": "" }, { "docid": "6986741fa529561525a82276c56ec37d", "score": "0.6914817", "text": "def conv_pool(x,conv_val,feat_in_size,feat_out_size):\n\tW = weight_variable([conv_val,conv_val,feat_in_size,feat_out_size])\n\tb = bias_variable([feat_out_size])\n\ty = tf.nn.elu(conv2d(x,W)+b)\n\ty_pool = max_pool_2x2(y)\n\n\tprint(y,y_pool)\n\treturn y_pool", "title": "" }, { "docid": "a8891ba9dc5e403f3a32b323c9104146", "score": "0.6853666", "text": "def graph_max_pooling(x, pooling_inds):\n return GraphMaxPoolingFunction(pooling_inds)(x)", "title": "" }, { "docid": "8389cc54969cefe5121ecc65401cedc1", "score": "0.6846744", "text": "def op_d_max_pool(self, activation, input_shape, poolsize, stride,\n padding):\n # n_batches, n_in, h, w - number of batches, number of channels,\n # image height, image width\n n_batches, n_in, h, w = input_shape\n\n pad_h, pad_w = padding\n activation = activation.reshape_for_padding(input_shape, padding,\n lower_val=-np.inf,\n upper_val=-np.inf)\n input_shape = (n_batches, n_in, h + 2 * pad_h, w + 2 * pad_w)\n h += 2 * pad_h\n w += 2 * pad_w\n\n # fh, fw - pool height, pool width\n fh, fw = poolsize\n stride_h, stride_w = stride\n output = self\n result = activation.from_shape(input_shape, neutral=True)\n\n for at_h, at_w in product(xrange(0, h - fh + 1, stride_h),\n xrange(0, w - fw + 1, stride_w)):\n # at_out_h - height of output corresponding to pool at position\n # at_h\n at_out_h = at_h / stride_h\n # at_out_w - width of output corresponding to pool at position\n # at_w\n at_out_w = at_w / stride_w\n\n for at_f_h, at_f_w in product(xrange(at_h, at_h + fh),\n xrange(at_w, at_w + fw)):\n # maximum lower and upper value of neighbours\n neigh_max_low = -np.inf\n neigh_max_upp = -np.inf\n neigh_max_low = np.asarray([-np.inf], dtype=config.floatX)\n neigh_max_upp = np.asarray([-np.inf], dtype=config.floatX)\n neigh_max_itv = NpInterval(neigh_max_low, neigh_max_upp)\n act_slice = activation[:, :, at_f_h, at_f_w]\n\n # setting maximum lower and upper of neighbours\n for at_f_h_neigh, at_f_w_neigh in \\\n 
product(xrange(at_h, at_h + fh),\n xrange(at_w, at_w + fw)):\n\n if (at_f_h_neigh, at_f_w_neigh) != (at_f_h, at_f_w):\n neigh_slice = activation[:, :, at_f_h_neigh,\n at_f_w_neigh]\n neigh_max_itv = neigh_max_itv.max(neigh_slice)\n\n # must have impact on output\n must = act_slice.lower > neigh_max_itv.upper\n # cannot have impact on output\n cannot = act_slice.upper < neigh_max_itv.lower\n # or might have impact on output\n output_slice = output[:, :, at_out_h, at_out_w]\n output_with_0 = NpInterval(np.minimum(output_slice.lower, 0.),\n np.maximum(output_slice.upper, 0.))\n\n result[:, :, at_f_h, at_f_w] += \\\n NpInterval.select([must, cannot, True],\n [output_slice, 0., output_with_0])\n\n return result[:, :, pad_h:h - pad_h, pad_w:w - pad_w]", "title": "" }, { "docid": "6102f60da356cc9874dbb3f6ca3c695e", "score": "0.68458444", "text": "def max_pool(x, filter_height, filter_width, stride_y, stride_x, name,\r\n padding='SAME'):\r\n return tf.nn.max_pool(x, ksize=[1, filter_height, filter_width, 1],\r\n strides=[1, stride_y, stride_x, 1],\r\n padding=padding, name=name)", "title": "" }, { "docid": "8e184fd69250e4e977f64c15b7a5769b", "score": "0.6845299", "text": "def max_pool_with_argmax(net, stride):\n with tf.name_scope('MaxPoolArgMax'):\n _, mask = tf.nn.max_pool_with_argmax(\n net,\n ksize=[1, stride, stride, 1],\n strides=[1, stride, stride, 1],\n padding='SAME')\n mask = tf.stop_gradient(mask)\n net = slim.max_pool2d(net, kernel_size=[stride, stride], stride=stride)\n return net, mask", "title": "" }, { "docid": "7ce55e37ee3bca6ece7adf0823904239", "score": "0.68443614", "text": "def maxpool_forward(x, pool_param):\n (N, C, H, W) = x.shape\n pool_h = pool_param['pool_height']\n pool_w = pool_param['pool_width']\n stride = pool_param['stride']\n\n step_h = 1 + (H - pool_h) / stride\n step_w = 1 + (W - pool_w) / stride\n\n out = np.zeros((N, C, step_h, step_w))\n\n for i in xrange(N):\n this_img = x[i]\n for j in xrange(C):\n this_channel = this_img[j]\n for k in xrange(step_h):\n row_begin = k * stride\n for l in xrange(step_w):\n col_begin = l * stride\n out[i, j, k, l] = np.max(this_channel[row_begin:row_begin+pool_h, col_begin:col_begin+pool_w])\n cache = (x, pool_param)\n\n return out, cache", "title": "" }, { "docid": "3d884eb5ba0023134caa44c99b7db531", "score": "0.684097", "text": "def _max_pool(self, bottom, name):\n return tf.nn.max_pool(\n bottom,\n ksize=[1, 3, 1, 1],\n strides=[1, 3, 1, 1],\n padding='SAME', name=name)", "title": "" }, { "docid": "ef0faccddf809112fa385ca8452ad5e4", "score": "0.6811314", "text": "def max_pool_backward_naive(dout, cache):\n dx = None\n x, pool_param = cache\n pool_height, pool_width, stride = pool_param['pool_height'], pool_param['pool_width'], pool_param['stride']\n\n # ================================================================ #\n # YOUR CODE HERE:\n # Implement the max pooling backward pass.\n # ================================================================ #\n N, C, H, W = x.shape\n pool_height = pool_param['pool_height']\n pool_width = pool_param['pool_width']\n stride = pool_param['stride']\n hout = int(1 + (H - pool_height) / stride)\n wout = int(1 + (W - pool_height) / stride)\n dx = np.zeros(x.shape)\n for k in range(hout):\n for l in range(wout):\n height_slice, width_slice = slice(k * stride, k * stride + pool_height), slice(l * stride, l * stride + pool_width)\n cur_region = x[:,:,height_slice, width_slice]\n # this is really ugly, but if we don't have the new axes there is a shape error\n dx[:,:,height_slice, 
width_slice]=(dout[:,:,k,l])[:,:,np.newaxis,np.newaxis] * (cur_region == np.amax(cur_region, axis = (2,3))[:,:,np.newaxis,np.newaxis])\n # ================================================================ #\n # END YOUR CODE HERE\n # ================================================================ # \n return dx", "title": "" }, { "docid": "6a481c8eec8a066565c9c5efcfb50f47", "score": "0.6807496", "text": "def max_pool(x, inds):\n\n # Add a last row with minimum features for shadow pools\n x = torch.cat((x, torch.zeros_like(x[:1, :])), 0)\n\n # Get all features for each pooling location [n2, max_num, d]\n pool_features = gather(x, inds)\n\n # Pool the maximum [n2, d]\n max_features, _ = torch.max(pool_features, 1)\n return max_features", "title": "" }, { "docid": "67a055873502923723e1d092aa6a3cf5", "score": "0.6789475", "text": "def max_pool_3x3(x, name = 'max-pool-3x3'):\n return tf.nn.max_pool(x, ksize = [1, 3, 3, 1],\n strides = [1, 1, 1, 1], padding = 'SAME', name = name)", "title": "" }, { "docid": "d77f9c61594644d821bdd5a72abe4097", "score": "0.677681", "text": "def max_pool(x, n, data_format=\"NHWC\"):\n\n if data_format not in [\"NHWC\", \"NCHW\"]:\n raise ValueError(\"data_format must be \\\"NHWC\\\" or \\\"NCHW\\\".\")\n\n if data_format == \"NCHW\":\n return tf.nn.max_pool(x, ksize=[1, 1, n, n], strides=[1, 1, n, n], padding='VALID', data_format=data_format, name='max_pool')\n \n return tf.nn.max_pool(x, ksize=[1, n, n, 1], strides=[1, n, n, 1], padding='VALID', data_format=data_format, name='max_pool')", "title": "" }, { "docid": "af571cc902f321a3eafc62a137b3c700", "score": "0.6774634", "text": "def MaxPool2d(im: np.array,\n kernel_size: int):\n H, W = im.shape[0], im.shape[1]\n new_im = np.zeros(((H-1)//kernel_size + 1, (W-1)//kernel_size + 1, 3))\n for i, row in enumerate(im):\n for j, px in enumerate(row):\n for c, val in enumerate(px):\n new_im[i//kernel_size, j//kernel_size, c] = max(new_im[i//kernel_size, j//kernel_size, c], val)\n\n return new_im", "title": "" }, { "docid": "3d7861893ea094a239a84d5020673386", "score": "0.67723525", "text": "def maxpool(self, k=2, s=None, globe=False):\n self.count['mp'] += 1\n scope = 'maxpool_' + str(self.count['mp'])\n with tf.variable_scope(scope):\n if globe is True: # Global Pool Parameters\n k1 = self.input.get_shape()[1]\n k2 = self.input.get_shape()[2]\n s1 = 1\n s2 = 1\n padding = 'VALID'\n else:\n k1 = k\n k2 = k\n if s is None:\n s1 = k\n s2 = k\n else:\n s1 = s\n s2 = s\n padding = 'SAME'\n # Max Pool Function\n self.input = tf.nn.max_pool(self.input, ksize=[1, k1, k2, 1], strides=[1, s1, s2, 1], padding=padding)\n self.print_log(scope + ' output: ' + str(self.input.get_shape()))", "title": "" }, { "docid": "ee7e9799266cd5d65b51dc626868d461", "score": "0.6768276", "text": "def max_pool_backward_naive(dout, cache):\n dx = None\n #############################################################################\n # TODO: Implement the max pooling backward pass #\n #############################################################################\n x,pool_param = cache\n pool_height = pool_param[\"pool_height\"]\n pool_width = pool_param[\"pool_width\"]\n stride = pool_param[\"stride\"]\n N,D,H,W=x.shape\n H_new = (H-pool_height)/stride +1\n W_new = (W-pool_width)/stride +1\n\n dx = np.zeros(np.product([N,D,H,W])).reshape([N,D,H,W])\n\n for i in xrange(N):\n for j in xrange(D):\n for k in xrange(H_new):\n hs = k * stride\n for l in xrange(W_new):\n ws = l * stride\n window = x[i,j,hs:hs+pool_height,ws:ws+pool_width]\n value = 
np.max(window)\n\n\n\n dx[i,j,hs:hs+pool_height,ws:ws+pool_width]+= (window==value) * dout[i,j,k,l]\n\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n return dx", "title": "" }, { "docid": "2392ea6f5b1a9cf3df22937cd34a9efb", "score": "0.6745707", "text": "def adaptive_max_pool2d(input, output_size, return_indices=False):\n _check_adaptive_max_pool2d(return_indices)\n _adaptive_max_pool2d = _get_cache_prim(NN_OPS.AdaptiveMaxPool2D)(output_size)\n out = _adaptive_max_pool2d(input)\n output = out if return_indices else out[0]\n return output", "title": "" }, { "docid": "2d7b92a1c115ec4d073b2339c2edae24", "score": "0.67313945", "text": "def mpool1(self, x, p):\n if p > 1:\n x = tf.expand_dims(x, 3) # N x M x F x 1\n x = tf.nn.max_pool(x, ksize=[1,p,1,1], strides=[1,p,1,1], padding='SAME')\n #tf.maximum\n return tf.squeeze(x, [3]) # N x M/p x F\n else:\n return x", "title": "" }, { "docid": "b8d41154e5572083e745664b8b6a1fc3", "score": "0.67208487", "text": "def max_pool_with_argmax(net, stride):\n with tf.compat.v1.name_scope('MaxPoolArgMax'):\n _, mask = tf.nn.max_pool_with_argmax(\n net,\n ksize=[1, stride, stride, 1],\n strides=[1, stride, stride, 1],\n padding='SAME')\n mask = tf.stop_gradient(mask)\n net = tf.nn.max_pool2d(net, ksize=[stride, stride], strides=SETTINGS.pool_size, padding='SAME')\n return net, mask", "title": "" }, { "docid": "59b7d52ba43716aa5ef478e4abdb4c88", "score": "0.6715678", "text": "def maxpooling_2d(input_,\n filter_size=2,\n strides=2,\n padding='VALID',\n name=None):\n with tf.name_scope(name):\n output = tf.nn.max_pool(input_, [1, filter_size, filter_size, 1], [1, strides, strides, 1],\n padding, data_format='NHWC', name='output')\n return output", "title": "" }, { "docid": "1d42f76d4f9c0de650784fef05cf9b4d", "score": "0.6713058", "text": "def max_pooling_layer(y1):\n y2 = y1.max(0)\n return y2", "title": "" }, { "docid": "aa7ec98cac3e7d3934f0e4e9c24c109c", "score": "0.6709414", "text": "def max_pooling2d(inputs, psize, strides):\n\n return tf.layers.max_pooling2d(\n inputs=inputs,\n pool_size=psize,\n strides=strides,\n padding='same',\n )", "title": "" }, { "docid": "de7a6e4745e0d7323f06e14e4d561756", "score": "0.6697028", "text": "def max_pool(images, imgshp, maxpoolshp):\n N = numpy\n poolsize = N.int64(N.prod(maxpoolshp))\n\n # imgshp contains either 2 entries (height,width) or 3 (nfeatures,h,w)\n # in the first case, default nfeatures to 1\n if N.size(imgshp) == 2:\n imgshp = (1,) + imgshp\n\n # construct indices and index pointers for sparse matrix, which,\n # when multiplied with input images will generate a stack of image\n # patches\n indices, indptr, spmat_shape, sptype, outshp = \\\n convolution_indices.conv_eval(imgshp, maxpoolshp,\n maxpoolshp, mode='valid')\n\n# print 'XXXXXXXXXXXXXXXX MAX POOLING LAYER XXXXXXXXXXXXXXXXXXXX'\n# print 'imgshp = ', imgshp\n# print 'maxpoolshp = ', maxpoolshp\n# print 'outshp = ', outshp\n\n # build sparse matrix, then generate stack of image patches\n csc = theano.sparse.CSM(sptype)(N.ones(indices.size), indices,\n indptr, spmat_shape)\n patches = sparse.structured_dot(csc, images.T).T\n\n pshape = tensor.stack([images.shape[0] *\\\n tensor.as_tensor(N.prod(outshp)),\n tensor.as_tensor(imgshp[0]),\n tensor.as_tensor(poolsize)])\n patch_stack = tensor.reshape(patches, pshape, ndim=3)\n\n out1 = tensor.max(patch_stack, axis=2)\n\n pshape = tensor.stack([images.shape[0],\n 
tensor.as_tensor(N.prod(outshp)),\n tensor.as_tensor(imgshp[0])])\n out2 = tensor.reshape(out1, pshape, ndim=3)\n\n out3 = tensor.DimShuffle(out2.broadcastable, (0, 2, 1))(out2)\n\n return tensor.flatten(out3, 2), outshp", "title": "" }, { "docid": "080a402a967581f03a2e167a07aa4111", "score": "0.66730905", "text": "def max_pool_backward(dout, cache):\n dx = None\n ###########################################################################\n # TODO: Implement the max-pooling backward pass #\n ###########################################################################\n x, pool_param, X_col, max_idx = cache\n N, C, H, W = x.shape\n _, _, H_out, W_out = dout.shape\n pool_height = pool_param['pool_height']\n pool_width = pool_param['pool_width']\n stride = pool_param['stride']\n\n # Naive implementation with for loops\n\n #dx = np.zeros(x.shape)\n # for k in range(N):\n # for c in range(C):\n # for i in range(H_out):\n # for j in range(W_out):\n # region = x[k, c, i * stride:i * stride + pool_height, j * stride:j * stride + pool_width]\n # mask = (region == np.max(region))\n # dx[k, c, i * stride:i * stride + pool_height, j * stride:j * stride + pool_width] = dx[k, c, i * stride:i * stride + pool_height, j * stride:j * stride + pool_width] + mask * dout[k, c, i, j] / np.sum(mask)\n\n dX_col = np.zeros_like(X_col)\n dout_flat = dout.transpose(2, 3, 0, 1).ravel()\n dX_col[max_idx, range(max_idx.size)] = dout_flat\n dx = col2im_indices(dX_col, (N * C, 1, H, W), pool_height, pool_width, padding=0, stride=stride)\n dx = dx.reshape(x.shape)\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n return dx", "title": "" }, { "docid": "61f8b3202cfd744975c98b5044162507", "score": "0.66571724", "text": "def global_max_pooling_layer(input_data):\n output = tf.reduce_max(input_data, axis=[1, 2])\n\n return output", "title": "" }, { "docid": "aabc9192cae63880e5b8e7671ae61a37", "score": "0.6637844", "text": "def max_pooling_layer(previous, name, params):\n return cl.Pooling(\n previous, name=name, pool=cp.Pooling.MAX,\n kernel_size=int(params[\"size\"]), stride=int(params[\"stride\"]))", "title": "" }, { "docid": "1bb3c95dbcbc203e653e51d2cca4c57e", "score": "0.660977", "text": "def max_pool_forward_naive(x, pool_param):\n out = None\n A_prev = x\n hparameters = pool_param\n \n ###########################################################################\n # TODO: Implement the max-pooling forward pass #\n ###########################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n # Retrieve dimensions from the input shape\n (m, n_C_prev, n_H_prev, n_W_prev) = A_prev.shape\n # Retrieve hyperparameters from \"hparameters\"\n pool_height = hparameters[\"pool_height\"]\n pool_width = hparameters[\"pool_width\"]\n stride = hparameters[\"stride\"]\n\n # Define the dimensions of the output\n n_H = int(1 + (n_H_prev - pool_height) / stride)\n n_W = int(1 + (n_W_prev - pool_width) / stride)\n n_C = n_C_prev\n \n # Initialize output matrix A\n A = np.zeros((m, n_C, n_H, n_W)) \n \n ### START CODE HERE ###\n for i in range(m): # loop over the training examples\n for c in range (n_C): # loop over the channels of the output volume\n for h in range(n_H): # loop on the vertical axis of the output volume\n for w in range(n_W): # loop on the horizontal axis of the output volume\n # Find the corners of the current \"slice\" (≈4 
lines)\n vert_start = h * stride\n vert_end = vert_start + pool_height\n horiz_start = w * stride\n horiz_end = horiz_start + pool_width\n \n # Use the corners to define the current slice on the ith training example of A_prev, channel c. (≈1 line)\n a_prev_slice = A_prev[i,c,:,:]\n \n # Compute the pooling operation on the slice. Use an if statment to differentiate the modes. Use np.max/np.mean.\n A[i, c, h, w] = np.max(a_prev_slice[vert_start:vert_end, horiz_start:horiz_end])\n \n \n # Making sure your output shape is correct\n assert(A.shape == (m, n_C, n_H, n_W))\n \n x = A_prev\n pool_param = hparameters\n out = A\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n cache = (x, pool_param)\n return out, cache", "title": "" }, { "docid": "a1e975be08dfa9f285768a5975e6fba9", "score": "0.6567198", "text": "def maxpool2d(x, kernel_size, strides, padding):\n graph = get_default_graph()\n return neural_network.MaxPool2D(x=x, \n kernel_size=kernel_size, \n strides=strides, \n padding=padding, \n graph=graph)", "title": "" }, { "docid": "9c9d1a92e97a32c3472ba3bccc0f8a04", "score": "0.6564548", "text": "def max_pool2d(inputs,\n pool_size = (2, 2),\n strides = (2, 2),\n name = None):\n return tf.layers.max_pooling2d(\n inputs = inputs,\n pool_size = pool_size,\n strides = strides,\n padding='same',\n name = name)", "title": "" }, { "docid": "4b9bad7b1ca3317879fd4452195adb4a", "score": "0.6560364", "text": "def maxpool(bottom, K, S):\n [Win, Hin, N] = bottom.shape\n Wout = (Win - K) / S + 1\n Hout = (Hin - K) / S + 1\n top = np.zeros(Wout, Hout, N)\n for n in range(1, N):\n for h in range(1, Hout):\n for w in range(1, Wout):\n hstart = (h - 1) * S + 1\n wstart = (w - 1) * S + 1\n hend = hstart + K - 1\n wend = wstart + K - 1\n top[w, h, n] = max(max(bottom[range(wstart, wend), range(hstart, hend), n]))\n return top", "title": "" }, { "docid": "a92239a0b9c96b579f2ec52292b59758", "score": "0.65366274", "text": "def MaxPool(images, targets, numChannels, subsX, startX, strideX, outputsX):\r\n numImages = images.shape[0]\r\n\r\n assert targets.shape == (numImages, numChannels * outputsX * outputsX)\r\n \r\n _ConvNet.MaxPool(images.p_mat, targets.p_mat,\r\n numChannels, subsX, startX, strideX, outputsX)", "title": "" }, { "docid": "5ce7e98bee3033f0f552ab6900bf99cc", "score": "0.6507915", "text": "def max_pooling_2d(x, ksize, stride=None, pad=0, cover_all=True,\n use_cudnn=True):\n return MaxPooling2D(ksize, stride, pad, cover_all, use_cudnn)(x)", "title": "" }, { "docid": "638c62bfb9338a489e9df513133c8dda", "score": "0.6482037", "text": "def maxpool_block_2(X, f, filters, s, stage, block):\n\n # defining name basis\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n mp_name_base = 'mp' + str(stage) + block + '_branch'\n # Retrieve Filters\n F1, F2 = filters\n # n_timesteps, n_features = X.shape[1], X.shape[2]\n # Save the input value. 
You'll need this later to add back to the main path.\n X_shortcut = X\n n_timesteps, n_features = X.shape[1], X.shape[2]\n\n # First component of main path\n X = keras.layers.BatchNormalization(name=bn_name_base + '2a')(X)\n X = keras.layers.Activation('relu')(X)\n # Second component of main path (≈3 lines)\n X = keras.layers.Conv1D(filters=F1, kernel_size=f, strides=s, padding='same',\n input_shape=(None, n_timesteps, n_features),\n name=conv_name_base + '2b', kernel_initializer=he_normal(seed=0))(X)\n X = keras.layers.BatchNormalization(name=bn_name_base + '2b')(X)\n X = keras.layers.Activation('relu')(X)\n X = keras.layers.Dropout(0.2)(X)\n # Third component of main path (≈2 lines)\n X = keras.layers.Conv1D(filters=F2, kernel_size=1, strides=1, padding='same', name=conv_name_base + '2c',\n kernel_initializer=he_normal(seed=0))(X)\n\n # Final step: Add shortcut value to main path, and pass it through a RELU activation (≈2 lines)\n ##### SHORTCUT PATH #### (≈2 lines)\n X_shortcut = keras.layers.Conv1D(filters=F2, kernel_size=1, strides=1, name=conv_name_base + '1',\n kernel_initializer=he_normal(seed=0))(X_shortcut)\n\n X_shortcut = keras.layers.MaxPooling1D(pool_size=s, padding='same', name=mp_name_base + '1')(X_shortcut)\n # print(X_shortcut.shape)\n\n # Final step: Add shortcut value to main path, and pass it through a RELU activation (≈2 lines)\n X = keras.layers.add([X, X_shortcut])\n X = keras.layers.Activation('relu')(X)\n\n return X", "title": "" }, { "docid": "77b5bc6f0a3329329e8cf711e23ecad1", "score": "0.6468641", "text": "def max_pool2d(x, kernel_size, stride=None, padding=0, dilation=1, return_indices=False, ceil_mode=False):\n strides = stride if (stride is not None) else kernel_size\n max_pool_with_argmax_v2_ = _get_cache_prim(NN_OPS.MaxPoolWithArgmaxV2)(\n kernel_size, strides, padding, dilation, ceil_mode)\n out, indices = max_pool_with_argmax_v2_(x)\n if return_indices:\n return out, indices\n return out", "title": "" }, { "docid": "2c827622e2219dee2693659d59c5135f", "score": "0.64414823", "text": "def max_pool2d(input, pool_size, strides=(1, 1), channels_last=True,\n padding='same', name=None):\n return _pool2d(\n tf.nn.max_pool,\n input=input,\n pool_size=pool_size,\n strides=strides,\n channels_last=channels_last,\n padding=padding,\n name=name,\n default_name='max_pool2d'\n )", "title": "" }, { "docid": "e19aba3b6d41fd1d4a2b1ddcb2c258c7", "score": "0.6433036", "text": "def get_bprop_max_pool_with_argmax(self):\n maxpool_grad = G.MaxPoolGradWithArgmax(\n ksize=self.ksize,\n strides=self.strides,\n padding=self.padding)\n\n def bprop(x, out, dout):\n dx = maxpool_grad(x, dout[0], out[1])\n return (dx,)\n\n return bprop", "title": "" }, { "docid": "dbeeaa075228a374c5ac4a3c29a52d65", "score": "0.6404496", "text": "def backward_max_pooling_naive(dout, cache):\n dx = None\n #############################################################################\n # TODO: Implement the backward pass for a max pooling layer. 
#\n #############################################################################\n x, pool_param = cache\n N, C, H, W = x.shape\n ph = pool_param['pool_height']\n pw = pool_param['pool_width']\n S = pool_param['stride']\n Hd = int( (H - ph) / S + 1 )\n Wd = int( (W - pw) / S + 1 )\n\n dx = np.zeros((N, C, H, W))\n for i in range(N):\n for j in range(C):\n for k in range(Hd):\n for l in range(Wd):\n window = x[i, j, k * S: k * S + ph, l * S: l * S + pw]\n dx[i, j, k * S: k * S + ph, l * S: l * S + pw] = (window == np.max(window)) * dout[i, j, k, l]\n\n #############################################################################\n # END OF YOUR CODE                                                          #\n #############################################################################\n return dx", "title": "" }, { "docid": "3a7e3fbb653b2fe00423bf4f639c98d2", "score": "0.6396403", "text": "def maxpool_block_1(X, f, filters, s, stage, block):\n\n # defining name basis\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n mp_name_base = 'mp' + str(stage) + block + '_branch'\n\n # Retrieve Filters\n F1, F2 = filters\n # n_timesteps, n_features = X.shape[1], X.shape[2]\n # Save the input value. You'll need this later to add back to the main path.\n X_shortcut = X\n n_timesteps, n_features = X.shape[1], X.shape[2]\n\n # First component of main path\n X = keras.layers.Conv1D(filters=F1, kernel_size=f, strides=s, padding='same',\n input_shape=(None, n_timesteps, n_features),\n name=conv_name_base + '2a', kernel_initializer=he_normal(seed=0))(X)\n X = keras.layers.BatchNormalization(name=bn_name_base + '2a')(X)\n X = keras.layers.Activation('relu')(X)\n X = keras.layers.Dropout(0.2)(X)\n\n # Second component of main path ()\n X = keras.layers.Conv1D(filters=F2, kernel_size=1, strides=1, name=conv_name_base + '2b',\n kernel_initializer=he_normal(seed=0))(\n X)\n\n X_shortcut = keras.layers.Conv1D(filters=F2, kernel_size=1, strides=1, name=conv_name_base + '1',\n kernel_initializer=he_normal(seed=0))(X_shortcut)\n\n X_shortcut = keras.layers.MaxPooling1D(pool_size=s, padding='same', name=mp_name_base + '1')(X_shortcut)\n\n # Final step: Add shortcut value to main path, and pass it through a RELU activation (≈2 lines)\n X = keras.layers.add([X, X_shortcut])\n X = keras.layers.Activation('relu')(X)\n\n ### END CODE HERE ###\n\n return X", "title": "" }, { "docid": "d0202566f75dfb9e94394e1c7de5bd60", "score": "0.6390623", "text": "def max_pooling(image, filt_size=(3, 3), stride=None, padding=(0, 0)):\n if stride is None: stride = filt_size\n f_x, f_y = filt_size\n if padding == 'same' or padding == 'SAME':\n p_x = int((f_x - 1) / 2)\n p_y = int((f_y - 1) / 2)\n else:\n p_x, p_y = padding\n s_x, s_y = stride\n image_x = len(image)\n image_y = len(image[0])\n padded_len_x = image_x + 2 * p_x\n padded_len_y = image_y + 2 * p_y\n out_x = math.floor((padded_len_x - f_x) / s_x + 1)\n out_y = math.floor((padded_len_y - f_y) / s_y + 1)\n out = [[0] * out_y for _ in range(out_x)]\n\n image_padded = [[0] * padded_len_y for _ in range(padded_len_x)]\n\n for i in range(image_x):\n image_padded[i + p_x][p_y:p_y + image_y] = image[i]\n\n for i in range(out_x):\n x = i * s_x\n for j in range(out_y):\n y = j * s_y\n # sub_M = M[i:i+p, j:j+q] -- 2D slicing in native python list\n sub_M = [image_padded[_][y:y + f_y] for _ in range(x, x + f_x)]\n out[i][j] = max(map(max, sub_M))\n\n return out", "title": "" }, { "docid": "cc1fea0030934bdd4e05e2403369fea4", "score": "0.6365963", "text": "def 
poolingLayer(input,size,stride,padding=\"valid\"):\n return tf.layers.max_pooling2d(inputs=input,pool_size=size,\n strides=stride,padding=padding)", "title": "" }, { "docid": "3fbe88f2a8244dfc1f1b40e63e40a5a0", "score": "0.6357235", "text": "def max_pool_forward_reshape(x, pool_param):\n N, C, H, W = x.shape\n pool_height, pool_width = pool_param['pool_height'], pool_param['pool_width']\n stride = pool_param['stride']\n assert pool_height == pool_width == stride, 'Invalid pool params'\n assert H % pool_height == 0\n assert W % pool_height == 0\n x_reshaped = x.reshape(N, C, H // pool_height, pool_height, W // pool_width, pool_width)\n out = x_reshaped.max(axis=3).max(axis=4)\n\n cache = (x, x_reshaped, out)\n return out, cache", "title": "" }, { "docid": "eb63934f5600c2361c7cb1c2e9cc716f", "score": "0.6337849", "text": "def max_pool_2d_same_size(input, patch_size):\n output = DownsampleFactorMax(patch_size, True)(input)\n outs = MaxPoolGrad(patch_size, True)(input, output, output)\n return outs", "title": "" }, { "docid": "18dcb0b9d6c81b925fc12da0ad7ee86a", "score": "0.6305588", "text": "def unravel_pooling_argmax(argmax, shape):\n \n WIDTH = shape[2]\n CHANNELS = shape[3]\n WC = WIDTH * CHANNELS\n \n x = argmax // WC\n y = argmax % WC // CHANNELS\n \n return tf.pack([x, y])", "title": "" }, { "docid": "80284f6b97ea7c612dd8a475b11ce706", "score": "0.63045424", "text": "def max_pool_backward_naive(dout, cache):\n dx = None\n dA = dout\n ###########################################################################\n # TODO: Implement the max-pooling backward pass #\n ###########################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n ### START CODE HERE ###\n \n # Retrieve information from cache (≈1 line)\n (A_prev, hparameters) = cache\n \n # Retrieve hyperparameters from \"hparameters\" (≈2 lines)\n pool_height = hparameters[\"pool_height\"]\n pool_width = hparameters[\"pool_width\"]\n stride = hparameters[\"stride\"]\n \n # Retrieve dimensions from A_prev's shape and dA's shape (≈2 lines)\n (m, n_C_prev, n_H_prev, n_W_prev) = A_prev.shape\n m, n_C, n_H, n_W = dA.shape\n \n # Initialize dA_prev with zeros (≈1 line)\n dA_prev = np.zeros(A_prev.shape)\n \n for i in range(m): # loop over the training examples\n \n # select training example from A_prev (≈1 line)\n a_prev = A_prev[i,:,:,:]\n \n for c in range(n_C): # loop over the channels (depth)\n for h in range(n_H): # loop on the vertical axis\n for w in range(n_W): # loop on the horizontal axis\n \n # Find the corners of the current \"slice\" (≈4 lines)\n vert_start = h * stride\n vert_end = vert_start + pool_height\n horiz_start = w * stride\n horiz_end = horiz_start + pool_width\n \n # Compute the backward propagation in both modes.\n # Use the corners and \"c\" to define the current slice from a_prev (≈1 line)\n a_prev_slice = a_prev[c,vert_start: vert_end, horiz_start: horiz_end]\n # Create the mask from a_prev_slice (≈1 line)\n mask = create_mask_from_window(a_prev_slice)\n # Set dA_prev to be dA_prev + (the mask multiplied by the correct entry of dA) (≈1 line)\n dA_prev[i, c, vert_start:vert_end, horiz_start:horiz_end] += np.multiply(mask, dA[i, c, h, w])\n\n\n dx = dA_prev\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n return dx", "title": "" }, { "docid": 
"ec401e0c1268e3fa4f6784bc8bf3ea23", "score": "0.6274", "text": "def max_pool_layer(layer_id, inputs, kernel_size, stride):\r\n # TODO(b/67004004): Delete this function and rely on tf.layers exclusively.\r\n with tf.variable_scope(\"pool_%d\" % layer_id):\r\n return tf.nn.max_pool(\r\n inputs, [1, kernel_size, kernel_size, 1], [1, stride, stride, 1],\r\n padding=\"SAME\",\r\n name=\"pool\")", "title": "" }, { "docid": "114a40b4ff2885dd783829d2e4824f8c", "score": "0.62507516", "text": "def generate_pool(max_pool_size=None, data_size=None, cols=None):\n\n suggested_pool_size = suggest_pool_size(data_size, cols)\n if max_pool_size is None or suggested_pool_size is None: \n max_pool_size = suggested_pool_size\n \n # Always leave 1 cores free\n pool = None\n if max_pool_size is not None and max_pool_size > 2: \n try:\n pool = mp.Pool(max_pool_size)\n except Exception as e:\n pool = None\n warnings.warn(\n 'Multiprocessing disabled, please change the multiprocessing'+\n ' start method, via: multiprocessing.set_start_method(<method>)'+\n ' Possible methods include: fork, spawn, forkserver, None'\n ) \n\n return pool, max_pool_size", "title": "" }, { "docid": "0b451a84d972ee2bfc010f208dc20c35", "score": "0.62491995", "text": "def b2Max(*args):\n return _Box2D.b2Max(*args)", "title": "" }, { "docid": "9cb14a9cd399b962f2220479f45985bb", "score": "0.622865", "text": "def max_pool_forward_fast(x, pool_param):\n N, C, H, W = x.shape\n pool_height, pool_width = pool_param['pool_height'], pool_param['pool_width']\n stride = pool_param['stride']\n\n same_size = pool_height == pool_width == stride\n tiles = H % pool_height == 0 and W % pool_width == 0\n if same_size and tiles:\n out, reshape_cache = max_pool_forward_reshape(x, pool_param)\n cache = ('reshape', reshape_cache)\n else:\n out, im2col_cache = max_pool_forward_im2col(x, pool_param)\n cache = ('im2col', im2col_cache)\n return out, cache", "title": "" }, { "docid": "368c117cea9d2824d24053f1e1d2320a", "score": "0.62210965", "text": "def build_convpool_max(input_vars, nb_classes, imsize=32, n_colors=3, n_timewin=3):\n convnets = []\n w_init = None\n\n print('Build max')\n print('inputvar shape', input_vars.shape)\n # Build 7 parallel CNNs with shared weights\n for i in range(n_timewin):\n if i == 0:\n print(\"inputvar:\", input_vars[:, i].shape)\n convnet, w_init = build_cnn(input_vars[:, i], imsize=imsize, n_colors=n_colors)\n else:\n convnet, _ = build_cnn(input_vars[:, i], w_init=w_init, imsize=imsize, n_colors=n_colors)\n print(i, ':', convnet.shape) #7, 4, 4, 128\n convnets.append(tf.contrib.layers.flatten(convnet))\n\n convpool = tf.add(tf.matmul(convpool, weight_variable([n_filter, 512])), bias_variable([512]))\n convpool = tf.nn.relu(convpool)\n convpool = tf.nn.dropout(convpool, 0.5)\n\n convpool = tf.add(tf.matmul(convpool, weight_variable([512, nb_classes])), bias_variable([nb_classes]), name='network')", "title": "" }, { "docid": "dc6edbce3a8170e952998eb4df249564", "score": "0.6192415", "text": "def adaptive_max_pool1d(input, output_size):\n if not isinstance(input, (Tensor, Tensor_)):\n raise TypeError(\"For adaptive_max_pool1d, the input input must be tensor\")\n\n _check_adaptive_max_pool1d_output_size(output_size)\n\n x_in_shape = input.shape\n x_dtype = _get_cache_prim(P.DType)()(input)\n\n if len(x_in_shape) != 3:\n raise ValueError(\"For adaptive_max_pool1d input must have 3 dim, but got {}.\".format(len(x_in_shape)))\n if x_in_shape[2] < output_size:\n raise ValueError(\"For adaptive_max_pool1d input's last dimension must be 
greater or equal to \"\n \"output size {}, but got {}.\".format(output_size, x_in_shape[2]))\n if x_in_shape[2] % output_size != 0:\n raise ValueError(\"For adaptive_max_pool1d input's last dimension must be divisible by \"\n \"output size {}, but got {}.\".format(output_size, x_in_shape[2]))\n if is_ascend_backend():\n if x_dtype not in [mstype.float16]:\n raise TypeError(\"For adaptive_max_pool1d in Ascend platform, the input dtype must be float16, \"\n \"but got {}.\".format(x_dtype))\n else:\n if x_dtype not in [mstype.float16, mstype.float32]:\n raise TypeError(\"For adaptive_max_pool1d, the input dtype must be float16 or float32, \"\n \"but got {}.\".format(x_dtype))\n\n expand_ = _get_cache_prim(P.ExpandDims)()\n squeeze_ = _get_cache_prim(P.Squeeze)(2)\n\n width = x_in_shape[2]\n stride = width // output_size\n kernel_size = width - (output_size - 1) * stride\n stride = (1, width // output_size)\n kernel_size = (1, kernel_size)\n\n max_pool_ = _get_cache_prim(NN_OPS.MaxPool)(kernel_size=kernel_size, strides=stride)\n\n input = expand_(input, 2)\n input = max_pool_(input)\n input = squeeze_(input)\n\n return input", "title": "" }, { "docid": "644f2292d2d35c3a2184bec992becdc3", "score": "0.61480266", "text": "def maxpool2D(x, ksize, strides, padding, data_format):\n x_shape = x.get_shape().as_list()\n x_shape = [s if isinstance(s, int) else -1 for s in x_shape]\n \n # Flatten matrix in first dimensions if necessary (join samples and sequence positions)\n if len(x_shape) > 4:\n if x_shape[0] == -1:\n x_flat = tf.reshape(x, [-1] + x_shape[2:])\n else:\n x_flat = tf.reshape(x, [x_shape[0] * x_shape[1]] + x_shape[2:])\n maxpool = tf.nn.max_pool(x_flat, ksize=ksize, strides=strides, padding=padding, data_format=data_format)\n maxpool = tf.reshape(maxpool, x_shape[:2] + maxpool.get_shape().as_list()[1:])\n else:\n maxpool = tf.nn.max_pool(x, ksize=ksize, strides=strides, padding=padding, data_format=data_format)\n return maxpool", "title": "" }, { "docid": "4d89102c3bd38d7cc8a4cbcabb9f5f8e", "score": "0.6106335", "text": "def spatial_pyramid_pooling(input_, output_size):\n assert input_.dim() == 4 and input_.size(2) == input_.size(3)\n kernel_size = input_.size(2) // output_size\n padding = 0\n if input_.size(2) // kernel_size > output_size:\n kernel_size += 1\n padding = 1\n return max_pool2d(input_, kernel_size=kernel_size, padding=padding)", "title": "" }, { "docid": "5005e74cd32830c20d98c683c400ac6a", "score": "0.6057674", "text": "def max_pool2d_same(x, kernel_size, stride, padding=(0, 0), dilation=(1, 1), ceil_mode=False):\n kernel_size = (kernel_size, kernel_size) if isinstance(kernel_size, int) else kernel_size\n stride = (stride, stride) if isinstance(stride, int) else stride\n\n x, _= pad_same(x, kernel_size, stride, value=-float('inf'))\n return F.max_pool2d(x, kernel_size, stride, padding, dilation, ceil_mode)", "title": "" }, { "docid": "20c99df5e3ba484ab6b82d5190577630", "score": "0.60235107", "text": "def max_pooling2d(inputs,\n pool_size, strides,\n padding='valid', data_format='channels_last',\n name=None):\n warnings.warn('`tf.layers.max_pooling2d` is deprecated and '\n 'will be removed in a future version. '\n 'Please use `tf.keras.layers.MaxPooling2D` instead.')\n layer = MaxPooling2D(pool_size=pool_size, strides=strides,\n padding=padding, data_format=data_format,\n name=name)\n return layer.apply(inputs)", "title": "" } ]
7b35124295eb845671f544af3f04f9eb
Instantiates the robot in the scene. Loads the URDF, sets the initial state of parameters, joints, motors, etc...
[ { "docid": "dde9025e422c7b9f345ab0219d94551c", "score": "0.0", "text": "def reconfigure(self) -> None:\n Manipulator.reconfigure(self)\n ArticulatedAgentBase.reconfigure(self)", "title": "" } ]
[ { "docid": "95dd577585073e05235fdae44c273186", "score": "0.68957335", "text": "def __init__(self):\n\n \trospy.init_node(\"robot\") # start node\n\tself.path = None\n\tself.path_changed = False\n \t#rospy.init_node('RobotControl', anonymous=True)\n\tself.reset_odam()\n\t## Sets up the cmd_vel publisher and odem_try subscriber and the subscriber for goal setting up in rviz\n\tself.pub = rospy.Publisher('/cmd_vel', Twist, queue_size=10)\n\tself.sub = rospy.Subscriber('/odom', Odometry, self.odom_callback)\n\tself.robot_sub = rospy.Subscriber('/robot_path', Path, self.path_callback)\n\n\t## Variables for storing the robot moving state\n\tself.nav_state = False\n\tself.goal_msg = None\n\tself.once = False\n\tself.tf_listener = tf.TransformListener()", "title": "" }, { "docid": "6e422c65563f958a3838e719dc785247", "score": "0.68581796", "text": "def __init__(self, init_pos, step=0.01, is_gui=True):\n # connect the client\n if is_gui:\n self.physicsClient = p.connect(p.GUI, options=\"--opengl3\")\n else:\n self.physicsClient = p.connect(p.DIRECT)\n\n # add the ground into the simulation environment\n p.setAdditionalSearchPath(pybullet_data.getDataPath()) # used by loadURDF to load the plane\n self.planeId = p.loadURDF(\"plane.urdf\")\n p.setAdditionalSearchPath(os.getcwd())\n self.biped_robot = p.loadURDF(\"Real_robot.urdf\", init_pos, p.getQuaternionFromEuler([0, 0, 0]))\n # self.biped_robot = p.loadURDF(\"biped_robot_mirror.urdf\", init_pos, p.getQuaternionFromEuler([0, 0, 0]))\n p.setGravity(0, 0, -10) # set the gravity of the simulation environment\n self.step = step # set the time step, the default value is 0.01s\n p.setTimeStep(self.step)\n self.index = 0 # index increase every time simulation, and if index == frequency, clear to zero\n self.init_pos = init_pos", "title": "" }, { "docid": "030397cb32ab5c95c290254b49f08dd1", "score": "0.68161136", "text": "def __init__(self):\n self.pub_tf = rospy.Publisher(\"/tf\", tf.msg.tfMessage, queue_size=1)\n\n #Loads the robot model, which contains the robot's kinematics information\n self.robot = URDF.from_parameter_server()\n\n #Subscribes to information about what the current joint values are.\n rospy.Subscriber(\"joint_states\", JointState, self.callback)", "title": "" }, { "docid": "1bab9fb33b9b72ee8fe9bc92cd36e6ab", "score": "0.6781615", "text": "def robotInit(self):\n self.initMotors()\n self.initSensors()", "title": "" }, { "docid": "8601d3b9c1dd1896fee197bb15c2c34a", "score": "0.6736801", "text": "def init_scene():\n global scene\n # Create a scene\n scene = Scene(\n ambient=(0.05, 0.05, 0.05, 1.0),\n )\n\n light = Light(\n pos=(50, 10, -50),\n colour=(1.0, 1.0, 1.0, 1.0),\n intensity=0.6,\n falloff=0\n )\n\n scene.add(light)\n\n for (x, y), mat in zip(product((-6, 6), (-6, 6)), materials):\n if mat:\n model = robot_model.copy()\n model.meshes[1].material = mat\n else:\n model = robot_model\n\n robot = ModelNode(\n model,\n pos=Point3(x, y, 0)\n )\n scene.add(robot)\n robots.append(robot)", "title": "" }, { "docid": "471fe64f777105c08ab1d2ecfd66f3e9", "score": "0.6712373", "text": "def robotInit(self):\n\n self.drivetrain = DriveTrain(self)\n self.elevator = Elevator(self)\n self.wrist = Wrist(self)\n self.claw = Claw()\n self.oi = OI(self)\n\n # instantiate the command used for the autonomous period\n self.autonomousCommand = Autonomous(self)\n\n # Show what command your subsystem is running on the SmartDashboard\n wpilib.SmartDashboard.putData(self.drivetrain)\n wpilib.SmartDashboard.putData(self.elevator)\n 
wpilib.SmartDashboard.putData(self.wrist)\n wpilib.SmartDashboard.putData(self.claw)\n\n wpilib.LiveWindow.getInstance().setEnabled(True)", "title": "" }, { "docid": "13d1f70871e9836327f6efe9629b82bb", "score": "0.6704961", "text": "def initialize_robot(self):\r\n\r\n self.stop_interfaces()\r\n self.actuators = Actuators(self.configuration.actuators)\r\n self.sensors = Sensors(self.configuration.sensors)\r\n self.brains = Brains(self.sensors, self.actuators, self.configuration.brain_path, self.controller)\r\n self.__wait_gazebo()", "title": "" }, { "docid": "f77b2babd1fc160ba48f9897b51a63bb", "score": "0.6604403", "text": "def robotInit(self):\n\n # Initialize Joystick\n self.controller = Joystick(Values.CONTROLLER_ID)\n\n # Initialize Drive Sub-System\n self.drive = FroboDrive(self, Values.DRIVE_LEFT_MAIN_ID, Values.DRIVE_LEFT_SLAVE_ID, Values.DRIVE_RIGHT_MAIN_ID, Values.DRIVE_RIGHT_SLAVE_ID)\n\n # Initialize Shooter Sub-System\n self.compressor = wpilib.Compressor()\n self.shooter = Shooter(self, Values.SHOOT_FRONT_ID, Values.SHOOT_BACK_ID, Values.SHOOT_SOLENOID_FORWARD_CHANNEL_ID, Values.SHOOT_SOLENOID_REVERSE_CHANNEL_ID)", "title": "" }, { "docid": "65c8ba211bf5d339e2af0aa342bdebbc", "score": "0.65009063", "text": "def initialize(self):\n\n self.sensor_dict['film']['width'] = self.res\n self.sensor_dict['film']['height'] = self.res\n self.scene_dict['sensor'] = self.sensor_dict\n\n @fresolver_append_path\n def create_scene():\n return mi.load_dict(self.scene_dict)\n self.scene = create_scene()\n self.params = mi.traverse(self.scene)\n\n if hasattr(self, 'key'):\n self.params.keep([self.key])\n self.initial_state = type(self.params[self.key])(self.params[self.key])", "title": "" }, { "docid": "44ed730fd4e62acf7ef5c4651016e95e", "score": "0.64798707", "text": "def __init__(self,robot_params):\n\n #Kinematic information\n self.position = np.zeros(2)\n self.rotation = 0.0\n self.velocity = np.zeros(2)\n \n #Timer can be used to limit the rate at which of control loop executes\n self.timer = 0 \n self.robot_state = 0\n\n self.robot_params = robot_params\n\n\n #These are assigned by the world class\n self.robot_index = None\n self.bin_index = None", "title": "" }, { "docid": "d487640475009c156872283517e5ddac", "score": "0.64760166", "text": "def __init__(self):\n self.robots = []\n self.robotsByName = {}\n self.world = []\n self.time = 0.0\n self.timeslice = 100 # in milliseconds\n self.lightAboveWalls = 0\n self.properties = [\"stall\", \"x\", \"y\", \"th\", \"thr\", \"energy\"]\n # connections to pyrobot:\n self.ports = []\n self.assoc = {}\n self.done = 0\n self.stepCount = 0\n self.running = 0\n self.lights = []\n self.shapes = []", "title": "" }, { "docid": "829623727060594ada0ae300bea27ccd", "score": "0.64748263", "text": "def robotInit(self):\n\n self.drive_motor1 = wpilib.Talon(0) # <--- or whatever motor controller you are using\n self.drive_motor2 = wpilib.Talon(1)\n\n self.robot_drive = wpilib.RobotDrive(self.drive_motor1, self.drive_motor2) # <--- says to robot that these motors work together to drive robot\n\n self.xboxController = wpilib.Joystick(0) # <--- joystick, does not have to be an xbox controller\n\n self.components = { # Add all the objects you are going to want in autonomous like sensors, the robot drive, etc.\n 'drive': self.robot_drive #give it a nickname as well. 
In this case, we \"nicknamed\" self.robot_drive as 'drive' so in auto you will do self.drive\n }\n\n self.automodes = AutonomousModeSelector('auto-modes', self.components) #pass in the folder with all your auto modes and the components you want in auto", "title": "" }, { "docid": "3d074b71cd2df9e8e96397c8e4bf4195", "score": "0.6452325", "text": "def setup(self):\n # This is abstracted out from '__init__' because we need to do this\n # first time 'render' is called\n self.log.debug(\"Setting up simulation environment\")\n pyb.resetSimulation()\n pyb.setGravity(0, 0, -9.81)\n # Extract time step for sleep during rendering\n self.dt = pyb.getPhysicsEngineParameters()['fixedTimeStep']\n # Load ground plane for robots to walk on\n self.log.debug(\"Loading ground plane\")\n self.plane_id = pyb.loadURDF('plane/plane.urdf')\n assert self.plane_id >= 0, \"Could not load 'plane.urdf'\"\n self.log.debug(\"Gym environment setup complete\")", "title": "" }, { "docid": "f44be213f4060c94830b7ae394d57115", "score": "0.64304525", "text": "def __init__(self):\n\t\trospy.init_node('robot_location_server')\n\t\trospy.Service('robot_locator', RobotLocator, self.handle)\n\t\t# Total number of robots on the dance floor.\n\t\tself.n = rospy.get_param('total_robot_n')\n\t\t# Used to store the pose of each robot.\n\t\tself.robot_poses = {\"sphero\"+str(i+1): [0,0] for i in range(self.n)}\n\t\t# Used to store the transform of each robot.\n\t\tself.robot_transforms = {\"sphero\"+str(i+1): [0,0] for i in range(self.n)}\n\t\t# Create a pose subscriber for each robot.\n\t\tfor i in range(self.n):\n\t\t\tname = 'sphero'+str(i+1)\n\t\t\trospy.Subscriber(name + '/odom', Odometry, self.pose_callback, name)\n\t\t\trospy.Subscriber(name + '/transform', Vector3, self.transform_callback, name)\n\t\t# Print a ready message and then spin forever.\n\t\tprint \"Ready to locate robots.\"\n\t\trospy.spin()", "title": "" }, { "docid": "765b952517158a8ba6ff33145d5a02cf", "score": "0.638082", "text": "def init_ros_node(self):\n rospy.init_node(\"corobot_manager\")\n rospy.Subscriber(\"pose\", Pose, self.pose_callback)\n rospy.Subscriber(\"goals_reached\", Point, self.goals_reached_callback)\n rospy.Subscriber(\"goals_failed\", Point, self.goals_failed_callback)\n rospy.Subscriber(\"confirm_msg\", UIConfirm, self.confirm_ui_callback)\n rospy.Subscriber(\"diagnostics\", DiagnosticArray, self.diagnostics_callback)\n rospy.Subscriber(\"goals_nav\", Point, self.goals_callback)\n rospy.wait_for_service(\"get_landmark\")\n self.get_landmark = rospy.ServiceProxy(\"get_landmark\", GetLandmark)\n\t\"\"\"\n\t#Tristan-WebcamServices\n\trospy.wait_for_service(\"WebcamService\")\n\tself.webcam_service = rospy.ServiceProxy(\"WebcamService\", WebcamService)\n\t#!Tristan\n\t\"\"\"\n\tself.recov = False\n self.goals_pub = rospy.Publisher(\"goals\", Point,queue_size=10)\n self.goals_nav_pub = rospy.Publisher(\"goals_nav\", Point, queue_size=10)\n self.show_msgs_pub = rospy.Publisher(\"show_msg\", UIMessage, queue_size=10)\n rospy.loginfo(\"Listening for client robots.\")\n rospy.on_shutdown(self.shutdown)", "title": "" }, { "docid": "0c0cdf09491fff87459284b10e56110a", "score": "0.6329976", "text": "def robotInit(self):\r\n self.frontLeft = wpilib.Talon(2)\r\n self.rearLeft = wpilib.Talon(1)\r\n self.left = wpilib.SpeedControllerGroup(self.frontLeft, self.rearLeft)\r\n\r\n self.frontRight = wpilib.Talon(4)\r\n self.rearRight = wpilib.Talon(3)\r\n self.right = wpilib.SpeedControllerGroup(self.frontRight, self.rearRight)\r\n\r\n self.drive = 
wpilib.drive.DifferentialDrive(self.left, self.right)\r\n self.drive.setSafetyEnabled(False)\r\n self.Assistant = wpilib.Joystick(0)\r\n self.Driver = wpilib.Joystick(1)\r\n self.timer = wpilib.Timer()\r\n self.grabberLeft = wpilib.Victor(7)\r\n self.grabberRight = wpilib.Victor(6)\r\n self.elevatorLeft = wpilib.Victor(9)\r\n self.elevatorRight = wpilib.Victor(8)\r\n self.myCompressor = wpilib.Compressor(0)\r\n #self.toggle = toggle(self.Assistant, 6)\r", "title": "" }, { "docid": "5b01e6e62638316f2bba985d8ec77914", "score": "0.63021934", "text": "def __init__(self):\n\n self._current = Pose() # initlize correctly\n \"\"\"\n self._odom_list.waitForTransform('/odom', '/base_link', rospy.Time(0), rospy.Duration(1.0))\n (position, orientation) = self._odom_list.lookupTransform('/odom','/base_link', rospy.Time(0))\n \"\"\"\n self._current.position.x = 0 #!FIXME Do we need this?\n self._current.position.y = 0\n self._current.orientation.x = 0\n self._current.orientation.y = 0\n self._current.orientation.z = 0\n self._current.orientation.w = 0 #!FIXME Do we need this?\n self._odom_list = tf.TransformListener()\n rospy.Timer(rospy.Duration(.1), self.timerCallback)\n self._vel_pub = rospy.Publisher('/cmd_vel', Twist, queue_size=1)\n rospy.Subscriber('/move_base_simple/goal', PoseStamped, self.navToPose, queue_size=1) # handle nav goal events\n rospy.sleep(0.2)\n self.yaw = 0 #!FIXME Do we need this?\n self.pitch = 0\n self.roll = 0 #!FIXME Do we need this?", "title": "" }, { "docid": "7bbb8b396df0b0c6ffbd94680ed08470", "score": "0.63008624", "text": "def robotInit(self):\n \n self.lstick = wpilib.Joystick(0)\n self.encoder=wpilib.Encoder(0,1)\n self.encoderCascada = wpilib.Encoder(2,3)\n\n self.motordedo = wpilib.Spark(2)\n self.motorPelotas = wpilib.Spark(1)\n\n self.l_motor_del = wpilib.VictorSP(9)\n self.r_motor_del = wpilib.VictorSP(7)\n self.l_motor_tras = wpilib.VictorSP(5)\n self.r_motor_tras = wpilib.VictorSP(6)\n self.l_motor=wpilib.SpeedControllerGroup(self.l_motor_del,self.l_motor_tras)\n self.r_motor=wpilib.SpeedControllerGroup(self.r_motor_del,self.r_motor_tras)\n\n self.robot_drive = wpilib.drive.DifferentialDrive(self.l_motor, self.r_motor)\n\n self.servomotor=wpilib.Servo(8)\n self.valvula=wpilib.DoubleSolenoid(0,7)\n\n self.motorcascada1 = wpilib.Spark(2)\n self.motorcascada2 = wpilib.Spark(3)\n self.cascada = wpilib.SpeedControllerGroup(self.motorcascada1,self.motorcascada2)", "title": "" }, { "docid": "fbec0e930e80addd56daf48dc606e926", "score": "0.62832654", "text": "def _setup_scene(self, scene_config: Optional[sapien.SceneConfig] = None):\n if scene_config is None:\n scene_config = self._get_default_scene_config()\n self._scene = self._engine.create_scene(scene_config)\n self._scene.set_timestep(1.0 / self._sim_freq)", "title": "" }, { "docid": "48cd3099b2cc49a73605d4c3a268eca2", "score": "0.6262875", "text": "def build(self):\n target_pos = gen_start_position(.25, self.world.floor) + [.25]\n car_pos = gen_start_position(.3, self.world.floor) + [.25]\n self.targetUniqueId = self.world.create_shape(pybullet.GEOM_BOX, target_pos, size=0.2, color=self.color)\n #self.targetUniqueId = self.physics.loadURDF(os.path.join(self.urdf_root, \"target.urdf\"), target_pos)\n\n config = {\n 'power': 20,\n 'resolution': 1,\n 'is_discrete': False,\n 'target_pos': target_pos,\n 'initial_pos': car_pos\n }\n\n self.robot = Turtlebot(self.physics, config=config)\n self.robot.set_position(car_pos)\n #pybullet.configureDebugVisualizer(pybullet.COV_ENABLE_RENDERING, 0)\n\n for i in range(10):\n 
self.physics.stepSimulation()", "title": "" }, { "docid": "d0d56756704145335b0283c8dc978837", "score": "0.62539154", "text": "def initialize():\n global pub\n\n # Provide a name for the node\n rospy.init_node(\"explore\", anonymous=True)\n\n # Give some feedback in the terminal\n rospy.loginfo(\"Exploration node initialization\")\n\n # Subscribe to and synchronise the infra-red sensors in front of the robot\n ir_front_left = message_filters.Subscriber(\"ir_front_left\", Range)\n ir_front_right = message_filters.Subscriber(\"ir_front_right\", Range)\n ir_front_left_center = message_filters.Subscriber(\n \"ir_front_left_center\", Range)\n ir_front_right_center = message_filters.Subscriber(\n \"ir_front_right_center\", Range)\n # Wait for all topics to arrive before calling the callback\n ts_ir_front = message_filters.TimeSynchronizer([\n ir_front_left,\n ir_front_left_center,\n ir_front_right_center,\n ir_front_right], 1)\n # Register the callback to be called when all sensor readings are ready\n ts_ir_front.registerCallback(process_ir_front)\n\n # Publish the linear and angular velocities so the robot can move\n pub = rospy.Publisher(\"cmd_vel\", Twist, queue_size=1)\n\n # Register the callback for when the node is stopped\n rospy.on_shutdown(stop_robot)\n\n # spin() keeps python from exiting until this node is stopped\n rospy.spin()", "title": "" }, { "docid": "b28320550d10897bdb0dcd198b8fe8fe", "score": "0.62511814", "text": "def load_robot(self):\n\t\tself.robot = Robot()\n\t\tself.robot_sprite = pygame.sprite.RenderPlain((self.robot))", "title": "" }, { "docid": "4203154e144d565a97a9a6e9bea917ea", "score": "0.6229118", "text": "def __init__(self, seed=None):\n\n self.robot_controller = RddaUr5ControlClient()\n\n # UR5e\n self.robot_start_pos_x = 0.6\n self.robot_start_pos_y = 0\n self.robot_start_pos_z = 0.2\n self.robot_velocity_high = 0.1\n self.robot_velocity_low = 0.02\n\n # RDDA\n self.fixed_stiffness = 1\n self.low_stiffness = 0.2\n self.high_stiffness = 4\n self.theta_baseline = 0\n\n # X range\n self.y_origin = -0.32\n self.y_left_limit = 0\n self.y_right_limit = 14\n self.y_hardware_right_limit = 10\n self.num_y_slots = self.y_right_limit + 1\n self.size_scale = 0.08 # 1 unit size in the code means 0.08 meter in reality\n\n # Bumps\n self.y_bump1 = None\n self.y_bump2 = None\n self.min_bump_distance = 2\n self.max_bump_distance = int(self.y_right_limit / 2)\n self.y_bump1_limit_min = 2\n self.y_bump2_limit_min = self.y_bump1_limit_min + self.min_bump_distance\n self.y_bump2_limit_max = self.y_right_limit - 2\n self.y_bump1_limit_max = self.y_bump2_limit_max - self.min_bump_distance\n\n # Theta\n self.FINGER_POINT_RIGHT = THETA_RIGHT\n self.FINGER_POINT_DOWN = THETA_NEUTRAL\n self.FINGER_POINT_LEFT = THETA_LEFT\n self.max_theta = THETA_RIGHT\n self.angle_str = {THETA_LEFT: 't_left', THETA_NEUTRAL: 't_neutral', THETA_RIGHT: 't_right'}\n\n # Action\n self.action_space = spaces.Discrete(4)\n self.norm_action = self.action_space.n - 1\n self.action_str = {ACT_LEFT_SOFT: 'Left&Soft', ACT_LEFT_HARD: 'Left&Hard', ACT_RIGHT_SOFT: 'Right&Soft',\n ACT_RIGHT_HARD: 'Right&Hard'}\n\n # States and Obs\n self.discount = 1.0\n self.observation_space = spaces.Box(low=-float('inf'), high=float('inf'), shape=(3,), dtype=np.float32)\n self.y_g_left_limit = self.y_left_limit\n self.y_g_right_limit = self.y_right_limit\n self.y_g = None\n self.theta = self.FINGER_POINT_DOWN\n self.start_state = None\n\n # Belief\n self.belief = np.zeros((self.num_y_slots, self.num_y_slots))\n\n # numpy random\n 
self.np_random = None\n self.seed(seed)\n self.action = None", "title": "" }, { "docid": "4b2ce7e1c7f526b97f733700b8b134ae", "score": "0.62194717", "text": "def robotInit(self):\r\n self.robot_drive = wpilib.RobotDrive(0,1,2,3)\r\n\r\n self.stick = wpilib.Joystick(0)\r\n self.elevator_stick = wpilib.Joystick(1)\r\n\r\n #self.accelerometer = wpili.BuiltInAccelerometer()\r\n\r\n '''self.talon_0 = wpilib.Talon(0)\r\n self.talon_1 = wpilib.Talon(1)\r\n self.talon_2 = wpilib.Talon(2)\r\n self.talon_3 = wpilib.Talon(3)'''\r\n\r\n self.elevator_jag = wpilib.CANJaguar(1)\r\n\r\n # self.elevator_jag.setPositionModeQuadEncoder(360,80,0.000,4)\r\n self.elevator_jag.setPercentModeQuadEncoder(360)\r\n self.elevator_jag.enableControl()", "title": "" }, { "docid": "ef687045c0fb61c1bf4a74c4ca76985d", "score": "0.6179697", "text": "def init_scene(self):\r\n self.scene = Scene()\r\n self.initial_scene()", "title": "" }, { "docid": "866c3e0a80196c5cef85f4882e166851", "score": "0.6168282", "text": "def __create_robot(self,genome,base_pos=[0,0,0]):\n global touch_logging,log_body_touching\n\n # Joint Power\n BACK_FORCE = genome.max_forces[0]\n RH_FORCE = genome.max_forces[1]\n RK_FORCE = genome.max_forces[2]\n RA_FORCE = genome.max_forces[3]\n RT_FORCE = genome.max_forces[4]\n FH_FORCE = genome.max_forces[5]\n FK_FORCE = genome.max_forces[6]\n T_FORCE = genome.max_forces[7]\n H_FORCE = genome.max_forces[8]\n\n # Flexibility for the joints.\n ERP = genome.erp\n\n CFM = genome.cfm\n\n joint_range = 1.0\n\n joint_ranges = genome.joint_ranges\n\n height = 0.75*genome.bf # Scale up by the genome base factor.\n\n # Torso body dimensions\n body_seg_dims = genome.body_dimensions[0:3]\n\n # Store main body dimensions for flipped checking.\n self.main_body_dimensions = [i for i in body_seg_dims[1]]\n\n # Torso body positions\n body_seg_pos = [\n [-body_seg_dims[1][0]/2.-body_seg_dims[0][0]/2.,height,0.], # Rear Segment\n [0,height,0.], # Mid Segment\n [body_seg_dims[1][0]/2.+body_seg_dims[2][0]/2.,height,0.] 
# Front Segment\n ]\n\n # Masses of the segments.\n body_masses = genome.body_masses\n\n # Leg Dimensions \n leg_dims = genome.body_dimensions[3:]\n\n # Leg Rotations\n leg_rotations = [\n genome.body_rotations[0], # Rear Upper Legs\n genome.body_rotations[1], # Rear Mid Legs\n genome.body_rotations[2], # Rear Low Legs\n genome.body_rotations[3], # Rear Feet\n genome.body_rotations[4], # Front Upper Legs\n genome.body_rotations[5], # Front Lower Legs\n ]\n\n # Body segement and upper leg joint positions\n fudge_factor = 0.0001 # Put the legs slightly outside the body, helps with stability.\n body_seg_joint_pos = [\n [-body_seg_dims[1][0]/2.,height,0.], # Rear - Mid Connection\n [body_seg_dims[1][0]/2.,height,0.], # Mid - Front Connection\n [body_seg_pos[0][0]-body_seg_dims[0][0]/4., height, body_seg_dims[0][2]/2.+leg_dims[0][1]+fudge_factor], # Rear Upper Left Leg\n [body_seg_pos[0][0]-body_seg_dims[0][0]/4., height, -body_seg_dims[0][2]/2.-leg_dims[0][1]-fudge_factor], # Rear Upper Right Leg\n [body_seg_pos[2][0]+body_seg_dims[0][0]/4., height, body_seg_dims[2][2]/2.+leg_dims[4][1]+fudge_factor], # Front Upper Left Leg\n [body_seg_pos[2][0]+body_seg_dims[0][0]/4., height, -body_seg_dims[2][2]/2.-leg_dims[4][1]-fudge_factor], # Front Upper Right Leg\n [body_seg_pos[0][0]-body_seg_dims[0][0]/2., height, 0.], # Tail Connection\n [body_seg_pos[2][0]+body_seg_dims[2][0]/2., height, 0.], # Head Connection\n ]\n\n # Tail Rotations\n tail_rotations = [\n genome.body_rotations[6], # Base of Tail\n genome.body_rotations[7], # Mid Tail\n genome.body_rotations[8], # End Tail\n ]\n\n # Tail Dimensions\n tail_dimensions = genome.tail_dimensions\n\n # Head Dimensions\n head_dims = genome.head_dimensions\n\n # Head Rotations\n head_rotations = [90,45,0]\n\n # Set the reference point for the MOI pivot.\n self.ref_moi_pivot = [-body_seg_dims[0][0]/4.,0.,0.]\n \n # Main Body (3 sections\n self.man.create_box(0,body_seg_dims[0],body_seg_pos[0],density=body_masses[0],mass_flag=mass_flag_fix) # Rear Segment\n self.man.create_box(1,body_seg_dims[1],body_seg_pos[1],density=body_masses[1],mass_flag=mass_flag_fix) # Mid Segment\n self.man.create_box(2,body_seg_dims[2],body_seg_pos[2],density=body_masses[2],mass_flag=mass_flag_fix) # Front Segment\n\n self.man.create_universal(0,body_seg_joint_pos[0],[0,1],axis1=[0,0,1],axis2=[0,1,0],loStop1=-joint_range,hiStop1=joint_range,loStop2=-joint_range,hiStop2=joint_range,fmax=BACK_FORCE[0],fmax2=BACK_FORCE[1])\n self.man.create_universal(1,body_seg_joint_pos[1],[1,2],axis1=[0,0,1],axis2=[0,1,0],loStop1=-joint_range,hiStop1=joint_range,loStop2=-joint_range,hiStop2=joint_range,fmax=BACK_FORCE[0],fmax2=BACK_FORCE[1])\n\n # Upper Legs\n self.man.create_capsule(3, body_masses[3], leg_dims[0][0], leg_dims[0][1],[0,0,0],rot=[0.,0.,0.],mass_flag=mass_flag_fix)\n Placement.place_capsule_at_trans(self.man.bodies[3],pos=body_seg_joint_pos[2],rot=leg_rotations[0])\n self.man.create_flexible_universal(2, body_seg_joint_pos[2],[0,3],axis1=[1,0,0],axis2=[0,0,1],loStop1=joint_ranges[2][1][0],hiStop1=joint_ranges[2][1][1],loStop2=joint_ranges[2][0][0],hiStop2=joint_ranges[2][0][1],fmax=RH_FORCE[0],fmax2=RH_FORCE[1],erp1=ERP[1][0],erp2=ERP[1][1],cfm1=CFM[1][0],cfm2=CFM[1][1])\n self.man.create_capsule(4, body_masses[3], leg_dims[0][0], leg_dims[0][1],[0,0,0],rot=[0.,0.,0.],mass_flag=mass_flag_fix)\n Placement.place_capsule_at_trans(self.man.bodies[4],pos=body_seg_joint_pos[3],rot=leg_rotations[0])\n self.man.create_flexible_universal(3, 
body_seg_joint_pos[3],[0,4],axis1=[1,0,0],axis2=[0,0,1],loStop1=joint_ranges[2][1][0],hiStop1=joint_ranges[2][1][1],loStop2=joint_ranges[2][0][0],hiStop2=joint_ranges[2][0][1],fmax=RH_FORCE[0],fmax2=RH_FORCE[1],erp1=ERP[1][0],erp2=ERP[1][1],cfm1=CFM[1][0],cfm2=CFM[1][1])\n\n # Mid Legs\n self.man.create_capsule(5, body_masses[4], leg_dims[1][0], leg_dims[1][1],[0,0,0],rot=[0.,0.,0.],mass_flag=mass_flag_fix)\n j_loc = Placement.place_capsule_trans(self.man.bodies[3],self.man.bodies[5],rot=leg_rotations[1]) \n self.man.create_flexible_universal(4, j_loc,[3,5],axis1=[1,0,0],axis2=[0,0,1],loStop1=joint_ranges[3][1][0],hiStop1=joint_ranges[3][1][1],loStop2=joint_ranges[3][0][0],hiStop2=joint_ranges[3][0][1],fmax=RK_FORCE[0],fmax2=RK_FORCE[1],erp1=ERP[2][0],erp2=ERP[2][1],cfm1=CFM[2][0],cfm2=CFM[2][1])\n self.man.create_capsule(6, body_masses[4], leg_dims[1][0], leg_dims[1][1],[0,0,0],rot=[0.,0.,0.],mass_flag=mass_flag_fix)\n j_loc = Placement.place_capsule_trans(self.man.bodies[4],self.man.bodies[6],rot=leg_rotations[1]) \n self.man.create_flexible_universal(5, j_loc,[4,6],axis1=[1,0,0],axis2=[0,0,1],loStop1=joint_ranges[3][1][0],hiStop1=joint_ranges[3][1][1],loStop2=joint_ranges[3][0][0],hiStop2=joint_ranges[3][0][1],fmax=RK_FORCE[0],fmax2=RK_FORCE[1],erp1=ERP[2][0],erp2=ERP[2][1],cfm1=CFM[2][0],cfm2=CFM[2][1])\n\n # Low Legs\n self.man.create_capsule(7, body_masses[5], leg_dims[2][0], leg_dims[2][1],[0,0,0],rot=[0.,0.,0.],mass_flag=mass_flag_fix)\n j_loc = Placement.place_capsule_trans(self.man.bodies[5],self.man.bodies[7],rot=leg_rotations[2]) \n self.man.create_flexible_universal(6, j_loc,[5,7],axis1=[1,0,0],axis2=[0,0,1],loStop1=joint_ranges[4][1][0],hiStop1=joint_ranges[4][1][1],loStop2=joint_ranges[4][0][0],hiStop2=joint_ranges[4][0][1],fmax=RA_FORCE[0],fmax2=RA_FORCE[1],erp1=ERP[3][0],erp2=ERP[3][1],cfm1=CFM[3][0],cfm2=CFM[3][1])\n self.man.create_capsule(8, body_masses[5], leg_dims[2][0], leg_dims[2][1],[0,0,0],rot=[0.,0.,0.],mass_flag=mass_flag_fix)\n j_loc = Placement.place_capsule_trans(self.man.bodies[6],self.man.bodies[8],rot=leg_rotations[2]) \n self.man.create_flexible_universal(7, j_loc,[6,8],axis1=[1,0,0],axis2=[0,0,1],loStop1=joint_ranges[4][1][0],hiStop1=joint_ranges[4][1][1],loStop2=joint_ranges[4][0][0],hiStop2=joint_ranges[4][0][1],fmax=RA_FORCE[0],fmax2=RA_FORCE[1],erp1=ERP[3][0],erp2=ERP[3][1],cfm1=CFM[3][0],cfm2=CFM[3][1])\n\n # Feet \n self.man.create_capsule(9, body_masses[6], leg_dims[3][0], leg_dims[3][1],[0,0,0],rot=[0.,0.,0.],mass_flag=mass_flag_fix)\n j_loc = Placement.place_capsule_trans(self.man.bodies[7],self.man.bodies[9],rot=leg_rotations[3]) \n self.man.create_flexible_universal(8, j_loc,[7,9],axis1=[1,0,0],axis2=[0,0,1],loStop1=joint_ranges[5][1][0],hiStop1=joint_ranges[5][1][1],loStop2=joint_ranges[5][0][0],hiStop2=joint_ranges[5][0][1],fmax=RT_FORCE[0],fmax2=RT_FORCE[1],erp1=ERP[4][0],erp2=ERP[4][1],cfm1=CFM[4][0],cfm2=CFM[4][1])\n self.man.create_capsule(10, body_masses[6], leg_dims[3][0], leg_dims[3][1],[0,0,0],rot=[0.,0.,0.],mass_flag=mass_flag_fix)\n j_loc = Placement.place_capsule_trans(self.man.bodies[8],self.man.bodies[10],rot=leg_rotations[3]) \n self.man.create_flexible_universal(9, j_loc,[8,10],axis1=[1,0,0],axis2=[0,0,1],loStop1=joint_ranges[5][1][0],hiStop1=joint_ranges[5][1][1],loStop2=joint_ranges[5][0][0],hiStop2=joint_ranges[5][0][1],fmax=RT_FORCE[0],fmax2=RT_FORCE[1],erp1=ERP[4][0],erp2=ERP[4][1],cfm1=CFM[4][0],cfm2=CFM[4][1])\n\n # Upper Front Legs\n self.man.create_capsule(11, body_masses[7], leg_dims[4][0], 
leg_dims[4][1],[0,0,0],rot=[0.,0.,0.],mass_flag=mass_flag_fix)\n Placement.place_capsule_at_trans(self.man.bodies[11],pos=body_seg_joint_pos[4],rot=leg_rotations[4])\n self.man.create_flexible_universal(10, body_seg_joint_pos[4],[2,11],axis1=[1,0,0],axis2=[0,0,1],loStop1=joint_ranges[6][1][0],hiStop1=joint_ranges[6][1][1],loStop2=joint_ranges[6][0][0],hiStop2=joint_ranges[6][0][1],fmax=FH_FORCE[0],fmax2=FH_FORCE[1],erp1=ERP[5][0],erp2=ERP[5][1],cfm1=CFM[5][0],cfm2=CFM[5][1])\n self.man.create_capsule(12, body_masses[7], leg_dims[4][0], leg_dims[4][1],[0,0,0],rot=[0.,0.,0.],mass_flag=mass_flag_fix)\n Placement.place_capsule_at_trans(self.man.bodies[12],pos=body_seg_joint_pos[5],rot=leg_rotations[4])\n self.man.create_flexible_universal(11, body_seg_joint_pos[5],[2,12],axis1=[1,0,0],axis2=[0,0,1],loStop1=joint_ranges[6][1][0],hiStop1=joint_ranges[6][1][1],loStop2=joint_ranges[6][0][0],hiStop2=joint_ranges[6][0][1],fmax=FH_FORCE[0],fmax2=FH_FORCE[1],erp1=ERP[5][0],erp2=ERP[5][1],cfm1=CFM[5][0],cfm2=CFM[5][1])\n\n # Front Feet \n self.man.create_capsule(13, body_masses[8], leg_dims[5][0], leg_dims[5][1],[0,0,0],rot=[0.,0.,0.],mass_flag=mass_flag_fix)\n j_loc = Placement.place_capsule_trans(self.man.bodies[11],self.man.bodies[13],rot=leg_rotations[5]) \n self.man.create_flexible_universal(12, j_loc,[11,13],axis1=[1,0,0],axis2=[0,0,1],loStop1=joint_ranges[7][1][0],hiStop1=joint_ranges[7][1][1],loStop2=joint_ranges[7][0][0],hiStop2=joint_ranges[7][0][1],fmax=FK_FORCE[0],fmax2=FK_FORCE[1],erp1=ERP[6][0],erp2=ERP[6][1],cfm1=CFM[6][0],cfm2=CFM[6][1])\n self.man.create_capsule(14, body_masses[8], leg_dims[5][0], leg_dims[5][1],[0,0,0],rot=[0.,0.,0.],mass_flag=mass_flag_fix)\n j_loc = Placement.place_capsule_trans(self.man.bodies[12],self.man.bodies[14],rot=leg_rotations[5]) \n self.man.create_flexible_universal(13, j_loc,[12,14],axis1=[1,0,0],axis2=[0,0,1],loStop1=joint_ranges[7][1][0],hiStop1=joint_ranges[7][1][1],loStop2=joint_ranges[7][0][0],hiStop2=joint_ranges[7][0][1],fmax=FK_FORCE[0],fmax2=FK_FORCE[1],erp1=ERP[6][0],erp2=ERP[6][1],cfm1=CFM[6][0],cfm2=CFM[6][1])\n\n # Tail\n \n self.man.create_capsule(15, body_masses[9], tail_dimensions[0][0], tail_dimensions[0][1],[0,0,0],rot=[0.,0.,0.],mass_flag=mass_flag_fix)\n Placement.place_capsule_at_trans(self.man.bodies[15],pos=body_seg_joint_pos[6],rot=tail_rotations[0])\n self.man.create_flexible_universal(14, body_seg_joint_pos[6],[0,15],axis1=[0,1,0],axis2=[0,0,1],loStop1=joint_ranges[8][1][0],hiStop1=joint_ranges[8][1][1],loStop2=joint_ranges[8][0][0],hiStop2=joint_ranges[8][0][1],fmax=T_FORCE[0],fmax2=T_FORCE[1],erp1=ERP[7][0],erp2=ERP[7][1],cfm1=CFM[7][0],cfm2=CFM[7][1])\n \n self.man.create_capsule(16, body_masses[10], tail_dimensions[0][0], tail_dimensions[0][1],[0,0,0],rot=[0.,0.,0.],mass_flag=mass_flag_fix)\n j_loc = Placement.place_capsule_trans(self.man.bodies[15],self.man.bodies[16],rot=tail_rotations[1]) \n self.man.create_flexible_universal(15, j_loc,[15,16],axis1=[0,1,0],axis2=[0,0,-1],loStop1=joint_ranges[9][1][0],hiStop1=joint_ranges[9][1][1],loStop2=joint_ranges[9][0][0],hiStop2=joint_ranges[9][0][1],fmax=T_FORCE[0],fmax2=T_FORCE[1],erp1=ERP[7][0],erp2=ERP[7][1],cfm1=CFM[7][0],cfm2=CFM[7][1])\n \n self.man.create_capsule(17, body_masses[11], tail_dimensions[0][0], tail_dimensions[0][1],[0,0,0],rot=[0.,0.,0.],mass_flag=mass_flag_fix)\n j_loc = Placement.place_capsule_trans(self.man.bodies[16],self.man.bodies[17],rot=tail_rotations[2]) \n self.man.create_flexible_universal(16, 
j_loc,[16,17],axis1=[0,1,0],axis2=[0,0,1],loStop1=joint_ranges[9][1][0],hiStop1=joint_ranges[9][1][1],loStop2=joint_ranges[9][0][0],hiStop2=joint_ranges[9][0][1],fmax=T_FORCE[0],fmax2=T_FORCE[1],erp1=ERP[7][0],erp2=ERP[7][1],cfm1=CFM[7][0],cfm2=CFM[7][1])\n\n # Head\n self.man.create_capsule(18, body_masses[12], head_dims[0], head_dims[1],[0,0,0],rot=[0.,0.,0.],mass_flag=mass_flag_fix)\n Placement.place_capsule_at_trans(self.man.bodies[18],pos=body_seg_joint_pos[7],rot=head_rotations)\n self.man.create_flexible_universal(17, body_seg_joint_pos[7],[2,18],axis1=[0,1,0],axis2=[0,0,1],loStop1=joint_ranges[10][1][0],hiStop1=joint_ranges[10][1][1],loStop2=joint_ranges[10][0][0],hiStop2=joint_ranges[10][0][1],fmax=H_FORCE[0],fmax2=H_FORCE[1],erp1=ERP[8][0],erp2=ERP[8][1],cfm1=CFM[8][0],cfm2=CFM[8][1])\n\n # Add in information about the feet.\n self.sensor.add_touch_sensor([9,10,13,14])\n touch_logging.append({man.get_geom_by_key(9):0,man.get_geom_by_key(10):0,man.get_geom_by_key(13):0,man.get_geom_by_key(14):0})\n touch_logging.append({man.get_geom_by_key(9):0,man.get_geom_by_key(10):0,man.get_geom_by_key(13):0,man.get_geom_by_key(14):0})\n\n # Add body touch sensors (Only if logging.)\n if log_body_touching:\n self.sensor.add_body_touch_sensor([i for i in range(19)])\n\n # Add in joint positions sensors.\n self.sensor.register_joint_sensors([i for i in range(17)])\n\n # Turn feedback on for the actuated joints.\n for i in range(self.joint_feedback_range[0],self.joint_feedback_range[1]):\n self.man.joints[i].setFeedback()", "title": "" }, { "docid": "c74a16bf8b18d0df8f111ea5d9962f1b", "score": "0.6153382", "text": "def __init__(self,\n urdf_version=None,\n control_time_step=0.006,\n action_repeat=6,\n control_latency=0,\n pd_latency=0,\n on_rack=False,\n motor_kp=1.0,\n motor_kd=0.02,\n remove_default_joint_damping=False,\n render=False,\n num_steps_to_log=1000,\n env_randomizer=None,\n log_path=None):\n super(spotWalkEnv,\n self).__init__(urdf_version=urdf_version,\n accurate_motor_model_enabled=True,\n motor_overheat_protection=True,\n hard_reset=False,\n motor_kp=motor_kp,\n motor_kd=motor_kd,\n remove_default_joint_damping=remove_default_joint_damping,\n control_latency=control_latency,\n pd_latency=pd_latency,\n on_rack=on_rack,\n render=render,\n num_steps_to_log=num_steps_to_log,\n env_randomizer=env_randomizer,\n log_path=log_path,\n control_time_step=control_time_step,\n action_repeat=action_repeat)\n\n action_dim = 12\n action_high = np.array([0.1] * action_dim)\n self.action_space = spaces.Box(-action_high, action_high)\n self._cam_dist = 1.0\n self._cam_yaw = 30\n self._cam_pitch = -30", "title": "" }, { "docid": "239a48012bfb7a02e04d32ee84442269", "score": "0.61433923", "text": "def _init_and_spin_ros(self): \n self.car_poly_pub = rospy.Publisher(\"/3D_car\", PolygonStamped, queue_size=1)\n self.pose_pub = rospy.Publisher(\"/robot_pose\", PoseStamped, queue_size=1)\n self.path_plan_pub = rospy.Publisher(\"/path_plan\", Path, queue_size=1)\n self.target_pub = rospy.Publisher(\"/target\", PointStamped, queue_size=1)\n self.longer_pub = rospy.Publisher(\"/plan_longer\", Bool, queue_size=1)\n rospy.Subscriber('/robot_pose', PoseStamped, self.update_pose, queue_size=1)\n # publishes the new cx cy from the pathplanner when obstacle is detected\n rospy.Subscriber(\"/path_floor_2\", path_pure_pursuit, self._update_path, queue_size=1)\n rospy.Subscriber('/map_obstacle', OccupancyGrid, self._get_obsmap_cb, queue_size=1)\n rospy.sleep(3)\n ## Initial Pose\n print('Waiting for initial pose')\n 
rospy.wait_for_message('/initialpose', PoseWithCovarianceStamped)\n print('Received initial pose')\n while not self.car_position.x:\n print('waiting for initial car position')\n rospy.sleep(1)\n\n rospy.spin()", "title": "" }, { "docid": "12db4eb133ec3559bae38365ab7ba35c", "score": "0.6140351", "text": "def __init__(self):\n self.uw = UrbiWrapper()\n\n # check if the result from the dummy request fits; otherwise abort\n if not self.uw.isConnected:\n raise RuntimeError('Connection to Flash failed.')\n \n joint_zero_positions = eval(self.uw.send('_Head_ZeroPosition')[0])\n\n # create joints\n for idx, info in enumerate(Head.JOINTS): \n joint = Joint(self.uw, info[1], info[2], info[3], joint_zero_positions[idx])\n setattr(self, info[0], joint)\n\n # center joints\n for joint in Head.JOINTS:\n getattr(self, joint[0]).center() \n\n # create sensors\n self.touch_up = Sensor(self.uw, 'robot.body.neck.head.sensor[up]', 0, 2000)\n self.touch_left = Sensor(self.uw, 'robot.body.neck.head.sensor[left]', 0, 2000)\n self.touch_front = Sensor(self.uw, 'robot.body.neck.head.sensor[front]', 0, 2000)\n self.touch_right = Sensor(self.uw, 'robot.body.neck.head.sensor[right]', 0, 2000)\n self.touch_down = Sensor(self.uw, 'robot.body.neck.head.sensor[down]', 0, 2000)", "title": "" }, { "docid": "aa703837cb67e90fde2d6b3d9b42e30f", "score": "0.6137695", "text": "def start():\n\n global x, y, h, v, w\n global pub\n\n #ROS setup\n pub = rospy.Publisher('turtle1/pose', PoseStamped, queue_size = 1000)\n rospy.Subscriber('turtle1/cmd_vel', Twist, tw_callback)\n rospy.init_node('Robotic_Agent')\n r = rospy.Rate(100)\n\n #ROS main loop\n while not rospy.is_shutdown():\n\n #simulate turtlebot dynamics\n dx = v*cos(h)\n dy = v*sin(h)\n dh = w\n x = x + 0.01 * dx\n y = y + 0.01 * dy\n h = h + 0.01 * dh\n\n #ROS Pose format with Quaternions Orientation:\n ps = PoseStamped()\n ps.header.stamp = rospy.Time.now()\n ps.header.frame_id = '/base_link'\n ps.pose.position.x = x\n ps.pose.position.y = y\n ps.pose.position.z = 0\n ps.pose.orientation.x = 0\n ps.pose.orientation.y = 0\n ps.pose.orientation.z = sin(h/2.0)\n ps.pose.orientation.w = cos(h/2.0)\n\n #Publish message to topic\n pub.publish(ps)\n r.sleep()", "title": "" }, { "docid": "b6430bb15d00dc310a28c181b9527eeb", "score": "0.6114805", "text": "def robotInit(self):\n\n # All commands can get access to the subsystems here\n subsystems.LEDS = LEDs()\n subsystems.DRIVETRAIN = Drivetrain()\n subsystems.ELEVATOR = Elevator()\n subsystems.PAYLOAD = Payload()\n subsystems.SERIAL = SerialEvent()\n subsystems.PIGEON = Pigeon()\n self.compressor = wpilib.Compressor(0)\n self.compressor.setClosedLoopControl(True)\n #self.compressor.start()\n \"\"\"\n Since OI instantiates commands and commands need access to subsystems,\n OI must be initialized after subsystems.\n \"\"\"\n subsystems.JOYSTICK = oi.get_joystick()\n wpilib.CameraServer.launch()", "title": "" }, { "docid": "0ebbff7a8a6720897279a58a91ef5a5a", "score": "0.60850686", "text": "def __init__(self,man,genome,base_pos=[0,0,0],logging=False):\n global output_path\n\n self.man = man\n self.body_keys = []\n self.joint_feedback_range = [2,15]\n\n # Sensors for robot.\n self.sensor = Sensors(man,logging=logging,log_path=output_path)\n\n # Hardware Limits\n self.ref_moi_pivot = \"\"\n\n # Initialize the robot.\n self.__create_robot(genome,base_pos=base_pos)", "title": "" }, { "docid": "23f12f2ae240e5af1337607ae2444739", "score": "0.6057076", "text": "def __init__(self, Nodeid, h0, a, b, period, amp=0, phase=0, flag_up=False, 
flag_real_time=False, flag_real=False):\n self.Time = Robot_Time(period=period, flag_real_time=flag_real_time)\n self.trajectory = Trajectory(h0, a, b, period, amp=amp, phase=phase, flag_up=flag_up)\n self.Nodeid = Nodeid\n self.flag_real = flag_real\n\n if flag_real:\n # init the real robot control\n self.RgmNet = RgmNetwork() # create an can net for motor control\n self.RgmNet.add_node(self.Nodeid, 'config.ini') # add the motor id in the can network\n # change the can network state\n self.RgmNet.control_state(self.Nodeid, 'RESET FAULT') # reset fault\n self.RgmNet.control_state(self.Nodeid, 'SHUT DOWN') # close every thing\n self.RgmNet.control_state(self.Nodeid, 'SWITCH 0N') # add the power but not enable\n angle_path_init = np.array(self.angle_init) + self.trajectory.get_control_angle(0)\n\n self.motion_flag = False # a flag to note whether motion is allowed\n self.init_flag = True # init flag\n\n else:\n # init the simulation environment\n init_pos = [0, 0, h0 + 0.033]\n self.robot = BipedRobot(init_pos, step=period / self.Time.sample_num, is_gui=True)\n self.robot.reset_joint_state(self.trajectory.get_control_angle(0))", "title": "" }, { "docid": "a2b818d262b2279ed1b23c4c1b9938c6", "score": "0.60500866", "text": "def __init__(self):\n self.__logger = logging.getLogger()\n self.__logger.debug(\"targetAcquisition/__init__: Started\")\n\n # Contains BoundBox objects (see utils.py), each of which contains opposite corners of a rectangle by percentage\n # of height and width of the image as (xmin, ymin) to (xmax, ymax)\n self.bbox=[((0, 0), (0, 0))]\n self.coordinates = []\n self.telemetryData = {}\n self.currentFrame = np.empty(0)\n self.yolo = Detection()\n\n self.__logger.debug(\"targetAcquisition/__init__: Finished\")", "title": "" }, { "docid": "88bf0081635e9ebf2bb4592db6934bf0", "score": "0.60447055", "text": "def __init__(self, p, urdfPath, initPos):\n # use the pybullet library \n self.p = p\n # load the robot \n self.id = self.p.loadURDF(urdfPath)\n self.numJoints = self.p.getNumJoints(self.id)\n\n # info aboout the kuka iiwa specifically \n self.endEffectorIndex = 6\n self.restOrn = [0, 0, 0, 1]\n # set limits for the null space \n self.ll = [-.967, -2, -2.96, 0.19, -2.96, -2.09, -3.05] # lower limits\n self.ul = [.967, 2, 2.96, 2.29, 2.96, 2.09, 3.05] # upper limits\n self.jr = [5.8, 4, 5.8, 4, 5.8, 4, 6] # joint ranges \n self.rp = [0, 0, 0, 0.5 * math.pi, 0, -math.pi * 0.5 * 0.66, 0] # rest poses\n\n # set to desired starting position, with end effector down \n self.p.resetBasePositionAndOrientation(self.id, posObj=initPos, ornObj=self.restOrn)\n # start tracking the robots's position\n self.baseInfo = self.p.getLinkState(bodyUniqueId=self.id, linkIndex=0)\n # make a bounding box of the robot's workspace (CW, NE->NW)\n self.vertices = [0.5, -1.5, 0.5, -0.5, -0.5, -0.5, -0.5, -1.5]\n (self.rBB, self.rBBC) = boundBox(self.baseInfo[0], self.vertices)\n # # show the bounding box --> actually handle this in main...\n self.initBB = drawCont(self.p, self.rBB, [0,0,0])", "title": "" }, { "docid": "59450253a5eec4f5508e4224bc95b6bf", "score": "0.6023487", "text": "def __init__(self, robot = None):\n self.robot = robot\n self.ultrasonic_sensor = ev3.UltrasonicSensor('in2')\n self.FSM = {'seeking': self.updateSeeking,\n 'found': self.updateFound}\n self.state = 'seeking'\n self.robot.runforever(0.1)", "title": "" }, { "docid": "0804c744e3de060af0423be4b0269e91", "score": "0.60124016", "text": "def robotInit(self):\n\n #Set up motors to drive robot\n self.M2 = wpilib.VictorSP(2)\n self.M3 
= wpilib.VictorSP(3)\n #self.M2.setInverted(True)\n #self.M3.setInverted(True)\n self.left = wpilib.SpeedControllerGroup(self.M2,self.M3)\n \n self.M0 = wpilib.VictorSP(0)\n self.M1 = wpilib.VictorSP(1)\n self.right = wpilib.SpeedControllerGroup(self.M0,self.M1)\n\n self.drive = wpilib.drive.DifferentialDrive(self.left, self.right)\n \n self.stick = wpilib.Joystick(0)\n self.timer = wpilib.Timer()", "title": "" }, { "docid": "a97ae35e9e6215b469fa4993ec323b77", "score": "0.6000357", "text": "def robotInit(self):\n\n self.stick = wpilib.Joystick(0)\n self.elevator_stick = wpilib.Joystick(1)\n\n self.wings = wpilib.Jaguar(4)\n self.rightWheel = wpilib.Jaguar(6)\n self.leftWheel = wpilib.Jaguar(7)\n self.elevator_jag = wpilib.Jaguar(5)\n self.brake = wpilib.Relay(0)\n\n self.robot_drive = wpilib.RobotDrive(0,1,2,3)\n\n self.timer = wpilib.Timer()\n\n print(\"1. Drive to Autozone with Style\")\n print(\"2. Can and Tote\")\n self.mode = input(\"Enter auto self.mode: \")\n\n try:\n self.mode = int(self.mode)\n except ValueError:\n self.mode = float(self.mode)", "title": "" }, { "docid": "a49cc996d7cde0074c357429b144ad0e", "score": "0.59969807", "text": "def __init__(self):\n self._pub_rate = rospy.Publisher('robot/joint_state_publish_rate',\n UInt16)\n self._left_arm = baxter_interface.limb.Limb(\"left\")\n self._right_arm = baxter_interface.limb.Limb(\"right\")\n self._left_joint_names = self._left_arm.joint_names()\n self._right_joint_names = self._right_arm.joint_names()\n\n # control parameters\n self._rate = 500.0 # Hz\n\n print(\"Getting robot state... \")\n self._rs = baxter_interface.RobotEnable(CHECK_VERSION)\n self._init_state = self._rs.state().enabled\n print(\"Enabling robot... \")\n self._rs.enable()\n\n # set joint state publishing to 500Hz\n self._pub_rate.publish(self._rate)", "title": "" }, { "docid": "069899a71d22a267980d64834433f229", "score": "0.59962165", "text": "def __init__(self, log=False, debug=False):\n self.log = log\n self.debug = debug\n self.path = os.path.dirname(os.path.realpath(__file__)) # gets working dir of this file\n\n # arm movement\n self.target_coords = Float64MultiArray() # the three absolute target coordinates\n self.target_coords.layout.dim.append(MultiArrayDimension()) # coordinates\n self.target_coords.layout.dim[0].label = \"coordinates\"\n self.target_coords.layout.dim[0].size = 3\n\n self.target_coords.layout.dim.append(MultiArrayDimension()) # speed\n self.target_coords.layout.dim[1].label = \"speed\"\n self.target_coords.layout.dim[1].size = 1\n\n # gripper\n self.target_gripper = Float64MultiArray() # gripper stuff\n self.target_gripper.layout.dim.append(MultiArrayDimension()) # gripper\n self.target_gripper.layout.dim[0].label = \"gripper\"\n self.target_gripper.layout.dim[0].size = 3 # width, speed, force\n\n # ros initiation\n if self.debug:\n print(\"Starting ROS node: franka_control_ros\")\n rospy.init_node('franka_control_ros', anonymous=True)\n self.pub_move_to = None\n self.pub_grasp = None\n self.pub_move_grip = None\n\n self.x = None\n self.y = None\n self.z = None\n self.pos_sub = None\n self.start_subscriber()\n time.sleep(0.5)\n print(\"Waiting for subscriber to return initial Franka position. 
You may need to check that 'franka_controller_sub' is running.\")\n while self.z is None:\n time.sleep(0.1)\n print(\"Initial Franka position found: \", [self.x, self.y, self.z])", "title": "" }, { "docid": "06267b4c01b9334aa8ccf41a8e7fcc1b", "score": "0.5981149", "text": "def __init__(self, configuration, controller):\r\n self.controller = controller\r\n self.controller.set_pilot(self)\r\n self.configuration = configuration\r\n\r\n self.stop_event = threading.Event()\r\n self.kill_event = threading.Event()\r\n threading.Thread.__init__(self, args=self.stop_event)\r\n\r\n self.sensors = None\r\n self.actuators = None\r\n self.brains = None\r\n self.initialize_robot()", "title": "" }, { "docid": "4499151ab1d14acdf9b37a05472c24f0", "score": "0.5978414", "text": "def __init__(self, use_drone = False):\n\n # member variables relating to the drone's control\n self.use_drone = use_drone # are we using the drone?\n if self.use_drone:\n self.navDataSource = NAV_DATA_SOURCE\n self.boundingDataSource = BOUNDING_DATA_SOURCE\n self.droneControlSource = DRONE_CONTROL_SOURCE\n self.init_drone_parameters()\n else:\n print \"Not Using Drone\"\n\n self.airborne = False # not airborne yet\n self.run_without_data_stream = False\n self.battery_level = \"Not read from drone\"\n self.boxHeight = 0\n self.boxX = 0\n self.tarHeight = 35\n self.tarX = 150\n\n # images and other data for the images/windows\n self.bridge = cv_bridge.CvBridge() # the interface to OpenCV\n\n # variables for state machine\n self.state = \"Keyboard\"\n self.lastHover = 0", "title": "" }, { "docid": "325b479c1352f9c27593d0b6c323e62a", "score": "0.5969319", "text": "def __init__(self, sensors_config):\r\n\r\n # Load cameras\r\n cameras_conf = sensors_config.get('Cameras', None)\r\n self.cameras = None\r\n if cameras_conf:\r\n self.cameras = self.__create_sensor(cameras_conf, 'camera')\r\n\r\n # Load lasers\r\n lasers_conf = sensors_config.get('Lasers', None)\r\n self.lasers = None\r\n if lasers_conf:\r\n self.lasers = self.__create_sensor(lasers_conf, 'laser')\r\n\r\n # Load pose3d\r\n pose3d_conf = sensors_config.get('Pose3D', None)\r\n if pose3d_conf:\r\n self.pose3d = self.__create_sensor(pose3d_conf, 'pose3d')", "title": "" }, { "docid": "8a2d07234926dc3489c4b06cbf0248c2", "score": "0.5969135", "text": "def __init__(self):\n rospy.init_node('controller'+chairbot_number, anonymous=True)\n self._port = rospy.get_param('~neato_port', \"/dev/neato\")\n rospy.loginfo(\"Using port: %s\"%(self._port))\n self.ramp_rate=rospy.get_param(\"~ramp_rate\",0.3)\n self.timeout_ticks = rospy.get_param(\"~timeout_ticks\", 2)\n self._robot = Botvac(self._port)\n #self.pub_motor = rospy.Publisher('roboBaseSub', NeatoCommand, queue_size=10)\n self.rate = rospy.get_param(\"~rate\",20)\n self.w = rospy.get_param(\"~base_width\", 0.49)\n self.ramping_enable = rospy.get_param(\"~ramping_enable\", False)\n self.velfactor=rospy.get_param(\"~mps_to_rpm\", 1)\n self.prev_left_vel=0\n self.prev_right_vel=0\n rospy.Subscriber('/neato01/cmd_vel_mux/input/navi', Twist, self.twistCallback)\n\n #############################################################", "title": "" }, { "docid": "541b39d902668d06f4a931078438bbdd", "score": "0.59626794", "text": "def main():\n\n print(\"Initializing node... 
\")\n rospy.init_node(\"controls_final_left_arm\")\n\n jc = JointController(\"left\",) # instantiate control object\n rospy.on_shutdown(jc.clean_shutdown) # register shutdown callback\n jc.move_to_neutral() # start from neutral arm position\n jc.start_control() # turn on control law", "title": "" }, { "docid": "8d099c430358fd2b85e23f42938a9315", "score": "0.59521586", "text": "def start(self): \n Thread(target=self._init_and_spin_ros, args=()).start()\n return self", "title": "" }, { "docid": "17287f2a982a447410b7e5ff71692825", "score": "0.59488386", "text": "def __init__(self): #,args): #set args in registration listing in __init__.py\n #move robot arm out of the way (behind ANA)\n moveArm = False\n\n if moveArm : \n kr5Loc = path.join('KR5','KR5 sixx R650_moveArm.urdf') #for kr5 arm as assistant \n modelLocs = [kr5Loc,'kima/getUpWithHelperBot3D_armKima_old_moveArm.skel'] \n else :\n kr5Loc = path.join('KR5','KR5 sixx R650.urdf') #for kr5 arm as assistant \n modelLocs = [kr5Loc,'kima/getUpWithHelperBot3D_armKima_old.skel'] \n \n kr5HelperBotLoc = path.join('KR5','KR5 sixx R650_helper.urdf') \n ########################\n ## loading world/skels\n #modelLocs = ['getUpWithHelperBot3D_damp.skel'] #for two biped skels \n #kr5Loc = path.join('KR5','KR5 sixx R650.urdf') #for kr5 arm as assistant \n #for debugging - moves arm out of way\n #kr5Loc = path.join('KR5','KR5 sixx R650_moveArm.urdf') #for kr5 arm as assistant \n\n #modelLocs = [kr5Loc,'getUpWithHelperBot3D_arm.skel'] #regular biped as ana\n #kima biped below with new kima skel - experienced ode issues which killed rllab, might be fixable with updating dart to 6.4.0+ using wenhao's modified lcp.cpp seems to address the issue.\n #modelLocs = [kr5Loc,'kima/getUpWithHelperBot3D_armKima.skel']\n #kima biped below with kima skel from 2/18 - different joint limits, mapping of dofs and euler angle layout of root - MUST KEEP OLD in file name - used for both new experiments and ak's policy with constraint\n #modelLocs = [kr5Loc,'kima/getUpWithHelperBot3D_armKima_old.skel'] \n #for debugging - moves arm Platform out of the way\n #modelLocs = [kr5Loc,'kima/getUpWithHelperBot3D_armKima_old_moveArm.skel'] \n\n #set pose to not be prone - needs to be set before call to parent ctor - do this for testing\n self.setInitPoseAsGoalState = False\n fs = 1.0\n #multiple of frame skip\n ts = .01/fs\n assist2bot_env.DartAssist2Bot_Env.__init__(self, modelLocs, fs, dt=ts, helperBotLoc=kr5HelperBotLoc, disableViewer=False)\n \n #load eef traj final location fleishman distribution object based on sampled locations of standing ana moving reach hand\n _ = self.sampleGoalEefRelLoc() \n \n #connect human to constraint \n self.connectHuman = True\n #connect bot to constraint - if bot is connected then constraint should be passive, but dy\n self.connectBot = True\n\n #initialize all force and trajectory values: extAssistSize=6 will add force application location target to observation\n #NOTE :if bot is connected and actively solving, trajectory _MUST_ be passive or explodes\n #set forcePassive to true, setTrajDyn to true to use optimization process\n #set forcePassive and setTrajDyn to false, use robotArmSkelHolder, to demo behavior\n #set to false to use IK\n forcePassive = True\n #trajTyp = 'servo' #servo joint cannot be moved by external force - can train with servo, but to use bot must use freejoint\n trajTyp = 'gauss' #gauss joint cannot be solved dynamically, must be displaced kinematically, or else set to passive\n self.initAssistTrajVals(extAssistSize=3, 
useANAHeightTraj=False, trajTyp=trajTyp, setTrajDynamic=True, setTrajPassive=forcePassive)#(self.connectBot or forcePassive))\n #self.initAssistTrajVals(extAssistSize=3, useANAHeightTraj=False, trajTyp='gauss', setTrajDynamic=True, setTrajPassive=False)\n \n #whether or not to stop when trajectory is finished\n self.stopWhenTrajDone = False\n #allow pause directives in code, wait on user input for debugging\n self.allowPauseForInput = True\n #display debug information regarding force application - turn off if training\n self.dbgAssistData = False \n #display ANA reward dbg data - slow, make false if training\n self.dbgANAReward = True\n #calc and display post-step ANA eef force - make false if training\n self.dbgANAEefFrc = False\n #display full ANA force results if dbgANAEefFrc is true\n self.dbgANAFrcDet = False\n ############################\n # set human/robot conection and motion trajectory object and params - if train==True then overrides dynamicbot to be false (OPT to solve for location is expensive)\n #self.trainPolicy is defined as static variable in dart_env_2bot TODO make parent class method that enables this to be trained -after- object is instanced (??)\n #setBotSolve : whether or not helper bot's motion is solved\n #setBotDynamic : whether bot is set to be dynamically simulated or not (if not mobile is set to false)\n #botSolvingMethod is type of solving Bot should engage in : 0 is IK, 1 is constraint optimization dyn, 2 is IK-SPD, 3 is mimic generates desired target force to be applied to bot\n #spd gain is only used for IK_SPD solve \n #frcBasedAssist is whether this environment uses a force as the assistive component of observation, or something else, such as displacement\n botDict = defaultdict(int,{'setBotSolve':1, 'setBotDynamic':1, 'botSolvingMethod':2, 'SPDGain':10000000, 'frcBasedAssist':False})\n self.setTrainAndInitBotState(self.trainPolicy, botDict=botDict) \n\n utils.EzPickle.__init__(self)", "title": "" }, { "docid": "32d090916206a0077a0d61719ee70e59", "score": "0.5934345", "text": "def __init__(self, arg_robot_name):\n # Initialise node\n rospy.init_node('node_moveit_%s' % arg_robot_name, anonymous=True)\n\n self.tf_listener = tf.TransformListener()\n\n self._robot_ns = '/' + arg_robot_name\n\n moveit_commander.roscpp_initialize(sys.argv)\n self._robot = moveit_commander.RobotCommander(\n robot_description=self._robot_ns + \"/robot_description\",\n ns=self._robot_ns)\n self._scene = moveit_commander.PlanningSceneInterface(ns=self._robot_ns)\n self._group = moveit_commander.MoveGroupCommander(\n self._planning_group,\n robot_description=self._robot_ns + \"/robot_description\",\n ns=self._robot_ns)\n\n rospy.Publisher(\n self._robot_ns + '/move_group/display_planned_path',\n moveit_msgs.msg.DisplayTrajectory, queue_size=1)\n\n actionlib.SimpleActionClient(\n self._robot_ns + '/execute_trajectory',\n moveit_msgs.msg.ExecuteTrajectoryAction).wait_for_server()\n\n self._touch_links = self._robot.get_link_names(\n group=self._planning_group)\n\n # Current State of the Robot is needed to add box to planning scene\n self._robot.get_current_state()\n\n rospy.loginfo(\n '\\033[94m' + \"Planning Group: {}\".format(self._group.get_planning_frame()) + '\\033[0m')\n rospy.loginfo(\n '\\033[94m' + \"End Effector Link: {}\".format(\n self._group.get_end_effector_link()) + '\\033[0m')\n rospy.loginfo(\n '\\033[94m' + \"Group Names: {}\".format(self._robot.get_group_names()) + '\\033[0m')\n\n rospy.loginfo(\"Package Path: {}\".format(self.file_path))\n\n 
rospy.loginfo('\\033[94m' + \" >>> Ur5Moveit init done.\" + '\\033[0m')", "title": "" }, { "docid": "8b4bbe00eff1f8a67be664817a149652", "score": "0.59228784", "text": "def _set_init_pose(self):\n self.create_circle(radius=5)\n machine_name = '/machine_'+str(self.robotID);\n self.command_topic = machine_name+\"/command\"\n self._cmd_vel_pub = rospy.Publisher(self.command_topic, uav_pose, queue_size=1)\n outPose = uav_pose()\n outPose.header.stamp = rospy.Time.now()\n outPose.header.frame_id=\"world\"\n\n outPose.POI.x = 0\n outPose.POI.y = 0\n outPose.POI.z = 0\n\n r = 8\n t = np.random.choice(63,1);\n outPose.position.x = r*np.cos(self.theta[t[0]])\n outPose.position.y = r*np.sin(self.theta[t[0]])\n outPose.position.z = -r\n self._cmd_vel_pub.publish(outPose)", "title": "" }, { "docid": "6b126e009b365da3e6e7b79f0d0c0433", "score": "0.5911258", "text": "def __create_scene(self):\n\n print 'creating a scene'\n\n # create scenegraph by the ifgi scene parser\n\n _infilepath = '../../sampledata/cornel_box.ifgi'\n ifgireader = IfgiSceneReader.IfgiSceneReader()\n if(not ifgireader.read(_infilepath)):\n raise StandardError, ('load file [' + _infilepath + '] failed.')\n\n self.__scenegraph = SceneGraph.create_ifgi_scenegraph(ifgireader)\n self.__scenegraph.update_all_bbox()\n\n # create the global material_name -> material lookup map\n self.__scene_geo_mat.append_ifgi_data(ifgireader)\n self.__scene_geo_mat.print_summary()\n\n # -- now all primitive (TriMesh) can look up the material\n\n # set the camera\n # default camera should exist\n print ifgireader.camera_dict_dict\n assert('default' in ifgireader.camera_dict_dict)\n\n cur_cam = self.__scenegraph.get_current_camera()\n cur_cam.set_config_dict(ifgireader.camera_dict_dict['default'])\n print 'resize the camera resolution from [' +\\\n str(cur_cam.get_resolution_x()) + ' ' +\\\n str(cur_cam.get_resolution_y()) + '] -> ' +\\\n str(self.__image_xsize) + ' ' +\\\n str(self.__image_ysize) + ']'\n cur_cam.set_resolution_x(self.__image_xsize)\n cur_cam.set_resolution_y(self.__image_ysize)\n\n # added RGBA buffer and Hit buffer to the current camera.\n imgsz = (cur_cam.get_resolution_x(), cur_cam.get_resolution_y(), 4)\n cur_cam.set_film('Hit', Film.ImageFilm(imgsz, 'Hit'))\n cur_cam.set_film('RGBA', Film.ImageFilm(imgsz, 'RGBA'))\n cur_cam.set_film('Zbuffer', Film.ImageFilm(imgsz, 'Zbuffer'))\n # cur_cam.print_obj()", "title": "" }, { "docid": "2093c3dc39a45f008d6abe5dca6758ea", "score": "0.5906151", "text": "def init(contr):\n\n init_logging()\n\n if MULTINODE_SUPPORT:\n # Configuration for the multi-node simulation\n try:\n node_name = scene_config.node_config[\"node_name\"]\n server_address = scene_config.node_config[\"server_address\"]\n server_port = scene_config.node_config[\"server_port\"]\n except (NameError, AttributeError) as detail:\n logger.warning(\"No node configuration found. 
Using default values for this simulation node.\\n\\tException: \", detail)\n node_name = \"temp_name\"\n server_address = \"localhost\"\n server_port = 65000\n GameLogic.node_instance = morse.core.multinode.SimulationNodeClass(node_name, server_address, server_port)\n\n\n logger.log(SECTION, 'PRE-INITIALIZATION')\n # Get the version of Python used\n # This is used to determine also the version of Blender\n GameLogic.pythonVersion = sys.version_info\n GameLogic.blenderVersion = bpy.app.version\n logger.info (\"Python Version: %s.%s.%s\" % GameLogic.pythonVersion[:3])\n logger.info (\"Blender Version: %s.%s.%s\" % GameLogic.blenderVersion)\n\n GameLogic.morse_initialised = False\n GameLogic.base_clock = time.clock()\n GameLogic.current_time = 0.0\n # Variable to keep trac of the camera being used\n GameLogic.current_camera_index = 0\n init_ok = True\n\n\n logger.log(SECTION, 'SUPERVISION SERVICES INITIALIZATION')\n init_ok = init_supervision_services()\n \n logger.log(SECTION, 'SCENE INITIALIZATION')\n init_ok = init_ok and create_dictionaries()\n init_ok = init_ok and add_modifiers()\n init_ok = init_ok and link_middlewares()\n init_ok = init_ok and link_services()\n init_ok = init_ok and load_overlays()\n\n if init_ok:\n logger.log(ENDSECTION, 'SCENE INITIALIZED')\n check_dictionaries()\n GameLogic.morse_initialised = True\n else:\n logger.critical('INITIALIZATION FAILED!')\n logger.info(\"Exiting now.\")\n contr = GameLogic.getCurrentController()\n close_all(contr)\n quit(contr)\n\n\n # Set the default value of the logic tic rate to 60\n #GameLogic.setLogicTicRate(60.0)\n #GameLogic.setPhysicsTicRate(60.0)", "title": "" }, { "docid": "23e60a196e673cd6eacb4c3a88b76bb9", "score": "0.59039694", "text": "def __init__(self, init_position=[0.0, 0.0, 0.0]):\n\n # Robot construction parameters\n self.R = 26 # mm\n self.L = 152 # mm\n\n ##################################################\n # Motors and sensors setup\n\n # Create an instance of the BrickPi3 class. 
BP will be the BrickPi3 object.\n self.BP = brickpi3.BrickPi3()\n\n # Configure sensors, for example a touch sensor.\n self.BP.set_sensor_type(self.BP.PORT_1, self.BP.SENSOR_TYPE.TOUCH)\n\n # reset encoder B-right and C-left (or all the motors you are using)\n self.BP.offset_motor_encoder(self.BP.PORT_B,\n self.BP.get_motor_encoder(self.BP.PORT_B))\n self.BP.offset_motor_encoder(self.BP.PORT_C,\n self.BP.get_motor_encoder(self.BP.PORT_C))\n\n # reset encoder A-hook\n self.BP.offset_motor_encoder(self.BP.PORT_A,\n self.BP.get_motor_encoder(self.BP.PORT_A))\n\n ##################################################\n # odometry shared memory values\n self.x = Value('d',0.0)\n self.y = Value('d',0.0)\n self.th = Value('d',0.0)\n self.finished = Value('b',1) # boolean to show if odometry updates are finished\n\n # if we want to block several instructions to be run together, we may want to use an explicit Lock\n self.lock_odometry = Lock()\n\n # Initialize camera\n self.cam = picamera.PiCamera()\n self.cam.resolution = (320, 240)\n # It'll contain the photo\n self.rawCapture = PiRGBArray(self.cam, size=(320, 240))\n\n # odometry update period --> UPDATE value!\n self.P = .005 # 5 ms\n \n # allow the camera to warmup\n time.sleep(0.1)", "title": "" }, { "docid": "ef30b1352aba3cd10063347f9e0111b4", "score": "0.59008735", "text": "def __init__(self):\n\n # The version number is displayed on the PSLA main GUI title line.\n self.version = \"Planetary System LRGB Aligner 0.5.1\"\n\n # Set internal parameters which cannot be changed by the user.\n self.wait_for_workflow_initialization = 0.1\n self.polling_interval = 0.1\n self.feature_matching_norm = cv2.NORM_HAMMING\n self.cross_check = True\n\n # Set initial values for parameters which can be modified by the user.\n self.restore_standard_parameters()\n\n # Set a flag that the configuration has been changed.\n self.configuration_changed = True", "title": "" }, { "docid": "711bdf150fefce62cc3ee5d3bb0197fc", "score": "0.5896364", "text": "def __init__(self, log=True, ip='192.168.0.88', debug=False):\n self.log = log\n self.ip_address = ip\n self.debug = debug\n self.path = os.path.dirname(os.path.realpath(__file__)) # gets working dir of this file\n\n # arm movement\n self.target_coords = Float64MultiArray() # the three absolute target coordinates\n self.target_coords.layout.dim.append(MultiArrayDimension()) # coordinates\n self.target_coords.layout.dim[0].label = \"coordinates\"\n self.target_coords.layout.dim[0].size = 3\n\n self.target_coords.layout.dim.append(MultiArrayDimension()) # speed\n self.target_coords.layout.dim[1].label = \"speed\"\n self.target_coords.layout.dim[1].size = 1\n\n # gripper\n self.target_gripper = Float64MultiArray() # gripper stuff\n self.target_gripper.layout.dim.append(MultiArrayDimension()) # gripper\n self.target_gripper.layout.dim[0].label = \"gripper\"\n self.target_gripper.layout.dim[0].size = 3 # width, speed, force\n\n # ros initiation\n rospy.init_node('franka_python_node', anonymous=True)\n self.pub_move_to = rospy.Publisher('franka_move_to', Float64MultiArray, queue_size=1)\n self.pub_grasp = rospy.Publisher('franka_gripper_grasp', Float64MultiArray, queue_size=0)\n self.pub_move_grip = rospy.Publisher('franka_gripper_move', Float64MultiArray, queue_size=0)\n\n time.sleep(0.5)", "title": "" }, { "docid": "ee8704d546848512443f76cfc8757105", "score": "0.5889011", "text": "def _load_model(self):\n super()._load_model()\n\n # Adjust base pose accordingly\n xpos = 
self.robots[0].robot_model.base_xpos_offset[\"table\"](self.table_full_size[0])\n self.robots[0].robot_model.set_base_xpos(xpos)\n\n # Load model for table top workspace\n mujoco_arena = UltrasoundArena()\n\n # Arena always gets set to zero origin\n mujoco_arena.set_origin([0, 0, 0])\n\n # Initialize torso object\n self.torso = SoftBoxObject(name=\"torso\") if self.use_box_torso else SoftTorsoObject(name=\"torso\")\n\n if self.torso_solref_randomization:\n # Randomize torso's stiffness and damping (values are taken from my project thesis)\n stiffness = np.random.randint(1300, 1600)\n damping = np.random.randint(17, 41)\n\n self.torso.set_damping(damping)\n self.torso.set_stiffness(stiffness)\n\n # Create placement initializer\n if self.placement_initializer is not None:\n self.placement_initializer.reset()\n self.placement_initializer.add_objects(self.torso)\n else:\n self.placement_initializer = UniformRandomSampler(\n name=\"ObjectSampler\",\n mujoco_objects=[self.torso],\n x_range=[0, 0], #[-0.12, 0.12],\n y_range=[0, 0], #[-0.12, 0.12],\n rotation=None,\n ensure_object_boundary_in_range=False,\n ensure_valid_placement=True,\n reference_pos=self.table_offset,\n z_offset=0.005,\n )\n\n # task includes arena, robot, and objects of interest\n self.model = UltrasoundTask(\n mujoco_arena=mujoco_arena,\n mujoco_robots=[robot.robot_model for robot in self.robots], \n mujoco_objects=[self.torso]\n )", "title": "" }, { "docid": "10fdc16326185ef606a84c24555c28a7", "score": "0.58832616", "text": "def __init__(self, init_pose=None, init_velocities=None, \n init_angle_velocities=None, runtime=5., target_pos=None):\n # Simulation\n self.sim = PhysicsSim(init_pose, init_velocities, init_angle_velocities, runtime) \n self.action_repeat = 3\n\n self.state_size = self.action_repeat * 6\n self.action_low = 400\n self.action_high = 900\n self.action_size = 4\n\n # Goal\n self.target_pos = target_pos if target_pos is not None else np.array([0., 0., 10.])\n # Level flight not spinning in any direction\n self.target_angles = np.array([0.,0.,0.]) \n self.target_Ang_V = np.array([0., 0., 0.])\n # We should only be going up. 
Only Z velocity is a target\n self.target_v = init_velocities if init_velocities is not None else np.array([0., 0., 10.]) \n self.init_pose = init_pose", "title": "" }, { "docid": "41f1a9186df216999f47da6f9786a48b", "score": "0.58743954", "text": "def initialize_agents(self):\n\n self.ROS_states_set = False\n # Wait for the /all_agents/model_states callback to set the states for all agents\n while not self.ROS_states_set:\n continue\n\n for agent_name in self.ROS_states:\n # Wait for the /agent_name/goal callback to set the goal for each agent\n while agent_name not in self.goals:\n continue\n\n for agent_name in self.ROS_states:\n ros_state = self.ROS_states[agent_name]\n goal = self.goals[agent_name]\n\n px, py = ros_state.get_position()\n vx, vy = ros_state.get_velocity()\n radius = ros_state.get_radius()\n pitch, roll = ros_state.get_theta()\n yaw = ros_state.get_heading()\n\n # Todo: Make a goal class and use getter\n gx, gy = goal.x, goal.y\n vpref = ros_state.get_vpref()\n dt = self.dt\n # Sim Heading is same as real heading before any actions are taken\n sim_heading = yaw\n\n # Commands are issued in body frame for ballbot and global frame for sphere obstacles\n if agent_name == 'ballbot':\n frame = 'body'\n else:\n frame = 'global'\n\n controller_type = rospy.get_param('/sim_experiment_node/' + agent_name + '/controller')\n controller = controller_factory[controller_type]()\n\n # Additional steps for initializing ORCA\n if isinstance(controller, ORCA):\n controller.set_radius(radius)\n\n # Additional steps for initializing MPC controller\n if isinstance(controller, MPC):\n rollout_type = rospy.get_param('/sim_experiment_node/' + agent_name + '/rollout_policy')\n rollout_policy = controller_factory[rollout_type]()\n\n rollout_horizon = self.rollout_horizon\n num_rollouts = self.num_rollouts\n span = self.span\n\n traj_gen = TrajGen(dt, frame, rollout_policy, rollout_horizon, num_rollouts, span)\n controller.set_traj_gen(traj_gen)\n controller.set_traj_gen_record(self.traj_gen_recorder)\n\n cost_params = load_cost_params(agent_name)\n num_obs = len(self.ROS_states) - 1\n\n traj_eval = TrajEval(cost_params, num_obs, vpref, dt, rollout_horizon, radius)\n controller.set_traj_eval(traj_eval)\n\n # Create agent and store it in class member variable (dict)\n self.agents[agent_name] = Agent(px, py, vx, vy, radius, vpref, roll, pitch, yaw, gx, gy, sim_heading, dt,\n frame, controller)\n\n # Toggle to false now that the experiment has been reset\n self.reset_experiment = False", "title": "" }, { "docid": "7c414d23e89d51ae91d1c41d44e3ba89", "score": "0.5863105", "text": "def __init__(self):\n # type: () -> None\n # Init package\n rospack = rospkg.RosPack()\n self.package_path = rospack.get_path('bitbots_vision')\n\n rospy.init_node('bitbots_dynamic_color_space')\n rospy.loginfo('Initializing dynamic color-space...')\n\n self.bridge = CvBridge()\n\n # Init params\n self.vision_config = {}\n\n # Subscribe to 'vision_config'-message\n # The message topic name MUST be the same as in the config publisher in vision.py\n self.sub_vision_config_msg = rospy.Subscriber(\n 'vision_config',\n Config,\n self.vision_config_callback,\n queue_size=1,\n tcp_nodelay=True)\n\n rospy.spin()", "title": "" }, { "docid": "0a553f864df44f183d265d75cafc8787", "score": "0.58541", "text": "def __init__(self):\n self._pub_rate = rospy.Publisher('/robot/joint_state_publish_rate', UInt16)\n self._left_arm = baxter_interface.limb.Limb(\"left\")\n self._right_arm = baxter_interface.limb.Limb(\"right\")\n 
self._right_joint_names = self._right_arm.joint_names()\n self._left_joint_names = self._left_arm.joint_names()\n self._head = baxter_interface.Head()\n\n # set joint state publishing to 500Hz\n self._rs = baxter_interface.RobotEnable(CHECK_VERSION)\n self._init_state = self._rs.state().enabled\n self._rs.enable()\n self._pub_rate.publish(500)", "title": "" }, { "docid": "9c1538ad5f62a2ff18b6e31c6a5208db", "score": "0.5846115", "text": "def __init__(self):\n #: Parent all objects to apply camera rotation, while camera stays the same.\n self.objects = SceneNode.create(\"objects\")\n\n defCam = getDefaultCamera()\n defCam.setControllerEnabled(False)\n defCam.setNearFarZ(0.001, 1000)\n defCam.setPosition(-defCam.getHeadOffset())\n\n #: Absolute position and orientation of camera, headOffset is substracted.\n #: View in negative Z\n self.initialCamRotation = [0, 0, 0]\n self.initialCamPosition = [0, 0, 0]\n\n #: Parent camera to abstract from headOffset\n self.cameraObject = SceneNode.create(\"Camera\")\n self.cameraObject.addChild(defCam)", "title": "" }, { "docid": "8a75cc5e4b2b6736850d3938fd74f8a8", "score": "0.5842022", "text": "def __init__(self):\n # create a node name run_turtlebot_node\n rospy.init_node('run_turtlebot_node', anonymous=True)\n\n sound_client = SoundClient()\n sound = rospy.Publisher('sound', SoundClient, queue_size=10)\n rospy.sleep(2)\n sound.publish('/home/realm/Downloads/chime_up.wav')\n\n # set update rate; i.e. how often we send commands (Hz)\n self.rate = rospy.Rate(10)\n\n # create transform listener to transform coords from turtlebot frame to absolute frame\n self.listener = tf.TransformListener()\n\n # create a position of type Point\n self.position = Point()\n \n # create an object to send commands to turtlebot of type Twist\n # allows us to send velocity and rotation commands\n self.move_command = Twist()\n\n # set odom_frame to the /odom topic being published by the turtlebot\n self.odom_frame = '/odom'\n\n\n #create a publisher node to send velocity commands to turtlebot\n self.command_publisher = rospy.Publisher('cmd_vel', Twist, queue_size=10)\n\n # create a subscriber to get measurements from lidar sensor.\n # currently not used, but is left here in case lidar measurements\n # are needed in the future. 
See also the laser_data.py script.\n rospy.Subscriber('/scan', LaserScan, laser_data.get_laser_data)\n\n # create a subscriber for battery level\n rospy.Subscriber('battery_state', BatteryState, battery_status.battery)\n\n #TODO what's this line do?\n rospy.on_shutdown(self.shutdown)\n\n\n # find the coordinate conversion from the turtlebot to the ground truth frame\n try:\n self.listener.waitForTransform(self.odom_frame, \"base_footprint\", rospy.Time(), rospy.Duration(1.0))\n self.base_frame = \"base_footprint\"\n except(tf.Exception, tf.LookupException, tf.ConnectivityException):\n try:\n self.listener.waitForTransform(self.odom_frame, 'base_link', rospy.Time(), rospy.Duration(1.0))\n self.base_frame = 'base_link'\n except(tf.Exception, tf.ConnectivityException, tf.LookupException):\n rospy.loginfo(\"Cannot find transform between odom and base_link or base_footprint\")\n rospy.signal_shutdown(\"tf Exception\")\n\n # IMPORTANT: When substituting in a new control script, you must update this line\n self.command = turtlebot_command(self.command_publisher, self.rate, self.listener, self.position, self.move_command, self.odom_frame, self.base_frame)\n\n # bool flag to stop commands from running multiple times (see run_turtlebot() below)\n self.first_turn = True", "title": "" }, { "docid": "3ac14d9c178bdc089ab39b00bb9b4c12", "score": "0.5836219", "text": "def __init__(self, scene, camera, params=None):\n self.scene = scene\n self.camera = camera\n self.params = params if params is not None else DEFAULT_RENDERER_PARAMS", "title": "" }, { "docid": "ef5dcb4ac167b46aece3240ebb62158f", "score": "0.58286494", "text": "def reconfigure(self):\n self._clear()\n\n self._setup_scene()\n self._load_agent()\n self._load_actors()\n self._load_articulations()\n self._setup_cameras()\n self._setup_lighting()\n\n # Cache actors and articulations\n self._actors = self.get_actors()\n self._articulations = self.get_articulations()\n\n self._load_background()\n\n if self._viewer is not None:\n self._setup_viewer()", "title": "" }, { "docid": "ca9187c975f6897c7a1e80c3cb9145b6", "score": "0.58255166", "text": "def __init__(self):\n \n \"\"\"\n Initializes a new CATVehicle environment.\n Turtlebot2 doesnt use controller_manager, therefore we wont reset the \n controllers in the standard fashion. For the moment we wont reset them.\n \n To check any topic we need to have the simulations running, we need to do two things:\n 1) Unpause the simulation: without that the stream of data doesnt flow. 
This is for simulations\n that are pause for whatever the reason\n 2) If the simulation was running already for some reason, we need to reset the controlers.\n This has to do with the fact that some plugins with tf, dont understand the reset of the simulation\n and need to be reseted to work properly.\n \"\"\"\n \n rospy.logdebug(\"Start CATVehicle_ENV INIT...\")\n \n # These 4 publishers control the 4 wheels of the car\n self.controllers_list = []\n self.publishers_array = []\n self.robot_name_space = \"\"\n self.reset_controls = False\n\n \n \n # We launch the init function of the Parent Class robot_gazebo_env.RobotGazeboEnv\n super(CATVehicleEnv, self).__init__(controllers_list=self.controllers_list,\n robot_name_space=self.robot_name_space,\n reset_controls=False,\n start_init_physics_parameters=False,\n reset_world_or_sim=\"WORLD\")\n \n \n #self._bl = rospy.Publisher(\"/catvehicle/joint1_velocity_controller/command\", Float32, self.back_left_vel)\n #self._br = rospy.Publisher(\"/catvehicle/joint2_velocity_controller/command\", Float32, self.back_right_vel)\n #self._fl = rospy.Publisher(\"/catvehicle/front_left_steering_position_controller/command\", Float 32, self.front_left_steering)\n #self._fr = rospy.Publisher(\"/catvehicle/front_right_steering_position_controller/command\", Float 32, self.front_right_steering)\n \n self.gazebo.unpauseSim()\n self._check_all_sensors_ready()\n \n self._cmd_vel_pub = rospy.Publisher('/catvehicle/cmd_vel', Twist, queue_size=1)\n \n rospy.Subscriber(\"/catvehicle/distanceEstimatorSteeringBased/dist\", Float64, self._dist_callback)\n rospy.Subscriber(\"/catvehicle/distanceEstimatorSteeringBased/angle\", Float64, self._angle_callback)\n \n self._check_publishers_connection()\n self.gazebo.pauseSim()\n \n rospy.logdebug(\"Finished TurtleBot2Env INIT...\")", "title": "" }, { "docid": "1bab8ba09858656e0d0b386402cbf60f", "score": "0.5811727", "text": "def __init__(self):\n\n # print \"Hello from IsotropicLinearElastic.__init__!\"\n # print \"\"\n self.materialModel = 1\n self.numberProperties = 3\n self.materialVariation = False\n self.numberStateVariables = 2\n self.propertyDict = {'density': None,\n 'youngsModulus': None,\n 'poissonsRatio': None}\n self.propertyPosition = ['density',\n 'youngsModulus',\n 'poissonsRatio']\n self.propertyList = [0.0,\n 0.0,\n 0.0]\n return", "title": "" }, { "docid": "b89d13bba743c6f6fbe30a5b2fe11302", "score": "0.58108926", "text": "def __init__ (self, environment, agents, y = None, x = None):\n \n # Create parameters.\n self._x = 0\n self._y = 0\n \n if (x == None):\n self._x = random.randint(0, 99)\n # If x has no value, assign it a random integer.\n else:\n self._x = x\n # Otherwise, assign it the value from the web data.\n \n if (y == None):\n self._y = random.randint(0, 99)\n # If y has no value, assign it a random integer.\n else:\n self._y = y\n # Otherwise, assign it the value from the web data.\n \n # Allow agents to access environment data.\n self.environment = environment\n \n # Allow agents to access other agent positions.\n self.agents = agents\n \n # Set default agent store to zero.\n self.store = 0", "title": "" }, { "docid": "19cc92b5156e0ac47da36fb293301c88", "score": "0.58042127", "text": "def __init__(self, robot):\n super().__init__(name = \"shooter\")\n self.robot = robot\n\n self.top = rev.CANSparkMax(9, rev.CANSparkMax.MotorType.kBrushless)\n self.bottom = rev.CANSparkMax(10, rev.CANSparkMax.MotorType.kBrushless)", "title": "" }, { "docid": "4dfc1ed18f8a6b84adf1cf325e69f13e", "score": "0.57950777", 
"text": "def robotInit(self):\n #want to change this to Xbox 360 controller eventually... probably sooner rather\n #than later.\n #\n #This is for a USB camera. Uncomment it if we aren't using the Axis.\n self.camera = wpilib.USBCamera()\n self.camera.setExposureManual(50)\n self.camera.setBrightness(80)\n self.camera.updateSettings()\n self.camera.setFPS(10)\n self.camera.setSize(320, 240)\n self.camera.setWhiteBalanceAuto()\n #self.camera.setQuality(30)\n\n server = wpilib.CameraServer.getInstance()\n server.startAutomaticCapture(self.camera)\n\n self.drive = wpilib.RobotDrive(3, 1, 2, 0)\n self.drive.setExpiration(0.1)\n\n self.stick_left = wpilib.Joystick(0)\n self.stick_right = wpilib.Joystick(1)\n\n self.drive.setInvertedMotor(self.drive.MotorType.kFrontRight, True)\n self.drive.setInvertedMotor(self.drive.MotorType.kRearRight, True)\n\n #self.gyro = wpilib.Gyro(0)\n\n self.aux_left = wpilib.Jaguar(6)\n self.aux_right = wpilib.Jaguar(4)\n self.window_motor = wpilib.Jaguar(5)\n\n self.smart_dashboard = NetworkTable.getTable(\"SmartDashboard\")\n\n self.mast_pot = wpilib.AnalogPotentiometer(0)\n self.grabba_pot = wpilib.AnalogPotentiometer(1)\n self.lift_pot = wpilib.AnalogPotentiometer(2)\n\n def aux_combined(output):\n \"\"\"use for PID control\"\"\"\n self.aux_left.pidWrite(output)\n self.aux_right.pidWrite(output)\n\n self.grabba_pid = wpilib.PIDController(4, 0.07, 0, self.grabba_pot.pidGet, self.window_motor.pidWrite)\n self.grabba_pid.disable()\n\n self.lift_pid = wpilib.PIDController(4, 0.07, 0, self.lift_pot.pidGet, aux_combined)\n self.lift_pid.disable()", "title": "" }, { "docid": "5d170d869c6ea33e7fc6139fcf46274c", "score": "0.5784534", "text": "def set_up(self):\n\n\n super(GKStrategy, self).set_up()\n self.sock.sendto('', self.VISION_SERVER)\n\n\n #Array for Initial Position for each team, index 0 has red robot positions\n InitialPositions = [\n [\n RobotPosition(0, 0, 180),\n RobotPosition(50, 10, 180)\n ],\n [\n RobotPosition(0, 0, 0),\n RobotPosition(-50, -10, 0)\n ]\n ]\n #setting robot positions up\n self.goalkeeper_controller = Controller()\n #self.defensor_controller = Controller()", "title": "" }, { "docid": "cd4b6220a318ef8e62c2c550646b3173", "score": "0.577826", "text": "def __init__(self):\n\n # Only variable needed to be set here\n \"\"\"\n For this version, we consider 6 actions\n 1-2) Increment/Decrement haa_joint\n 3-4) Increment/Decrement hfe_joint\n 5-6) Increment/Decrement kfe_joint\n \"\"\"\n rospy.logdebug(\"Start HopperStayUpEnv INIT...\")\n\n # This is the path where the simulation files, the Task and the Robot gits will be downloaded if not there\n ros_ws_abspath = rospy.get_param(\"/monoped/ros_ws_abspath\", None)\n assert ros_ws_abspath is not None, \"You forgot to set ros_ws_abspath in your yaml file of your main RL script. 
Set ros_ws_abspath: \\'YOUR/SIM_WS/PATH\\'\"\n assert os.path.exists(ros_ws_abspath), \"The Simulation ROS Workspace path \" + ros_ws_abspath + \\\n \" DOESNT exist, execute: mkdir -p \" + ros_ws_abspath + \\\n \"/src;cd \" + ros_ws_abspath + \";catkin_make\"\n\n ROSLauncher(rospackage_name=\"legged_robots_sims\",\n launch_file_name=\"start_world.launch\",\n ros_ws_abspath=ros_ws_abspath)\n\n # Load Params from the desired Yaml file\n LoadYamlFileParamsTest(rospackage_name=\"openai_ros\",\n rel_path_from_package_to_file=\"src/openai_ros/task_envs/hopper/config\",\n yaml_file_name=\"hopper_stay_up.yaml\")\n\n number_actions = rospy.get_param('/monoped/n_actions')\n self.action_space = spaces.Discrete(number_actions)\n\n # We set the reward range, which is not compulsory but here we do it.\n self.reward_range = (-numpy.inf, numpy.inf)\n\n # Actions and Observations\n\n self.init_joint_states = Vector3()\n self.init_joint_states.x = rospy.get_param(\n '/monoped/init_joint_states/haa_joint')\n self.init_joint_states.y = rospy.get_param(\n '/monoped/init_joint_states/hfe_joint')\n self.init_joint_states.z = rospy.get_param(\n '/monoped/init_joint_states/kfe_joint')\n\n # Get Desired Point to Get\n self.desired_point = Point()\n self.desired_point.x = rospy.get_param(\"/monoped/desired_point/x\")\n self.desired_point.y = rospy.get_param(\"/monoped/desired_point/y\")\n self.desired_point.z = rospy.get_param(\"/monoped/desired_point/z\")\n self.accepted_error_in_des_pos = rospy.get_param(\n \"/monoped/accepted_error_in_des_pos\")\n\n self.desired_yaw = rospy.get_param(\"/monoped/desired_yaw\")\n\n self.joint_increment_value = rospy.get_param(\n \"/monoped/joint_increment_value\")\n self.init_move_time = rospy.get_param(\"/monoped/init_move_time\", 1.0)\n self.move_time = rospy.get_param(\"/monoped/move_time\", 0.05)\n self.check_position = rospy.get_param(\"/monoped/check_position\", True)\n\n self.accepted_joint_error = rospy.get_param(\n \"/monoped/accepted_joint_error\")\n self.update_rate = rospy.get_param(\"/monoped/update_rate\")\n\n self.dec_obs = rospy.get_param(\n \"/monoped/number_decimals_precision_obs\")\n\n self.desired_force = rospy.get_param(\"/monoped/desired_force\")\n\n self.max_x_pos = rospy.get_param(\"/monoped/max_x_pos\")\n self.max_y_pos = rospy.get_param(\"/monoped/max_y_pos\")\n\n self.min_height = rospy.get_param(\"/monoped/min_height\")\n self.max_height = rospy.get_param(\"/monoped/max_height\")\n\n self.distance_from_desired_point_max = rospy.get_param(\n \"/monoped/distance_from_desired_point_max\")\n\n self.max_incl_roll = rospy.get_param(\"/monoped/max_incl\")\n self.max_incl_pitch = rospy.get_param(\"/monoped/max_incl\")\n self.max_contact_force = rospy.get_param(\"/monoped/max_contact_force\")\n\n self.maximum_haa_joint = rospy.get_param(\"/monoped/maximum_haa_joint\")\n self.maximum_hfe_joint = rospy.get_param(\"/monoped/maximum_hfe_joint\")\n self.maximum_kfe_joint = rospy.get_param(\"/monoped/maximum_kfe_joint\")\n self.min_kfe_joint = rospy.get_param(\"/monoped/min_kfe_joint\")\n\n # We place the Maximum and minimum values of observations\n self.joint_ranges_array = {\"maximum_haa\": self.maximum_haa_joint,\n \"minimum_haa_joint\": -self.maximum_haa_joint,\n \"maximum_hfe_joint\": self.maximum_hfe_joint,\n \"minimum_hfe_joint\": self.maximum_hfe_joint,\n \"maximum_kfe_joint\": self.maximum_kfe_joint,\n \"min_kfe_joint\": self.min_kfe_joint\n }\n\n high = numpy.array([self.distance_from_desired_point_max,\n self.max_incl_roll,\n self.max_incl_pitch,\n 3.14,\n 
self.max_contact_force,\n self.maximum_haa_joint,\n self.maximum_hfe_joint,\n self.maximum_kfe_joint,\n self.max_x_pos,\n self.max_y_pos,\n self.max_height\n ])\n\n low = numpy.array([0.0,\n -1*self.max_incl_roll,\n -1*self.max_incl_pitch,\n -1*3.14,\n 0.0,\n -1*self.maximum_haa_joint,\n -1*self.maximum_hfe_joint,\n self.min_kfe_joint,\n -1*self.max_x_pos,\n -1*self.max_y_pos,\n self.min_height\n ])\n\n self.observation_space = spaces.Box(low, high)\n\n rospy.logdebug(\"ACTION SPACES TYPE===>\"+str(self.action_space))\n rospy.logdebug(\"OBSERVATION SPACES TYPE===>\" +\n str(self.observation_space))\n\n # Rewards\n self.weight_joint_position = rospy.get_param(\n \"/monoped/rewards_weight/weight_joint_position\")\n self.weight_contact_force = rospy.get_param(\n \"/monoped/rewards_weight/weight_contact_force\")\n self.weight_orientation = rospy.get_param(\n \"/monoped/rewards_weight/weight_orientation\")\n self.weight_distance_from_des_point = rospy.get_param(\n \"/monoped/rewards_weight/weight_distance_from_des_point\")\n\n self.alive_reward = rospy.get_param(\"/monoped/alive_reward\")\n self.done_reward = rospy.get_param(\"/monoped/done_reward\")\n\n # Here we will add any init functions prior to starting the MyRobotEnv\n super(HopperStayUpEnv, self).__init__(ros_ws_abspath)\n\n rospy.logdebug(\"END HopperStayUpEnv INIT...\")", "title": "" }, { "docid": "e73482a33704847d6357b368d0405acf", "score": "0.5773767", "text": "def __init__(self,vScene,sog,rop,vUrl):\r\n self.scene = vScene\r\n self.sog = sog\r\n self.url = vUrl\r\n self.mod = None\r\n self.scene.EventManager.OnObjectGrab += self.clicked;\r\n \r\n if rop.RexClassName == \"\":\r\n print \"create new actor\"\r\n rop.RexClassName = \"rxactor.Actor\"", "title": "" }, { "docid": "c989a59eb1b0cfb3a9bd92f4bdb55ffc", "score": "0.5773349", "text": "def __init__(self, robot, actions):\n self.robot = robot\n self.actions = actions\n self.couple_robot_to_actions()", "title": "" }, { "docid": "c989a59eb1b0cfb3a9bd92f4bdb55ffc", "score": "0.5773349", "text": "def __init__(self, robot, actions):\n self.robot = robot\n self.actions = actions\n self.couple_robot_to_actions()", "title": "" }, { "docid": "644afbd7125fe447e8b1538f930c3ec6", "score": "0.57627267", "text": "def __init__(self,\n cfg_path='template_project.yaml',\n use_visualizer=True,\n name=\"SafenetEnv\"):\n super().__init__(cfg_path, use_visualizer, name)\n self.robot_interface.reset()\n self.init_ori = self.robot_interface.ee_orientation\n self.boundary_color = [1, 0, 0] # Red for safenet boundary box.", "title": "" }, { "docid": "55507f7b9211288e34b9080587be984e", "score": "0.57596695", "text": "def __init__(self):\n rospy.init_node(\"tracker\")\n\n camera_matrix = numpy.matrix(rospy.get_param(\"~camera_matrix\"))\n use_sim = rospy.get_param(\"~use_sim\", False)\n\n if use_sim:\n self.camera = SimulatedCamera(camera_matrix)\n self.camera.set_target(\n image_file=rospy.get_param(\"~target_image\"),\n position=rospy.get_param(\"~target_position\"),\n size_in_meters=rospy.get_param(\"~target_size\"))\n else:\n self.camera = OpenCVCamera(camera_matrix)\n\n self.tracking = False\n self.last_frame_time = 0\n self.last_seen_time = 0\n self.initialize_track_filter()\n\n self.image_bridge = CvBridge()\n self.image_publisher = rospy.Publisher(\"tracker/image\",\n sensor_msgs.msg.Image, queue_size=1)\n\n self.track_publisher = rospy.Publisher(\"tracker/track\",\n TrackStamped, queue_size=1)", "title": "" }, { "docid": "2962562bdb893ca71469b03ce077b405", "score": "0.5733383", "text": "def 
__init__(self, state, window):\n # get points that define the non-rotated, non-translated mav and the mesh colors\n self.mav_points, self.mav_meshColors = self.get_points()\n\n mav_position = np.array([[state.pn], [state.pe], [-state.h]]) # NED coordinates\n # attitude of mav as a rotation matrix R from body to inertial\n R = Euler2Rotation(state.phi, state.theta, state.psi)\n # rotate and translate points defining mav\n rotated_points = self.rotate_points(self.mav_points, R)\n translated_points = self.translate_points(rotated_points, mav_position)\n # convert North-East Down to East-North-Up for rendering\n R = np.array([[0, 1, 0], [1, 0, 0], [0, 0, -1]])\n\n translated_points = R @ translated_points\n # convert points to triangular mesh defined as array of three 3D points (Nx3x3)\n mesh = self.points_to_mesh(translated_points)\n self.mav_body = gl.GLMeshItem(vertexes=mesh, # defines the triangular mesh (Nx3x3)\n vertexColors=self.mav_meshColors, # defines mesh colors (Nx1)\n drawEdges=True, # draw edges between mesh elements\n smooth=False, # speeds up rendering\n computeNormals=False) # speeds up rendering\n window.addItem(self.mav_body) # add body to plot", "title": "" }, { "docid": "c4f8c179884cbaa6e9886d6cda077618", "score": "0.5720224", "text": "def setUp(self):\n self.biadjacency = movie_actor()\n n_row, n_col = self.biadjacency.shape\n self.slr = SparseLR(self.biadjacency, [(np.random.rand(n_row), np.random.rand(n_col))])", "title": "" }, { "docid": "785401153e0faaa2bb6b80e8600e9f23", "score": "0.57200974", "text": "def __init__(self, robot = None):\n self.flag = False\n self.ultrasonic_sensor = ev3.UltrasonicSensor('in2')\n self.robot = robot\n self.FSM = {'seeking': self.updateSeeking,\n 'found': self.updateFound}\n self.state = 'seeking'\n self.robot.runforever(0.1)", "title": "" }, { "docid": "42dd8d6632ead299e9bba502a471d510", "score": "0.5717807", "text": "def __init__(self, tag):\n SceneObject.__init__(self, tag)\n self.transform.position = Vector3([0.0, 0.0, 3.0])\n self.target = Vector3([0.0, 0.0, 0.0])\n self.mouse_sensitivity = 0.25\n self.transform.eulerAngles = Vector3([0.0, 0.0, 0.0])\n self.transform.forward = Vector3([0.0, 0.0, 1.0])\n self.pitch = 89.9999\n self.yaw = -89.9999\n self.follow_distance = 4.0", "title": "" }, { "docid": "5f56a7fb4c85ee1cf9e89125c73f180b", "score": "0.571316", "text": "def _prepare_resources(self):\n # Set up scene.\n self.scene = SSO(\"scene\")\n # Physics.\n self.bbase = BulletBase()\n self.bbase.init()\n self.bbase.gravity = self.params[\"physics\"][\"gravity\"]\n self.bbase.sim_par = {\n \"size\": self.params['simulation'][\"step_size\"],\n \"n_subs\": self.params['simulation']['n_substeps'],\n \"size_sub\": self.params['simulation'][\"substep_size\"],\n }", "title": "" }, { "docid": "c36cbd0d28eac3da483bfc2297e31b30", "score": "0.57115835", "text": "def __init__(self, limb, experiment, number, threed):\n self._arm_robot = limb\n if self._arm_robot == 'left':\n self._arm_human = 'right'\n else:\n self._arm_human = 'left'\n self._experiment = experiment\n self._number = number\n self._threed = threed\n\n self._limb_robot = baxter_interface.Limb(self._arm_robot)\n self._rec_joint_robot = JointClient(limb=self._arm_robot,\n rate=settings.recording_rate)\n self._limb_human = baxter_interface.Limb(self._arm_human)\n self._rec_joint_human = JointClient(limb=self._arm_human,\n rate=settings.recording_rate)\n self._head = baxter_interface.Head()\n\n if self._threed:\n self._rec_kinect = RecorderClient('kinect_recorder')\n self._rec_senz3d 
= RecorderClient('senz3d_recorder')\n self._rec_flash = RecorderClient('flash_recorder')\n\n self._pub_rate = rospy.Publisher('robot/joint_state_publish_rate',\n UInt16, queue_size=10)\n s = 'data/limb/' + self._arm_robot + '/cfg/des'\n self._pub_cfg_des_robot = rospy.Publisher(s, JointCommand,\n queue_size=10)\n s = 'data/limb/' + self._arm_human + '/cfg/des'\n self._pub_cfg_des_human = rospy.Publisher(s, JointCommand,\n queue_size=10)\n\n print \"\\nGetting robot state ... \"\n self._rs = baxter_interface.RobotEnable(CHECK_VERSION)\n self._init_state = self._rs.state().enabled\n print \"Enabling robot... \"\n self._rs.enable()\n\n self._limb_robot.set_joint_position_speed(0.3)\n self._limb_human.set_joint_position_speed(0.3)\n self._pub_rate.publish(settings.recording_rate)", "title": "" }, { "docid": "8de67379e825356011a4d767d5770e37", "score": "0.57054174", "text": "def __init__(self, init_pose=None, init_velocities=None,\n init_angle_velocities=None, runtime=5., target_pos=None):\n # Simulation\n self.sim = PhysicsSim(init_pose, init_velocities, init_angle_velocities, runtime)\n self.action_repeat = 3\n\n self.state_size = self.action_repeat * 6\n self.action_low = 0\n self.action_high = 900\n self.action_size = 4\n\n\n # Goal\n self.target_pos = target_pos if target_pos is not None else np.array([0., 0., 10.])", "title": "" }, { "docid": "02b2f27f226b16ecc81e6471f80a346b", "score": "0.5703097", "text": "def __init__(self, init_pose=None, init_velocities=None, \n init_angle_velocities=None, runtime=5., target_pos=None):\n # Simulation\n self.sim = PhysicsSim(init_pose, init_velocities, init_angle_velocities, runtime) \n self.action_repeat = 3\n\n self.state_size = self.action_repeat * 6\n self.action_low = 100\n self.action_high = 900\n self.action_size = 4\n\n # Goal\n self.target_pos = target_pos if target_pos is not None else np.array([0., 0., 10.])", "title": "" }, { "docid": "5d3e1e7ce1bee02d2e17a150efc96095", "score": "0.5700927", "text": "def __init__(self, init_pose=None, init_velocities=None, \n init_angle_velocities=None, runtime=400., target_pos=None):\n # Simulation\n self.sim = PhysicsSim(init_pose, init_velocities, init_angle_velocities, runtime) \n self.action_repeat = 3\n\n self.state_size = self.action_repeat * 12\n self.action_low = 0\n self.action_high = 900\n self.action_size = 4\n\n # Goal\n self.target_pos = target_pos if target_pos is not None else np.array([0., 0., 50.])", "title": "" }, { "docid": "c8173a13088965440cb87bc3853096bf", "score": "0.56979835", "text": "def __init__(self, robot):\n super().__init__(name = \"climbers\")\n\n self.motor1 = WPI_VictorSPX(5)\n self.motor2 = WPI_VictorSPX(6)", "title": "" }, { "docid": "6d169eb0dd35964cc7617d981097754a", "score": "0.56902856", "text": "def __init__(self, rotors, reflector, positions=None, ring_settings=None, plugboard=None):\n self.rotors = EnigmaMachine._check_rotors(rotors)\n self.reflector = EnigmaMachine._check_reflector(reflector)\n self.plugboard = EnigmaMachine._check_plugboard(plugboard if plugboard is not None else [])\n if positions is not None:\n self.set_positions(positions)\n if ring_settings is not None:\n self.set_ring_settings(ring_settings)", "title": "" }, { "docid": "8329e0fe629a68ee2311e47452c0357d", "score": "0.56833386", "text": "def __init__(self, models_dir, images_dir):\n self.camera = None\n self.camera_sensor = None\n self.focal_length = None\n self.principal_point = None\n self.G_camera_image = None\n self.bilinear_lut = None\n\n self.__load_intrinsics(models_dir, images_dir)\n 
self.__load_lut(models_dir, images_dir)", "title": "" }, { "docid": "168a2f92f4300cc527d5cbe21f60ea45", "score": "0.56828344", "text": "def __init__(self,\n goal_pose=None,\n init_pose=None,\n init_q=None,\n ):\n self.init_q = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]\n self.init_pose = [0.374, -2.439454888092385e-19, 0.6299999999999999, 0.0, 0.0, 0.0, 1.0 ]\n self.init_vel = [0.0]*6\n #self._goal_pose = [0.2665122782069116, 0.40988654527750035, 0.16178715865384272 , 0.1685172244060629, 0.568363626537449, 0.16851711653306128, 0.7875066441262947]\n \n self.observation_space = gym.spaces.Box(\n low=np.array([-np.inf, -np.inf, -np.inf, -np.inf, -np.inf, -np.inf, -np.inf]),\n high=np.array([np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, np.inf]))\n \n self.action_space = gym.spaces.Box(\n low = np.array([-1.91, -1.4, -2.5, -2.0, -1.5, -1]), \n high = np.array([1.91, 0.9, 0.8, 2.0, 1.5, 0.0]))\n \n self.bot = pb.loadURDF(\"irb120_3_58.urdf\",[0, 0, 0], useFixedBase=1)\n self.distance_threshold = 0.001\n self.reward_type = 'dense'\n \n self.action_low = [-1.91, -1.4, -2.5, -2.0, -1.5, -1] \n self.action_high = [1.91, 0.9, 0.8, 2.0, 1.5, 0.0]\n #self.goal_pose = self.goal_gen()\n self.viewer = None\n self.state = None\n self.reset()", "title": "" }, { "docid": "a76c1e36d4d18ade1ce8fe4ecfd270f8", "score": "0.5682066", "text": "def __init__(self, init_pose=None, init_velocities=None,\r\n init_angle_velocities=None, runtime=5., target_pos=None):\r\n # Simulation\r\n self.sim = PhysicsSim(init_pose, init_velocities,\r\n init_angle_velocities, runtime)\r\n self.action_repeat = 1\r\n\r\n self.state_size = self.action_repeat * 9\r\n self.action_low = 0\r\n self.action_high = 900\r\n self.action_size = 4\r\n self.done = False\r\n\r\n # Goal\r\n self.target_pos = target_pos if target_pos is not None else np.array([0., 0., 10.])", "title": "" }, { "docid": "af07da03835f45f0cb6c958b2fff0015", "score": "0.56816524", "text": "def load(self):\n self.vis[self.prefix].delete()\n\n # Intercept load message via mock LCM.\n mock_lcm = DrakeMockLcm()\n DispatchLoadMessage(self._scene_graph, mock_lcm)\n load_robot_msg = lcmt_viewer_load_robot.decode(\n mock_lcm.get_last_published_message(\"DRAKE_VIEWER_LOAD_ROBOT\"))\n # Translate elements to `meshcat`.\n for i in range(load_robot_msg.num_links):\n link = load_robot_msg.link[i]\n [source_name, frame_name] = link.name.split(\"::\")\n\n for j in range(link.num_geom):\n geom = link.geom[j]\n # MultibodyPlant currenly sets alpha=0 to make collision\n # geometry \"invisible\". 
Ignore those geometries here.\n if geom.color[3] == 0:\n continue\n\n element_local_tf = RigidTransform(\n RotationMatrix(Quaternion(geom.quaternion)),\n geom.position).GetAsMatrix4()\n\n if geom.type == geom.BOX:\n assert geom.num_float_data == 3\n meshcat_geom = meshcat.geometry.Box(geom.float_data)\n elif geom.type == geom.SPHERE:\n assert geom.num_float_data == 1\n meshcat_geom = meshcat.geometry.Sphere(geom.float_data[0])\n elif geom.type == geom.CYLINDER:\n assert geom.num_float_data == 2\n meshcat_geom = meshcat.geometry.Cylinder(\n geom.float_data[1],\n geom.float_data[0])\n # In Drake, cylinders are along +z\n # In meshcat, cylinders are along +y\n # Rotate to fix this misalignment\n extra_rotation = tf.rotation_matrix(\n math.pi/2., [1, 0, 0])\n element_local_tf[0:3, 0:3] = \\\n element_local_tf[0:3, 0:3].dot(\n extra_rotation[0:3, 0:3])\n elif geom.type == geom.MESH:\n meshcat_geom = \\\n meshcat.geometry.ObjMeshGeometry.from_file(\n geom.string_data[0:-3] + \"obj\")\n else:\n print(\"UNSUPPORTED GEOMETRY TYPE {} IGNORED\".format(\n geom.type))\n continue\n\n # Turn a list of R,G,B elements (any indexable list of >= 3\n # elements will work), where each element is specified on range\n # [0., 1.], into the equivalent 24-bit value 0xRRGGBB.\n def Rgb2Hex(rgb):\n val = 0\n for i in range(3):\n val += (256**(2 - i)) * int(255 * rgb[i])\n return val\n\n self.vis[self.prefix][source_name][str(link.robot_num)][\n frame_name][str(j)]\\\n .set_object(meshcat_geom,\n meshcat.geometry\n .MeshLambertMaterial(\n color=Rgb2Hex(geom.color)))\n self.vis[self.prefix][source_name][str(link.robot_num)][\n frame_name][str(j)].set_transform(element_local_tf)", "title": "" }, { "docid": "ade6c84979c8178af3635c7b96705504", "score": "0.56684595", "text": "def simulator(Initial_pos=[0, 0, 0], Initial_angle=[0, 0, 0]):\r\n # connect physical server\r\n physicsClient = p.connect(p.GUI)\r\n p.setAdditionalSearchPath(pybullet_data.getDataPath()) # optionally\r\n # set gravity\r\n p.setGravity(0, 0, 0)\r\n # create an empty link list to store link id\r\n LinkId = []\r\n # model initial location\r\n StartPos = Initial_pos\r\n # model initial orientation in Euler\r\n StartOrientation = p.getQuaternionFromEuler(Initial_angle)\r\n # load model file and set the initial position and fixed base link\r\n boxId = p.loadURDF(\"barrett_hand_target.urdf\", StartPos, StartOrientation, useFixedBase=True)\r\n # load object model\r\n object = p.loadURDF(\"object.urdf\", useFixedBase=True)\r\n # set gripper be the loaded model\r\n gripper = boxId\r\n # set camera parameters\r\n p.resetDebugVisualizerCamera(cameraDistance=1, cameraYaw=45, cameraPitch=0, cameraTargetPosition=[0.5, 0.5, 0.2])\r\n\r\n # for loop to obtain the joint information and set parameters\r\n for i in range(0, p.getNumJoints(gripper)):\r\n p.setJointMotorControl2(gripper, i, p.POSITION_CONTROL, targetPosition=0, force=0)\r\n # obtain the limit rotation angle range of the joint\r\n lower_limit = p.getJointInfo(gripper, i)[8]\r\n upper_limit = p.getJointInfo(gripper, i)[9]\r\n # obtain the joint name\r\n linkName = p.getJointInfo(gripper, i)[12].decode(\"ascii\")\r\n # set the gui control board\r\n LinkId.append(p.addUserDebugParameter(linkName, lower_limit, upper_limit, 0))\r\n\r\n while p.isConnected():\r\n # start do simulation\r\n p.stepSimulation()\r\n time.sleep(1. 
/ 240.)\r\n\r\n # move joints following command\r\n for i in range(0, p.getNumJoints(gripper)):\r\n linkPos = p.readUserDebugParameter((LinkId[i]))\r\n p.setJointMotorControl2(gripper, i, p.POSITION_CONTROL, targetPosition=linkPos)\r\n p.saveBullet('Test.bullet')\r\n p.disconnect()", "title": "" }, { "docid": "8fec993af031c5a949fee5643a04364a", "score": "0.56579846", "text": "def __init__(self, xml_path, **kwargs):\n # default env config\n self._env_config = {\n \"frame_skip\": 5,\n \"apply_force\": 0,\n \"prob_apply_force\": 0.1,\n \"perturb_action\": 0,\n \"prob_perturb_action\": 0,\n \"ctrl_reward\": 1e-3,\n \"diayn_reward\": 0.1,\n \"init_randomness\": 1e-5,\n \"max_episode_steps\": 500,\n \"unstable_penalty\": 100,\n \"screen_width\": 500,\n \"screen_height\": 500,\n \"seed\": 42\n }\n\n # update env config with keyward args\n self._env_config.update({ k:v for k,v in kwargs.items() if k in self._env_config })\n\n logger.setLevel(logging.INFO)\n\n self.render_mode = 'no' # ['no', 'human', 'rgb_array']\n self._screen_width = self._env_config[\"screen_width\"]\n self._screen_height = self._env_config[\"screen_height\"]\n self._seed = self._env_config[\"seed\"]\n self._gym_disable_underscore_compat = True\n\n # Load model\n self._load_model(xml_path)", "title": "" }, { "docid": "c221bb9b5e55ace762f2506d859e1694", "score": "0.5654646", "text": "def interface(\n scene, start_conf, object_list, object_poses, flag_compute_grasp, add_table=False\n):\n scene.traj.start = start_conf\n scene.env.clear()\n start_time = time.time()\n populate_scene(\n scene, object_list, object_poses, flag_compute_grasp, add_table=False\n )\n print(\"populate scene time: {:.3f}\".format(time.time() - start_time))", "title": "" }, { "docid": "4f660f1fd6a68f278a38a8dbe20ef5f5", "score": "0.5652673", "text": "def __init__(self, init_pose=None, init_velocities=None, \n init_angle_velocities=None, runtime=5., target_pos=None):\n # Simulation\n self.sim = PhysicsSim(init_pose, init_velocities, init_angle_velocities, runtime) \n self.action_repeat = 3\n state_s = len(self.get_state())\n\n self.state_size = self.action_repeat * state_s\n self.action_low = 850\n self.action_high = 860\n self.action_size = 4\n\n # Goal: reach a target coordinate (x, y, z) as fast as possible and hover there for the\n # remainder of the episode.\n self.target_pos = target_pos if target_pos is not None else np.array([0., 0., 10.])", "title": "" }, { "docid": "ae4fa41cccfa6539a7c64f5aae2154b8", "score": "0.5645972", "text": "def main(self):\n\t\t# init my ROS node\n\t\trospy.init_node('PyGameDemo', anonymous=True) \n\t\t\n\t\t# load the background\t\t\n\t\tself.load_background()\n\t\n\t\t# get the game clock, used for tracking time\n\t\tclock = pygame.time.Clock()\n\n\t\t# load teh robot sprite\n\t\tself.load_robot()\n\n\t\t# 10 Hz\n\t\trate = rospy.Rate(10)\n\n\t\t# Loop until the process is shut down\n\t\twhile not rospy.is_shutdown(): \n\n\t\t\t# Always publish the robots current position\n\t\t\tpos = (self.robot.rect.x, self.robot.rect.y)\n\t\t\trospy.loginfo(\"current position is %s\", str(pos))\n\n\t\t\t# Publishes the current position\n\t\t\tself.publish_current_pos(pos)\n\n\t\t\t# Check to see if the game has been exited or if a mouse click occurred\n\t \t\tfor event in pygame.event.get(): \n\t \t\t\tif event.type is pygame.QUIT:\n\t \t\t\t\treturn\n\t \t\t\tif event.type is pygame.MOUSEBUTTONDOWN:\n\t \t\t\t\tpt = pygame.mouse.get_pos()\n\t \t\t\t\t# The location of the mouse click is the new desired position\n\t 
\t\t\t\trospy.loginfo(\"desired position is %s\", str(pt))\n\t \t\t\t\t# Publish the new desired position \n\t \t\t\t\tself.publish_desired_pos(pt)\n\n\t \t\t# Move the game clock forward by 20 ticks\n\t \t\tclock.tick(20)\n\t \t\t# Redraw the background\n\t \t\tself.screen.blit(self.background, self.background_rect) \n\t \t\t# Draw the robot on the screen\n\t \t\tself.robot_sprite.draw(self.screen)\n\n\t \t\t# Update the entire contents of the screen\n\t \t\tpygame.display.flip()\n\n\t \t\t# Sleeps the process to ensure that this loop is executed only 10 times per second\n\t \t\trate.sleep()", "title": "" }, { "docid": "20701837a562bef47016046e5f66386a", "score": "0.5645624", "text": "def robotInit(self):\n \"\"\"\n 1. Chute\n 2. Loader\n 3. Climber\n \"\"\"\n \n # Configure shooter motor controller.\n self.Gyro = wpilib.ADXRS450_Gyro()\n self.Chute = ctre.wpi_talonsrx.WPI_TalonSRX(7)\n self.Chute = ctre.wpi_talonsrx.WPI_TalonSRX(8)# Create a CANTalon object.\n self.Chute.configSelectedFeedbackSensor(ctre.wpi_talonsrx.WPI_TalonSRX.FeedbackDevice.QuadEncoder, 0 , 0) # Choose an encoder as a feedback device. The default should be QuadEncoder already, but might as well make sure.\n # I thought the encoder was 20 pulses per revolution per phase, which would require \"80\" as an argument below, but after trying it, it looks like there are 12.\n # Setting this parameter changes things so getPosition() returns decimal revolutions, and getSpeed() returns RPM.\n #self.shooter.configEncoderCodesPerRev(48)\n # resets shooter position on startup\n #self.unloader.setQuadraturePosition(0, 0)\n #self.unloader.setNeutralMode(ctre.wpilib.Spark.NeutralMode.Coast)# This should change between brake and coast modes.\n \n\n self.l_motor1 = ctre.wpi_talonsrx.WPI_TalonSRX(0)\n self.l_motor2 = ctre.wpi_talonsrx.WPI_TalonSRX(1) \n self.l_motor1.setInverted(False)\n self.l_motor2.setInverted(False)\n self.r_motor1 = ctre.wpi_talonsrx.WPI_TalonSRX(2)\n self.r_motor2 = ctre.wpi_talonsrx.WPI_TalonSRX(3)\n self.r_motor1.setInverted(False)\n self.r_motor2.setInverted(False)\n # Configure shooter motor controller.\n # Create a CANTalon object.\n self.l_motor1.setNeutralMode(ctre.wpi_talonsrx.WPI_TalonSRX.NeutralMode.Coast)\n self.l_motor1.configSelectedFeedbackSensor(ctre.wpi_talonsrx.WPI_TalonSRX.FeedbackDevice.QuadEncoder, 0, 0)\n self.l_motor2.setNeutralMode(ctre.wpi_talonsrx.WPI_TalonSRX.NeutralMode.Coast)\n self.l_motor2.configSelectedFeedbackSensor(ctre.wpi_talonsrx.WPI_TalonSRX.FeedbackDevice.QuadEncoder, 0, 0)\n self.l_motor1.setNeutralMode(ctre.wpi_talonsrx.WPI_TalonSRX.NeutralMode.Coast)\n self.r_motor1.configSelectedFeedbackSensor(ctre.wpi_talonsrx.WPI_TalonSRX.FeedbackDevice.QuadEncoder, 0, 0)\n self.l_motor2.setNeutralMode(ctre.wpi_talonsrx.WPI_TalonSRX.NeutralMode.Coast)\n self.r_motor2.configSelectedFeedbackSensor(ctre.wpi_talonsrx.WPI_TalonSRX.FeedbackDevice.QuadEncoder, 0, 0)# Choose an encoder as a feedback device. 
The default should be QuadEncoder already, but might as well make sure.\n # I thought the encoder was 20 pulses per revolution per phase, which would require \"80\" as an argument below, but after trying it, it looks like there are 12.\n # Setting this parameter changes things so getPosition() returns decimal revolutions, and getSpeed() returns RPM.\n #self.l_motor1.configEncoderCodesPerRev(48)\n #self.l_motor2.configEncoderCodesPerRev(48)\n #self.r_motor1.configEncoderCodesPerRev(48)\n #self.r_motor2.configEncoderCodesPerRev(48)\n # resets shooter position on startup\n self.l_motor1.setQuadraturePosition(0, 0)\n self.l_motor2.setQuadraturePosition(0, 0)\n self.r_motor1.setQuadraturePosition(0, 0)\n self.r_motor2.setQuadraturePosition(0, 0)\n\n #self.stick = wpilib.Joystick(0)\n self.l_joy = wpilib.Joystick(0)\n self.r_joy = wpilib.Joystick(1)\n self.loader = ctre.wpi_talonsrx.WPI_TalonSRX(4)\n self.climber = wpilib.Spark(6)\n self.loader = ctre.wpi_talonsrx.WPI_TalonSRX(5)\n self.drive = wpilib.RobotDrive(self.l_motor1 , self.l_motor2 , self.r_motor1 , self.r_motor2)\n self.counter = 0\n self.mode = 0\n #wpilib.CameraServer.launch()\n #IP for camera server: http://10.38.81.101:1181/", "title": "" }, { "docid": "7f099ce694ebbde4cf29737ec296d946", "score": "0.5635404", "text": "def init(target):\n\n renderer = RendererPython(640, 480, bg_color=(1.0, 1.0, 1.0, 1.0), shading='flat')\n\n # base scene (static support)\n ground_volume = resource.get(f\"{DATA_PATH}/cube.ply\").convert('VolumeGrid')\n renderer.add_object(0, f\"{DATA_PATH}/cube.ply\")\n\n env = [ground_volume]\n env_meshes = [trimesh.load(f\"{DATA_PATH}/cube.ply\")]\n env_ids = [0]\n env_Ts = [np.eye(4)]\n\n # initialize remaining scene objects based on target object\n Tgt = np.eye(4)\n if target == 'bowl':\n # bowl\n obj_id = 13\n cloud, ply, mesh = get_tgt_model(obj_id, renderer)\n Tgt[:3, 3] = np.array([0, 0, -models_info[obj_id]['min_z'] - obj_toff[obj_id - 1][2]])\n t_obj_offset = [-125, 0, 0]\n elif target == 'marker':\n # marker\n obj_id = 18\n cloud, ply, mesh = get_tgt_model(obj_id, renderer)\n Tgt[:3, :3] = Rotation.from_euler('xz', [21.5, 90.0], degrees=True).as_dcm()\n Tgt[:3, 3] = [33.42, 0, 30.14]\n t_obj_offset = [-300, 25, 50]\n\n # banana\n env_model, env_mesh = get_env_model(10, renderer)\n T = np.eye(4)\n T[:3, :3] = Rotation.from_euler('xyz', [0, 11.8, 0], degrees=True).as_dcm()\n T[:3, 3] = [0, 0, 16]\n env_mesh.apply_transform(T)\n\n env.append(env_model)\n env_meshes.append(env_mesh)\n env_ids.append(-10)\n env_Ts.append(T.copy())\n elif target == 'clamp':\n # clamp\n obj_id = 19\n cloud, ply, mesh = get_tgt_model(obj_id, renderer)\n Tgt[:3, :3] = Rotation.from_euler('xyz', [-6, -1.4, -90], degrees=True).as_dcm()\n Tgt[:3, 3] = [9.59, 0, 91.74]\n t_obj_offset = [-150, 0, 65]\n\n # pudding\n env_model, env_mesh = get_env_model(7, renderer)\n T = np.eye(4)\n T[:3, :3] = Rotation.from_euler('xyz', [-88.4, 0.4, 90], degrees=True).as_dcm()\n T[:3, 3] = [-61.66, 1.24, 44.27]\n env_mesh.apply_transform(T)\n\n env.append(env_model)\n env_meshes.append(env_mesh)\n env_ids.append(-7)\n env_Ts.append(T.copy())\n\n # jello\n env_model, env_mesh = get_env_model(8, renderer)\n T = np.eye(4)\n T[:3, :3] = Rotation.from_euler('xyz', [-90, 90, 180], degrees=True).as_dcm()\n T[:3, 3] = [74.90, -1.09, 36.37]\n env_mesh.apply_transform(T)\n\n env.append(env_model)\n env_meshes.append(env_mesh)\n env_ids.append(-8)\n env_Ts.append(T.copy())\n else:\n raise ValueError(\"'target' must be one of 'bowl', 'marker' or 'clamp'.\")\n 
Tsyms = misc.get_symmetry_transformations(models_info[obj_id], 0.01) # symmetry\n\n # prepare renderer: intrinsics and extrinsics\n K = np.array([1066.778, 0.0, 312.9869, 0.0, 1067.487, 241.3109, 0.0, 0.0, 1.0]).reshape(3, 3)\n Rview = Rotation.from_euler('zx', [-90 if target == 'bowl' else 0, 90 if target == 'bowl' else 110],\n degrees=True).as_dcm()\n tview = np.array([-t_obj_offset[1], t_obj_offset[2], 850+t_obj_offset[0]])\n\n return env_meshes, env, env_ids, env_Ts, mesh, cloud, ply, Tgt, Tsyms, renderer, K, Rview, tview", "title": "" }, { "docid": "73e1a6f1d90f13790251c9b31d82f2d2", "score": "0.563278", "text": "def __init__(self, robot_interface, ee_link='right_gripper'):\n \n self.franka = robot_interface\n self.config_modulator = RobotConfigModulator()\n self.home_q = [-0.059293474684699775, -1.6124685985429639, -0.19709729059328113, -2.5317617220662476, \n -0.09526965726127999, 1.678176488975683, 0.5879584750097497]\n\n # initialize forward kinematics\n self.base_link = 'base_link'\n self.ee_link = ee_link\n\n success, kdl_tree = urdf.treeFromParam('/robot_description')\n if not success:\n raise RuntimeError(\n \"Could not create kinematic tree from /robot_description.\")\n\n self.kdl_chain = kdl_tree.getChain(self.base_link, ee_link)\n print(\"Number of joints in KDL chain:\", self.kdl_chain.getNrOfJoints())\n self.kdl_fk = kdl.ChainFkSolverPos_recursive(self.kdl_chain)\n self.ik_solver = IK(self.base_link, ee_link, timeout=0.05, solve_type=\"Distance\")", "title": "" }, { "docid": "33ddb4670e0f715cb8ef674ae0ba4cd8", "score": "0.562794", "text": "def __init__(self):\n \n # We execute this one before because there are some functions that this\n # TaskEnv uses that use variables from the parent class, like the effort limit fetch.\n super(IriWamTcpToBowlEnv, self).__init__()\n \n # Here we will add any init functions prior to starting the MyRobotEnv\n \n \n # Only variable needed to be set here\n\n rospy.logdebug(\"Start IriWamTcpToBowlEnv INIT...\")\n number_actions = rospy.get_param('/iriwam/n_actions')\n self.action_space = spaces.Discrete(number_actions)\n \n # We set the reward range, which is not compulsory but here we do it.\n self.reward_range = (-numpy.inf, numpy.inf)\n \n self.iri_wam_joint_1 = rospy.get_param(\"/iriwam/init_joints/iri_wam_joint_1\")\n self.iri_wam_joint_2 = rospy.get_param(\"/iriwam/init_joints/iri_wam_joint_2\")\n self.iri_wam_joint_3 = rospy.get_param(\"/iriwam/init_joints/iri_wam_joint_3\")\n self.iri_wam_joint_4 = rospy.get_param(\"/iriwam/init_joints/iri_wam_joint_4\")\n self.iri_wam_joint_5 = rospy.get_param(\"/iriwam/init_joints/iri_wam_joint_5\")\n self.iri_wam_joint_6 = rospy.get_param(\"/iriwam/init_joints/iri_wam_joint_6\")\n self.iri_wam_joint_7 = rospy.get_param(\"/iriwam/init_joints/iri_wam_joint_7\")\n \n self.init_joints_positions_array = [self.iri_wam_joint_1,\n self.iri_wam_joint_2,\n self.iri_wam_joint_3,\n self.iri_wam_joint_4,\n self.iri_wam_joint_5,\n self.iri_wam_joint_6,\n self.iri_wam_joint_7]\n \n \n self.joint_increment_value = rospy.get_param(\"/iriwam/joint_increment_value\")\n \n self.max_distance_from_red_bowl = rospy.get_param(\"/iriwam/max_distance_from_red_bowl\")\n self.min_distance_from_red_bowl = rospy.get_param(\"/iriwam/min_distance_from_red_bowl\")\n \n self.min_laser_distance = rospy.get_param(\"/iriwam/min_laser_distance\")\n \n self.dec_obs = rospy.get_param(\"/iriwam/number_decimals_precision_obs\")\n\n # We place the Maximum and minimum values of observations\n # TODO: Fill when get_observations is done.\n 
\"\"\"\n We supose that its all these:\n head_pan, right_gripper_l_finger_joint, right_gripper_r_finger_joint, right_j0, right_j1,\n right_j2, right_j3, right_j4, right_j5, right_j6\n \n Plus the first three are the block_to_tcp vector\n \"\"\"\n \n # We fetch the limits of the joinst to get the effort and angle limits\n self.joint_limits = self.init_joint_limits()\n \n high = numpy.array([self.init_joints_x_max,\n self.init_joints_y_max,\n self.init_joints_z_max,\n self.joint_limits.position_upper[0],\n self.joint_limits.position_upper[1],\n self.joint_limits.position_upper[2],\n self.joint_limits.position_upper[3],\n self.joint_limits.position_upper[4],\n self.joint_limits.position_upper[5],\n self.joint_limits.position_upper[6],\n self.joint_limits.position_upper[7],\n self.joint_limits.position_upper[8],\n self.joint_limits.position_upper[9]\n ])\n \n low = numpy.array([ self.init_joints_x_min,\n self.init_joints_y_min,\n self.init_joints_z_min,\n self.joint_limits.position_lower[0],\n self.joint_limits.position_lower[1],\n self.joint_limits.position_lower[2],\n self.joint_limits.position_lower[3],\n self.joint_limits.position_lower[4],\n self.joint_limits.position_lower[5],\n self.joint_limits.position_lower[6],\n self.joint_limits.position_lower[7],\n self.joint_limits.position_lower[8],\n self.joint_limits.position_lower[9]\n ])\n\n \n self.observation_space = spaces.Box(low, high)\n \n rospy.logdebug(\"ACTION SPACES TYPE===>\"+str(self.action_space))\n rospy.logdebug(\"OBSERVATION SPACES TYPE===>\"+str(self.observation_space))\n \n # Rewards\n \n self.done_reward =rospy.get_param(\"/iriwam/done_reward\")\n self.closer_to_block_reward = rospy.get_param(\"/iriwam/closer_to_block_reward\")\n\n self.cumulated_steps = 0.0\n \n \n # We init the CVBridge object\n self.bridge_object = CvBridge()\n\n \n \n rospy.logdebug(\"END IriWamTcpToBowlEnv INIT...\")", "title": "" }, { "docid": "75bd0c8b5ef67d07eec07b8b6d45ad13", "score": "0.56230056", "text": "def __init__(self):\n self.moveControl = MovementControlThread()\n self.moveControl.start()\n\n self.depthControl = DepthSensorThread()\n self.depthControl.start()\n\n self.imageControl = ImageSensorThread()\n self.imageControl.start()\n\n rospy.on_shutdown(self.exit)", "title": "" }, { "docid": "aee78e6c4a34392b8e0d50beb3781429", "score": "0.56226236", "text": "def setup(self):\n # if not system.restore_snapshot():\n # self.log.debug(\"No snapshot to restore, if this is not expected please contact automation team\")\n pos.connect()", "title": "" } ]
a17d1a571b52d3953500e8792effebe5
This decorator sets the callback function for dynamically setting the JWT decode key based on the UNVERIFIED contents of the token. Think carefully before using this functionality; in most cases you probably don't need it. The decorated function must take two arguments. The first argument is a dictionary containing the header data of the unverified JWT. The second argument is a dictionary containing the payload data of the unverified JWT. The decorated function must return a string that is used to decode and verify the token.
[ { "docid": "61bda5e43c504087b6a950d9534098ad", "score": "0.52614766", "text": "def decode_key_loader(self, callback: Callable) -> Callable:\n self._decode_key_callback = callback\n return callback", "title": "" } ]
[ { "docid": "77ed2c7c0ec1e4f13cee23bcaa72e8e6", "score": "0.614135", "text": "def jwt_decode_handler(token):\n options = {\n 'verify_exp': False,\n }\n\n return decode(\n token,\n api_settings.JWT_SECRET_KEY,\n api_settings.JWT_VERIFY,\n options=options,\n leeway=api_settings.JWT_LEEWAY,\n audience=api_settings.JWT_AUDIENCE,\n issuer=api_settings.JWT_ISSUER,\n algorithms=[api_settings.JWT_ALGORITHM]\n )", "title": "" }, { "docid": "44faad042f15c8202f181a5e7bc731cf", "score": "0.6082187", "text": "def decode_jwt(encoded_token, secret, algorithms, identity_claim_key, user_claims_key, csrf_value=..., audience=..., leeway=..., allow_expired=..., issuer=...):\n ...", "title": "" }, { "docid": "f686713cf5bd56100f20e1c24e98c4bf", "score": "0.56426835", "text": "def authenticated(func):\n\n @functools.wraps(func)\n def wrapper(payload):\n try:\n user = jwt.decode(payload['token'], JWT_SECRET_KEY, algorithm='HS256')\n except:\n return Message(\n Codes.FORBIDDEN,\n { 'message': 'A token was not supplied or not valid.' }\n )\n\n del payload['token']\n return func(payload, user)\n\n return wrapper", "title": "" }, { "docid": "6ebce1144d601842052133bf2b6ceb3e", "score": "0.5627261", "text": "def requires_auth(f):\n\n @wraps(f)\n def decorated(*args, **kwargs):\n token = get_token_auth_header()\n jsonurl = urlopen(\"https://\" + AUTH0_DOMAIN +\n \"/.well-known/jwks.json\")\n jwks = json.loads(jsonurl.read())\n unverified_header = jwt.get_unverified_header(token)\n rsa_key = {}\n for key in jwks[\"keys\"]:\n if key[\"kid\"] == unverified_header[\"kid\"]:\n rsa_key = {\n \"kty\": key[\"kty\"],\n \"kid\": key[\"kid\"],\n \"use\": key[\"use\"],\n \"n\": key[\"n\"],\n \"e\": key[\"e\"]\n }\n if rsa_key:\n try:\n payload = jwt.decode(\n token,\n rsa_key,\n algorithms=unverified_header[\"alg\"],\n audience=\"https://api.deephire.io\",\n issuer=\"https://\" + AUTH0_DOMAIN + \"/\")\n except jwt.ExpiredSignatureError:\n return handle_error({\n \"code\": \"token_expired\",\n \"description\": \"token is expired\"\n }, 401)\n except jwt.JWTClaimsError:\n return handle_error({\n \"code\":\n \"invalid_claims\",\n \"description\":\n \"incorrect claims,\"\n \"please check the audience and issuer\"\n }, 401)\n except Exception:\n return handle_error({\n \"code\":\n \"invalid_header\",\n \"description\":\n \"Unable to parse authentication\"\n \"token.\"\n }, 400)\n\n _app_ctx_stack.top.current_user = payload\n return f(*args, **kwargs)\n return handle_error({\n \"code\": \"invalid_header\",\n \"description\": \"Unable to find appropriate key\"\n }, 400)\n\n return decorated", "title": "" }, { "docid": "fa8ea1640ad14c51e7476b6fd448327a", "score": "0.55949736", "text": "def decode_jwt(encoded_token, secret, identity_claim_key,\n csrf_value=None, audience=None, allow_expired=False,\n issuer=None):\n token = encoded_token\n headers = jwt.get_unverified_headers(token)\n kid = headers['kid']\n # search for the kid in the downloaded public keys\n key_index = -1\n for i in range(len(secret)):\n if kid == secret[i]['kid']:\n key_index = i\n break\n if key_index == -1:\n raise JWTDecodeError(\"Invalid key attribute: kid\")\n # construct the public key\n public_key = jwk.construct(secret[key_index])\n # get the last two sections of the token,\n # message and signature (encoded in base64)\n message, encoded_signature = str(token).rsplit('.', 1)\n # decode the signature\n decoded_signature = base64url_decode(encoded_signature.encode('utf-8'))\n # verify the signature\n if not public_key.verify(message.encode(\"utf8\"), 
decoded_signature):\n raise JWTDecodeError(\"Signature verification failed\")\n # since we passed the verification, we can now safely\n # use the unverified claims\n data = jwt.get_unverified_claims(token)\n if identity_claim_key not in data:\n raise JWTDecodeError(\"Missing claim: {}\".format(identity_claim_key))\n if not allow_expired and time.time() > data['exp']:\n ctx_stack.top.expired_jwt = token\n raise ExpiredSignatureError(\"Token has expired\")\n # check iss\n if 'iss' not in data:\n data['iss'] = None\n if data['iss'] != issuer:\n raise JWTDecodeError(\"Missing or invalid issuer\")\n # check aud if id_token\n if data['token_use'] == 'id':\n if 'aud' not in data:\n data['aud'] = None\n if data['aud'] != audience:\n raise JWTDecodeError(\"Missing or invalid audience\")\n # check clientid if access_token\n if data['token_use'] == 'access':\n if 'client_id' not in data:\n data['client_id'] = None\n if data['client_id'] != audience:\n raise JWTDecodeError(\"Missing or invalid audience\")\n # check csrf\n if csrf_value:\n if 'csrf' not in data:\n raise JWTDecodeError(\"Missing claim: csrf\")\n if not safe_str_cmp(data['csrf'], csrf_value):\n raise CSRFError(\"CSRF double submit tokens do not match\")\n return data", "title": "" }, { "docid": "002536cd237a249eb51adcde35e8d198", "score": "0.5567763", "text": "def require_jwt(function):\n @functools.wraps(function)\n def decorated_function(*args, **kws):\n if not 'Authorization' in request.headers:\n abort(401)\n data = request.headers['Authorization']\n token = str.replace(str(data), 'Bearer ', '')\n try:\n jwt.decode(token, JWT_SECRET, algorithms=['HS256'])\n except: # pylint: disable=bare-except\n abort(401)\n\n return function(*args, **kws)\n return decorated_function", "title": "" }, { "docid": "05fb324aecb2c22c9f57b5210d758b43", "score": "0.54594964", "text": "def jwt_decode_no_verification(token):\n return jwt.decode(\n token,\n settings.GRAPHQL_JWT[\"JWT_SECRET_KEY\"],\n options={\n \"verify_exp\": False,\n \"verify_aud\": False,\n \"verify_signature\": False,\n },\n leeway=10,\n audience=settings.GRAPHQL_JWT[\"JWT_AUDIENCE\"],\n issuer=None,\n algorithms=[settings.GRAPHQL_JWT[\"JWT_ALGORITHM\"]],\n )", "title": "" }, { "docid": "b9cad7f86f1e681b997cacbb862402db", "score": "0.5380515", "text": "def decode_jwt():\n if not 'Authorization' in request.headers:\n abort(401)\n data = request.headers['Authorization']\n token = str.replace(str(data), 'Bearer ', '')\n try:\n data = jwt.decode(token, JWT_SECRET, algorithms=['HS256'])\n except: # pylint: disable=bare-except\n abort(401)\n\n\n response = {'email': data['email'],\n 'exp': data['exp'],\n 'nbf': data['nbf'] }\n return jsonify(**response)", "title": "" }, { "docid": "015d4a0e74bffe3c29875f70aac2b87a", "score": "0.53186584", "text": "def decode_key_loader(self, callback: Callable[..., RT]) -> Callable[..., RT]:\n ...", "title": "" }, { "docid": "2396a154abe3cc04970004a2f8155f60", "score": "0.5309064", "text": "def jwt_get_email_from_payload_handler(payload):\n\treturn payload.get('email')", "title": "" }, { "docid": "e9c9b58ace602d5a2e98e263633594e8", "score": "0.52776724", "text": "def token_required(f):\n @wraps(f)\n def wrap(*args, **kwargs):\n token = request.headers['Authorization']\n\n if not token:\n raise ValidationError({'message': jwt_errors['NO_TOKEN_MSG']}, 401)\n\n try:\n public_key = os.getenv(\"SECRET\")\n decoded_token = jwt.decode(token, public_key)\n current_user = decoded_token[\"id\"]\n except jwt.ExpiredSignatureError:\n return jsonify({'message': 
jwt_errors['EXPIRED_TOKEN_MSG']}), 401\n except ValueError:\n return jsonify({'message': jwt_errors['SERVER_ERROR_MESSAGE']}), 401\n\n setattr(request, 'decoded_token', decoded_token)\n return f(current_user, *args, **kwargs)\n return wrap", "title": "" }, { "docid": "77cb3d4e220417c3e0ba8a9b5005198b", "score": "0.52163726", "text": "def token_required(f):\n\n @wraps(f)\n def decorator(*args, **kwargs):\n token = None\n if 'x-access-tokens' in request.headers:\n token = request.headers['x-access-tokens']\n if not token:\n return jsonify({'message': 'a valid token is missing'})\n try:\n data = jwt.decode(token, app.config[\"SECRET_KEY\"], algorithms=[\"HS256\"])\n except Exception as e:\n return jsonify({'message': f'token is invalid: {e}'})\n user = Users()\n return f(user.first + '_' + user.last,*args, **kwargs)\n\n return decorator", "title": "" }, { "docid": "27dd460a8f1381336c015546629ed438", "score": "0.52006567", "text": "def jwt_optional(*expected_groups):\n def decorator(fn):\n @wraps(fn)\n def wrapper(*args, **kwargs):\n configs = config\n\n try:\n encoded_token = _get_encoded_token_from_request(configs)\n\n except (NoAuthorizationHeaderError, InvalidAuthorizationHeaderError):\n pass\n\n else:\n decoded_token = _decode_token_and_access_control(\n encoded_token,\n configs,\n 'access',\n expected_groups\n )\n\n if _is_token_in_blacklist(decoded_token):\n _request_ctx_stack.top.jwt = decoded_token\n else:\n abort(403)\n\n return fn(*args, **kwargs)\n return wrapper\n return decorator", "title": "" }, { "docid": "5b1a67ebd96fd2e796da64047c89f075", "score": "0.515275", "text": "def decode_token(token):\n try:\n payload = jwt.decode(token, Config.SECRET_KEY)\n return payload['sub']\n except jwt.ExpiredSignatureError:\n return Raise ('Signature expired. Please log in again.')\n except jwt.InvalidTokenError:\n return Raise ('Invalid token. 
Please log in again.')", "title": "" }, { "docid": "d7cf867fc92dfe6ad58c6dffe62a5a3d", "score": "0.51283854", "text": "def additional_headers_loader(self, callback: Callable) -> Callable:\n self._jwt_additional_header_callback = callback\n return callback", "title": "" }, { "docid": "cbe79b991aa19f9aca3d084f7314cf73", "score": "0.5112126", "text": "def token_required(f):\n @wraps(f)\n def decorated(*args, **kwargs):\n bearer_token = request.headers.get(\"Authorization\")\n if not bearer_token:\n return jsonify({\"message\": \"Token not set\", \"data\": {}}), 401\n\n token = bearer_token.replace(\"Bearer \", \"\")\n try:\n data = jwt.decode(token, Settings.SECRET_KEY, algorithms=['HS256'])\n user = Users.get_user_from_email(data.get('username'))\n kwargs['user'] = user\n except Exception as e:\n print(f\"Authentication: {str(e)}\")\n return jsonify({\"message\": \"Token is invalid or expired\", \"data\": {}}), 401\n\n return f(*args, **kwargs)\n return decorated", "title": "" }, { "docid": "cedb5bc34041c75a5d46e2776fda5ed3", "score": "0.510839", "text": "def decode_user_token(token):\n if token is None:\n return {}\n\n decoded_token = decode_token(token)\n\n if decoded_token is None:\n raise jwt.InvalidTokenError('Auth token audience cannot be verified.')\n if \"email_verified\" not in decoded_token:\n raise jwt.InvalidIssuerError('Can not retrieve the email_verified property from the token')\n if decoded_token[\"email_verified\"] in ('0', 'False', 'false'):\n raise jwt.InvalidIssuerError('Email of the user has not been validated')\n\n return decoded_token", "title": "" }, { "docid": "c380dd073fdce5c89e3e5afbf4c96042", "score": "0.5079819", "text": "def replace_external_key(decorated_method):\n\n def wrapper(self, key, *args, **kargs):\n if(key == self._external_id):\n key = \"_id\"\n\n return decorated_method(self, key, *args, **kargs)\n\n return wrapper", "title": "" }, { "docid": "782c191bbca06a581a54736e33b4fc8f", "score": "0.5068582", "text": "def decrypt_jwt_token(token):\n try:\n payload = jwt.decode(token, settings.SECRET_KEY)\n except JWTError:\n payload = None\n return payload", "title": "" }, { "docid": "fac990c1899c60530fc86aadb9ff3971", "score": "0.5046674", "text": "def _jwt_from_infodict(torrent_config, infodict):\n digest = hashlib.sha1()\n digest.update(bencode.bencode(infodict))\n return jwt_from_infohash(torrent_config, digest.digest())", "title": "" }, { "docid": "225b819103b7e1a30a44fc6e72bc8d42", "score": "0.50300044", "text": "def claims_verification_loader(\n self, callback: Callable[..., RT]\n ) -> Callable[..., RT]:\n ...", "title": "" }, { "docid": "117fbee97efa8a07aa6a1c9513afcf86", "score": "0.50166714", "text": "def process_jwt_payload(payload, key):\n try:\n return JWT(\n key=key,\n jwt=payload,\n check_claims={\"exp\": None}, # Weird syntax but it says\n # \"Check expiration time\"\n algs=[\"ES256\", \"ES384\", \"ES521\", \"RS256\", \"RS384\", \"RS512\", \"PS256\", \"PS384\", \"PS512\"],\n )\n except InvalidJWSObject:\n raise InvalidPayload()\n except JWTExpired:\n raise ExpiredToken()\n except JWTMissingKey:\n raise MissingKey()\n except InvalidJWSSignature:\n raise InvalidSignature()", "title": "" }, { "docid": "a3483d6cdd86e4aec8cb57c88bbfd8d0", "score": "0.5009627", "text": "def jwt_response_payload_handler(token, user=None, request=None):\n return {\n 'token': token\n }", "title": "" }, { "docid": "b67cd9eece1a0c100ee5b95f82936cd1", "score": "0.50089437", "text": "def test_jwt_requies_jwt_no_aud_token_no_identity(self, live_testapp_no_identity):\n with 
mock.patch('flask_jwt_consumer.decorators.get_jwt_raw',\n return_value=no_aud_token):\n with mock.patch('flask_jwt_consumer.decorators._brute_force_key',\n return_value=JWT_PUBLIC_KEY):\n protected = requires_jwt(identity)\n assert protected('De nada') == 'De nada'", "title": "" }, { "docid": "5328213db3b263b404dd215ab8817169", "score": "0.50021714", "text": "def custom_jwt_payload_handler(user):\n return {\n 'user_id': user.pk,\n 'email': user.email,\n 'is_superuser': user.is_superuser,\n 'username': user.username,\n 'exp': datetime.utcnow() + api_settings.JWT_EXPIRATION_DELTA,\n 'orig_iat': timegm(\n datetime.utcnow().utctimetuple()\n )\n }", "title": "" }, { "docid": "f60747cf672d4ed62cb7bafb0a9c5a3a", "score": "0.49858454", "text": "def token_needed(func):\n @wraps(func)\n def decorated(*args, **kwargs):\n # check headers for token\n token = request.headers.get(\"Token\", None)\n if not token:\n data = {\"message\": \"Please provide authentication token\"}\n return jsonify(data), 401\n\n # Decode token\n try:\n data = jwt.decode(token, app.config[\"SECRET_KEY\"], \"HS256\")\n current_user = Users.query.filter(Users.id == data[\"user_id\"]).first() # noqa E501\n\n except Exception:\n data = {\"message\": \"Invalid token\"}\n return jsonify(data), 401\n return func(current_user, *args, **kwargs)\n return decorated", "title": "" }, { "docid": "732f3248705f668a5e16ea65ef8b684d", "score": "0.49821508", "text": "def decode_token(token: str, verify: bool = True) -> dict:\n\n try:\n return jwt.decode(\n jwt=token,\n key=settings.JWT_AUTH_SETTINGS[\"PUBLIC_KEY\"],\n algorithms=[settings.JWT_AUTH_SETTINGS[\"ALGORITHM\"]],\n issuer=settings.JWT_AUTH_SETTINGS[\"ISSUER\"],\n audience=settings.JWT_AUTH_SETTINGS[\"AUDIENCE\"],\n verify=verify,\n )\n except jwt.exceptions.InvalidSignatureError:\n raise jwt.exceptions.InvalidTokenError(\n \"The token signature could not be verified.\"\n )\n except jwt.exceptions.ExpiredSignatureError:\n raise jwt.exceptions.InvalidTokenError(\"Expired token.\")\n except jwt.exceptions.InvalidAudienceError:\n raise jwt.exceptions.InvalidTokenError(\"Invalid audience.\")\n except jwt.exceptions.InvalidIssuerError:\n raise jwt.exceptions.InvalidTokenError(\"Invalid issuer.\")\n except jwt.exceptions.InvalidTokenError:\n raise jwt.exceptions.InvalidTokenError(\"Invalid token.\")", "title": "" }, { "docid": "f1db032f03099e558230dca5816ab5ac", "score": "0.4974487", "text": "def api_requires_auth(f):\n @wraps(f)\n def decorated(*args, **kwargs):\n\n logger.info('api_requires_auth() %s' % (request.method))\n\n access_token = get_access_token()\n logger.info('api_requires_auth() have token: ' + access_token)\n\n try:\n unverified_header = jwt.get_unverified_header(access_token)\n except jwt.exceptions.DecodeError as e:\n logger.error('api_requires_auth() decode error ' + e.__str__())\n raise\n except jwt.exceptions.InvalidTokenError:\n logger.error('api_requires_auth() invalid header')\n raise AuthError(\n {\"code\": \"invalid_header\",\n \"description\": \"Invalid header. Use RS256 signed JWT\"},\n 401)\n if unverified_header[\"alg\"] == \"HS256\":\n logger.error('api_requires_auth() invalid header alg')\n raise AuthError(\n {\"code\": \"invalid_header\",\n \"description\": \"Invalid header. 
\"\n \"Use an RS256 signed JWT Access Token\"},\n 401)\n\n # fetch the well known keys\n url = str(\"https://%s/.well-known/jwks.json\"\n % (auth_config['SPA']['domain']))\n jsonurl = urlopen(url)\n logger.info('api_requires_auth() fetched well known keys from: ' + url)\n data = jsonurl.read()\n jwks = json.loads(data.decode('utf8'))\n logger.info('api_requires_auth() jwks: ' + data.decode('utf8'))\n\n for key in jwks[\"keys\"]:\n if key[\"kid\"] == unverified_header[\"kid\"]:\n\n logger.info('api_requires_auth() key id matched')\n rsa_key = {\n \"kty\": key[\"kty\"],\n \"kid\": key[\"kid\"],\n \"use\": key[\"use\"],\n \"n\": key[\"n\"],\n \"e\": key[\"e\"]\n }\n break\n\n if rsa_key:\n try:\n logger.info(\"api_requires_auth() with rsa key for token '%s'\"\n % (access_token))\n payload = jwt.decode(\n access_token,\n key=rsa_key,\n algorithms=[\"RS256\"],\n audience=auth_config['SPA']['audience'],\n issuer=\"https://\" + auth_config['SPA']['domain'] + \"/\"\n )\n\n logger.info('api_requires_auth() key payload decoded')\n\n except jwt.exceptions.ExpiredSignatureError:\n logger.error('api_requires_auth() expired signature')\n raise AuthError({\"code\": \"token_expired\",\n \"description\": \"token is expired\"}, 401)\n except jwt.exceptions.JWTClaimsError:\n logger.error('api_requires_auth() invalid claims')\n raise AuthError({\"code\": \"invalid_claims\",\n \"description\": \"incorrect claims,\"\n \" please check the audience and issuer\"},\n 401)\n except Exception as e:\n logger.error('api_requires_auth() exception')\n raise AuthError({\"code\": \"invalid_header\",\n \"description\":\n \"Unable to parse authentication\"\n \" token. \" + e.__str__()}, 401)\n\n logger.info('api_requires_auth() all good!\\n%s' % (payload))\n # _request_ctx_stack.top.current_user = payload\n g.authd_user = copy.deepcopy(payload)\n return f(*args, **kwargs)\n\n logger.error('api_requires_auth() no appropriate key')\n\n raise AuthError({\"code\": \"invalid_header\",\n \"description\": \"Unable to find appropriate key\"}, 401)\n\n return decorated", "title": "" }, { "docid": "ff79634f06c279117330ece2e9b5d9b9", "score": "0.4962174", "text": "def jwt_get_username_from_payload_handler(payload):\n return payload['user_data'].get('email')", "title": "" }, { "docid": "bf60c42dcf8e419b268c7e9774d50f04", "score": "0.49545953", "text": "def requires_jwt(endpoint):\n\n @functools.wraps(endpoint)\n def check_auth_call(*args, **kwargs):\n token = request.headers.get(\"Authorization\")\n\n # check token is present\n if not token:\n return jsonify({\"error\": \"No token\"}), 401\n\n token_type, token = token.split(\" \")\n\n if token_type.lower() != \"bearer\":\n return jsonify({\"error\": \"Wrong token type\"}), 401\n\n try:\n jwt.decode(token, SECRET, audience=SERVERNAME, algorithms=[\"HS256\"])\n except Exception:\n return jsonify({\"error\": \"Invalid token\"}), 401\n\n return endpoint(*args, **kwargs)\n\n return check_auth_call", "title": "" }, { "docid": "e5257708f08ab4f3133436d3ffdef23a", "score": "0.4951728", "text": "def requires_auth(f):\n @wraps(f)\n def decorated(*args, **kwargs):\n token = get_token_auth_header()\n if token != \"DUMMY_TOKEN\":\n raise AuthError({\"code\": \"invalid_header\", \"description\": \"Unable to find appropriate key\"}, 400)\n return f(*args, **kwargs)\n\n return decorated", "title": "" }, { "docid": "e075c249ba06f59fd38f0a877ce5ca46", "score": "0.49419388", "text": "def jwt_required(*expected_groups):\n def decorator(fn):\n @wraps(fn)\n def wrapper(*args, **kwargs):\n configs = 
config\n\n try:\n encoded_token = _get_encoded_token_from_request(configs)\n\n except NoAuthorizationHeaderError:\n abort(400)\n\n except InvalidAuthorizationHeaderError:\n abort(422)\n\n else:\n decoded_token = _decode_token_and_access_control(\n encoded_token,\n configs,\n 'access',\n expected_groups\n )\n\n if _is_token_in_blacklist(decoded_token):\n _request_ctx_stack.top.jwt = decoded_token\n else:\n abort(403)\n\n return fn(*args, **kwargs)\n return wrapper\n return decorator", "title": "" }, { "docid": "7ad94227dd26eb583e7aeb9d4d648f82", "score": "0.4939828", "text": "def _get_key_id_from_jwt_header(a_jwt):\n header = jwt.get_unverified_header(a_jwt)\n return KeyIdentifier(header['kid'])", "title": "" }, { "docid": "8f68a3c11db1a8697a522aeb151ec8bd", "score": "0.49362534", "text": "def decode(token, certs=None, verify=True, audience=None):\n\n return jwt.decode(token, certs, verify, audience)", "title": "" }, { "docid": "04ac2ae83a0b2fa5687c3c41f6f83668", "score": "0.4921669", "text": "def encode_key_loader(self, callback: Callable[..., RT]) -> Callable[..., RT]:\n ...", "title": "" }, { "docid": "e67ef205bb1e9a1e4526b699773d6acc", "score": "0.49112794", "text": "def jwt_output(profile=DEFAULT_PROFILE_NAME, debug=None):\n # type:(str, str)->Callable\n\n def wrapper_function(f):\n @inject(config=Config)\n def get_config(config):\n # type: (Config)->Config\n return config\n\n @wraps(f)\n def wrapper(*args, **kwargs):\n payload = f(*args, **kwargs)\n d = {'token': encode(payload, profile=profile)}\n if debug or (debug is None and get_config().get('DEBUG', False)):\n d['payload'] = payload\n return d\n\n return wrapper\n\n return wrapper_function", "title": "" }, { "docid": "15b57879181a03b5a0b3794e813ca71e", "score": "0.49098518", "text": "def decode_auth_token(auth_token: str) -> Optional[str]:\n try:\n payload = jwt.decode(jwt=auth_token, key=current_app.config.get('SECRET_KEY'), algorithms=['HS256'])\n return payload.get('sub')\n\n except jwt.ExpiredSignatureError:\n return None\n except jwt.InvalidTokenError:\n return None", "title": "" }, { "docid": "5e5c62ef95aeecf342dff2786cb6c5f9", "score": "0.48992422", "text": "def decode_token(serialized_token, public_key,\n required_claims=None, audience=None):\n if required_claims is None:\n required_claims = []\n\n required_claims = set(required_claims).union(_DEFAULT_CLAIMS)\n payload = jwt.decode(serialized_token, public_key,\n audience=audience,\n algorithms=[u'RS256'])\n for claim in required_claims:\n if payload.get(claim) is None:\n raise MissingRequiredClaimError(claim)\n\n return payload", "title": "" }, { "docid": "2c4ac1944450c04ecab060f089f8e30b", "score": "0.48898667", "text": "def decoded_and_verified_token(encoded_token):\n # Modified from\n # https://developers.google.com/identity/sign-in/web/backend-auth\n try:\n # Client ID of the Google API (found in Developer Console)\n\n # Raises ValueError if token is invalid\n decoded_token = id_token.verify_oauth2_token(\n encoded_token, requests.Request())\n\n if decoded_token[\"aud\"] not in [client_ids]:\n raise ValueError(\"Could not verify audience (API client ID)\")\n\n if decoded_token[\"hd\"] not in authorized_email_domains:\n raise ValueError(\"Domain of token not authorized\")\n\n if decoded_token[\"iss\"] not in [\n \"accounts.google.com\", \"https://accounts.google.com\"]:\n raise ValueError(\"Token not issued by Google\")\n\n except ValueError:\n return None\n\n return decoded_token", "title": "" }, { "docid": "59c7aee00e9d5a9d03cc1187c08a8e4b", "score": "0.4889234", 
"text": "def jwt_decode(text):\n parts = text.split('.')\n ret = global_vars.JWT_HEADER_TAG+ unicode(base64.urlsafe_b64decode(str(parts[0]+'===')), errors='ignore')\n ret += global_vars.JWT_PAYLOAD_TAG + unicode(base64.urlsafe_b64decode(str(parts[1]+'===')), errors='ignore')\n return ret", "title": "" }, { "docid": "54c87e2e6500f5309a0de39a220610f1", "score": "0.4889112", "text": "def get_unverified_header(self, token):\n del token\n return {\"alg\": \"RS256\"}", "title": "" }, { "docid": "e887e2ffe9ceb4127af7af476387d22d", "score": "0.48808736", "text": "def rotate_JWT(func):\n def decorated_function(self, request, *args, **kwargs):\n self.headers = {'Authorization': request.META['HTTP_AUTHORIZATION']}\n if 'headers' in request.META and 'refresh' in request.META['headers']:\n raw_token = request.META['headers']['refresh'].split(\" \")[-1]\n\n token_serializer = TokenRefreshSerializer(data={'refresh': raw_token})\n token_serializer.is_valid(raise_exception=True)\n access = token_serializer.validated_data['access']\n refresh = token_serializer.validated_data['refresh']\n\n self.headers['Refresh'] = settings.JWT_HEADER_TYPE + refresh\n self.headers['Authorization'] = settings.JWT_HEADER_TYPE + access\n\n return func(self, request, *args, **kwargs)\n return decorated_function", "title": "" }, { "docid": "9ce581b322269bc927a19f03b8761375", "score": "0.48788622", "text": "def get_bad_token():\n return \"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VyIjp7InVzZXJu\" \\\n \"YW1lIjoiU3VwcGx5IENoYWluIE1hbmFnZXIgKG1kcTNRMEZDYlEpIiwiZ\" \\\n \"GVtb0lkIjoiN2U3ZjQ1ZTA1ZTQyNTFiNWFjZDBiMTlmYTRlZDI5OTIiLC\" \\\n \"JlbWFpbCI6ImNocmlzLm1kcTNRMEZDYlFAYWNtZS5jb20iLCJyb2xlcyI\" \\\n \"6W3siY3JlYXRlZCI6IjIwMTYtMDYtMDFUMTE6MTU6MzQuNTE1WiIsImlk\" \\\n \"IjoiM2RlZjE0MzZlYjUxZTQzOWU3ZmI1MDA5ZmVjM2EwZWIiLCJtb2RpZ\" \\\n \"mllZCI6IjIwMTYtMDYtMDFUMTE6MTU6MzQuNTE1WiIsIm5hbWUiOiJzdX\" \\\n \"BwbHljaGFpbm1hbmFnZXIifV0sImlkIjoiN2U3ZjQ1ZTA1ZTQyNTFiNWF\" \\\n \"jZDBiMTlmYTRlZDQ3OTAifSwiZXhwIjoxNDY2MDQ1MzczLCJsb29wYmFj\" \\\n \"a190b2tlbiI6ImhFRnJzeGhSa3lBUEhQWWN0TWtEaE9mSTZOaDY5TlBzc\" \\\n \"FhkRWhxWXVSTzBqZDBLem1HVkZFbnpRZVRwVTV2N28ifQ.I8_iqpK7pwY\" \\\n \"5mmND220MhnsMDS5FtqRhtliEiXoMAGM\"", "title": "" }, { "docid": "8d8b9319aa4d6d30e7ec3daca452704e", "score": "0.48778754", "text": "def decode_auth_token(auth_token):\n try:\n payload = jwt.decode(auth_token, app.config.get('JWT_SECRET_KEY'))\n return payload['sub']\n except jwt.ExpiredSignatureError:\n return 'Signature expired. Please log in again.'\n except jwt.InvalidTokenError:\n return 'Invalid token. Please log in again.'", "title": "" }, { "docid": "ba95d8ac645f6443138eb431c97fea84", "score": "0.48718378", "text": "def decode_auth_token(auth_token):\n try:\n payload = jwt.decode(auth_token, app.config.get('SECRET_KEY'))\n is_blacklisted_token = BlacklistToken.check_blacklist(auth_token)\n if is_blacklisted_token:\n return 'Token blacklisted. Please log in again.'\n else:\n return payload['sub']\n except jwt.ExpiredSignatureError:\n return 'Signature expired. Please log in again.'\n except jwt.InvalidTokenError:\n return 'Invalid token. 
Please log in again.'", "title": "" }, { "docid": "f5a64f9dc8835b63db93e8371f166161", "score": "0.48691732", "text": "def decode_auth_token(auth_token):\n try:\n payload = jwt.decode(auth_token, config.SECRET_KEY, algorithms=['HS256'])\n return payload\n except jwt.ExpiredSignatureError as e1:\n raise e1\n except jwt.InvalidTokenError as e2:\n raise e2", "title": "" }, { "docid": "263f2ca6a7cd028cba8eba0993623520", "score": "0.4866307", "text": "def decode_token(token):\n if token.startswith('Bearer '):\n _, token = token.split(' ', 1)\n\n pub_keys = fetch_public_keys(current_app)\n\n for pub_key in pub_keys:\n try:\n pub_key = pub_key.get(\"key\", \"\")\n decoded_token = jwt.decode(token, pub_key, algorithms=['RS256'])\n except jwt.InvalidTokenError:\n current_app.logger.error(\"Auth token couldn't be decoded for public key: {}\"\n .format(pub_key))\n decoded_token = None\n\n if decoded_token:\n break\n\n if not decoded_token:\n raise jwt.InvalidTokenError('Auth token cannot be verified.')\n\n return decoded_token", "title": "" }, { "docid": "bbd292db5f21719f05584b31b4463106", "score": "0.48634", "text": "def decode_jwt(encoded_token, secret, algorithms, identity_claim_key,\n user_claims_key, csrf_value=None, audience=None,\n leeway=0, allow_expired=False, issuer=None):\n options = {}\n if allow_expired:\n options['verify_exp'] = False\n\n # This call verifies the ext, iat, nbf, and aud claims\n data = jwt.decode(encoded_token, secret, algorithms=algorithms, audience=audience,\n leeway=leeway, options=options, issuer=issuer)\n\n # Make sure that any custom claims we expect in the token are present\n if 'jti' not in data:\n data['jti'] = None\n if identity_claim_key not in data:\n raise JWTDecodeError(\"Missing claim: {}\".format(identity_claim_key))\n if 'type' not in data:\n data['type'] = 'access'\n if data['type'] not in ('refresh', 'access'):\n raise JWTDecodeError(\"Missing or invalid claim: type\")\n if data['type'] == 'access':\n if 'fresh' not in data:\n data['fresh'] = False\n if user_claims_key not in data:\n data[user_claims_key] = {}\n if csrf_value:\n if 'csrf' not in data:\n raise JWTDecodeError(\"Missing claim: csrf\")\n if not safe_str_cmp(data['csrf'], csrf_value):\n raise CSRFError(\"CSRF double submit tokens do not match\")\n return data", "title": "" }, { "docid": "8ff0d252645fd46d56043aa8dc1f2c0c", "score": "0.48594368", "text": "def token_required(f):\n @wraps(f)\n def decorated(*args, **kwargs):\n token = None\n if 'x-access-token' in request.headers:\n token = request.headers['x-access-token']\n if not token:\n return response_fn(401, \"error\", \"Token is missing\")\n try:\n data = jwt.decode(token, KEY, algorithms='HS256')\n query = \"\"\"\n SELECT email, id, isAdmin FROM users\n WHERE users.email = '{}'\"\"\".format(data['email'])\n\n user = select_data_from_db(query)\n\n except:\n return response_fn(401, \"error\", \"Token is expired or invalid\")\n\n return f(user, *args, **kwargs)\n return decorated", "title": "" }, { "docid": "fc280c1e0f41285c8901b68bd34f97a5", "score": "0.48559135", "text": "def decode_token():\n token = request.headers.get('Authorization')\n if token is None:\n return {}\n\n if token.startswith('Bearer '):\n _, token = token.split(' ', 1)\n\n pub_key = fetch_public_key(current_app)\n audiences = configuration.BAYESIAN_JWT_AUDIENCE.split(',')\n\n decoded_token = None\n for aud in audiences:\n try:\n # check if the token can be decoded using the provided publick key\n decoded_token = jwt.decode(token.encode('ascii'), pub_key, 
algorithm='RS256',\n audience=aud)\n except jwt.InvalidTokenError:\n current_app.logger.error('Auth Token could not be decoded for audience {}'.format(aud))\n decoded_token = None\n\n if decoded_token is not None:\n break\n\n if decoded_token is None:\n raise jwt.InvalidTokenError('Auth token audience cannot be verified.')\n\n return decoded_token", "title": "" }, { "docid": "e2c01a8067deb6587dd162304c318e94", "score": "0.48549703", "text": "def decode_auth_token(token):\n secret = os.getenv('SECRET_KEY')\n\n try:\n payload = jwt.decode(token, secret)\n is_token_blacklisted = BlackListToken.check_blacklist(token)\n if is_token_blacklisted:\n return 'Token was Blacklisted, Please login In'\n return payload\n except jwt.ExpiredSignatureError:\n return 'Signature expired, Please sign in again'\n except jwt.InvalidTokenError:\n return 'Invalid token. Please sign in again'", "title": "" }, { "docid": "aa535611449ffbbb852465f6de2dee80", "score": "0.48413685", "text": "def jwt_response_payload_handler(token, user=None, request=None):\n return {\n 'token': token,\n 'exp-time': datetime.utcnow() + api_settings.JWT_EXPIRATION_DELTA\n }", "title": "" }, { "docid": "5c5d8a2f62766cfc1bc67bfcc31e8d5e", "score": "0.4838224", "text": "def verify_self_signed_signature(config):\n\n payload = unverified_entity_statement(config)\n keyjar = KeyJar()\n keyjar.import_jwks(payload['jwks'], payload['iss'])\n\n _jwt = JWT(key_jar=keyjar)\n _val = _jwt.unpack(config)\n return _val", "title": "" }, { "docid": "feeeb29f0084e44be01e0c7d16c2414d", "score": "0.48344132", "text": "def decode_auth_token(token):\n try:\n payload = jwt.decode(token, app.config['SECRET_KEY'], algorithms='HS256')\n is_token_blacklisted = BlackListToken.check_blacklist(token)\n if is_token_blacklisted:\n return 'Token was Blacklisted, Please login In'\n return payload['sub']\n except jwt.ExpiredSignatureError:\n return 'Signature expired, Please sign in again'\n except jwt.InvalidTokenError:\n return 'Invalid token. Please sign in again'", "title": "" }, { "docid": "bddad0a7ee40f660e3f60b330427df06", "score": "0.48289186", "text": "def decode_user_jwt(self, token, options={}):\n # Find the key to use.\n headers = jwt.get_unverified_header(token)\n kid = headers.get(\"kid\", None)\n if kid is None:\n raise InvalidTokenError(\"Missing `kid` header\")\n\n logger.debug(\n \"Using key `%s`, attempting to decode token `%s` with aud `%s` and iss `%s`\",\n kid,\n token,\n self.client_id(),\n self._issuer,\n )\n\n key = \"\"\n if options.get(\"verify_signature\", True):\n key = self._get_public_key(kid)\n\n try:\n return decode(\n token,\n key,\n algorithms=ALLOWED_ALGORITHMS,\n audience=self.client_id(),\n issuer=self._issuer,\n leeway=JWT_CLOCK_SKEW_SECONDS,\n options=dict(require=[\"iat\", \"exp\"], **options),\n )\n except InvalidTokenError as ite:\n logger.warning(\n \"Could not decode token `%s` for OIDC: %s. Will attempt again after \"\n + \"retrieving public keys.\",\n token,\n ite,\n )\n\n # Public key may have expired. Try to retrieve an updated public key and use it to decode.\n try:\n return decode(\n token,\n self._get_public_key(kid, force_refresh=True),\n algorithms=ALLOWED_ALGORITHMS,\n audience=self.client_id(),\n issuer=self._issuer,\n leeway=JWT_CLOCK_SKEW_SECONDS,\n options=dict(require=[\"iat\", \"exp\"], **options),\n )\n except InvalidTokenError as ite:\n logger.warning(\n \"Could not decode token `%s` for OIDC: %s. 
Attempted again after \"\n + \"retrieving public keys.\",\n token,\n ite,\n )\n\n # Decode again with verify_signature=False, and log the decoded token to allow for easier debugging.\n nonverified = decode(\n token,\n self._get_public_key(kid, force_refresh=True),\n algorithms=ALLOWED_ALGORITHMS,\n audience=self.client_id(),\n issuer=self._issuer,\n leeway=JWT_CLOCK_SKEW_SECONDS,\n options=dict(require=[\"iat\", \"exp\"], verify_signature=False, **options),\n )\n logger.debug(\"Got an error when trying to verify OIDC JWT: %s\", nonverified)\n raise ite", "title": "" }, { "docid": "06c43e319d68421365d2a46ba8fdcdcd", "score": "0.4818566", "text": "def token_required(f):\n @wraps(f)\n def decorated(*args, **kwargs):\n token = None\n\n if 'Authorization' in request.headers:\n '''token = request.headers['Authorization']\n Pass token to the header\n '''\n token = request.headers.get('Authorization')\n\n if not token:\n return jsonify({\"message\": \"Token missing\"})\n\n\n try:\n data = jwt.decode(token, JWT_SECRET, algorithms=[JWT_ALGORITHM])\n\n sql = \"SELECT username, password, id FROM mydiary_users WHERE id=%s\" % (data['id'])\n db_connection.cursor.execute(sql)\n current_user = db_connection.cursor.fetchone()\n except Exception as ex:\n return jsonify({\"Bad token message\": str(ex)})\n\n return f(current_user, *args, **kwargs)\n return decorated", "title": "" }, { "docid": "3a1fb21883ebc592d11066c725dfdb79", "score": "0.48181623", "text": "def jwt_generated_token_private_key(self) -> Optional[str]:\r\n pass", "title": "" }, { "docid": "fb12d9788b4474b5c294528dba577970", "score": "0.48132452", "text": "def decode_auth_token(auth_token):\n try:\n payload = jwt.decode(auth_token, os.getenv('SECRET_KEY'))\n return payload['sub']\n except jwt.ExpiredSignatureError:\n abort(403, 'Signature expired. Please log in again.')\n except jwt.InvalidTokenError:\n abort(403, 'Invalid token. Please log in again.')", "title": "" }, { "docid": "50cb493b7dd88f276e9f40923ada2c97", "score": "0.48112884", "text": "def decode_token(token):\n try:\n # Try to decode the token using our SECRET variable\n payload = jwt.decode(token, 'hard to guess string')\n return payload['sub']\n except jwt.ExpiredSignatureError:\n # The token is expired, return an error string\n return 'Expired token. Please login to get a new token'\n except jwt.InvalidTokenError:\n # The token is invalid, return an error string\n return 'Invalid token. 
Please register or login'", "title": "" }, { "docid": "d1e1b3ae0d4d375c48225b84d6e98086", "score": "0.4808102", "text": "def jwt_decode(token):\n return jwt.decode(\n token,\n settings.GRAPHQL_JWT[\"JWT_SECRET_KEY\"],\n options={\n \"verify_exp\": settings.GRAPHQL_JWT[\"JWT_VERIFY_EXPIRATION\"],\n \"verify_aud\": settings.GRAPHQL_JWT[\"JWT_AUDIENCE\"],\n \"verify_signature\": settings.GRAPHQL_JWT[\"JWT_VERIFY\"],\n },\n leeway=10,\n audience=settings.GRAPHQL_JWT[\"JWT_AUDIENCE\"],\n issuer=None,\n algorithms=[settings.GRAPHQL_JWT[\"JWT_ALGORITHM\"]],\n )", "title": "" }, { "docid": "2e6652410c88e64800968b1718539401", "score": "0.4802989", "text": "def _decode_auth_token(auth_token):\n try:\n payload = jwt.decode(auth_token, current_app.config.get('SECRET_KEY'))\n return payload['sub']\n except jwt.ExpiredSignatureError:\n return {'message': 'SIGNATURE EXPIRED'}\n except jwt.InvalidTokenError:\n return {'message': 'INVALID TOKEN'}", "title": "" }, { "docid": "34629277c7b788b8376fad1dd94ab053", "score": "0.4798056", "text": "def validate_api_key():\n\n def decorator(view):\n def wrapper(request, *args, **kwargs):\n return _check_auth(request, view, *args, **kwargs)\n return wrapper\n\n return decorator", "title": "" }, { "docid": "01fdb5ee02f9178087367bda1f78faac", "score": "0.4782678", "text": "def jwt_response_payload_handler(token, user=None, request=None):\n\treturn {\n\t\t'token': token\n\t}", "title": "" }, { "docid": "0a3e5ee89035f41bafbe3c0721115adb", "score": "0.47795203", "text": "def validate_token(token, secret, default=_NO_DEFAULT, **kwargs):\n _assert_stringy(secret=secret)\n kwargs.setdefault('algorithms', [JWT_ALGO[0]])\n try:\n payload = jwt.decode(token, secret, **kwargs)\n return payload\n\n except jwt.exceptions.PyJWTError:\n if default is _NO_DEFAULT:\n raise\n\n return default", "title": "" }, { "docid": "06477d5f41aa149bff1a088550b7b676", "score": "0.47781664", "text": "def token_verification_loader(self, callback: Callable) -> Callable:\n self._token_verification_callback = callback\n return callback", "title": "" }, { "docid": "57a82a7ffedb2dceba737a54c024b159", "score": "0.47628316", "text": "def decode_auth_token(auth_token):\r\n try:\r\n payload = jwt.decode(auth_token, DB_SECRET_KEY)\r\n return payload['sub']\r\n except jwt.ExpiredSignatureError:\r\n return 'Signature expired. Please log in again.'\r\n except jwt.InvalidTokenError:\r\n return 'Invalid token. Please log in again.'", "title": "" }, { "docid": "441cdb0048645ecd6c30bf2d84924a60", "score": "0.47553313", "text": "def decode_auth_token(auth_token):\n try:\n payload = jwt.decode(auth_token, key)\n is_blacklisted_token = BlacklistToken.check_blacklist(auth_token)\n if is_blacklisted_token:\n return 'Token blacklisted. Please log in again.'\n else:\n return payload['sub']\n except jwt.ExpiredSignatureError:\n return 'Signature expired. Please log in again.'\n except jwt.InvalidTokenError:\n return 'Invalid token. 
Please log in again.'", "title": "" }, { "docid": "c1c520126e327ddfad41b3ef0c481f10", "score": "0.47550124", "text": "def claims_verification_failed_loader(\n self, callback: Callable[..., RT]\n ) -> Callable[..., RT]:\n ...", "title": "" }, { "docid": "538f4e504e77331a43cb6ade190fa288", "score": "0.4754924", "text": "def decode_auth_token(auth_token: str, secret_key: str = \"notasecret\") -> str:\n try:\n payload = jwt.decode(auth_token, secret_key)\n return payload['sub']\n except jwt.ExpiredSignatureError:\n payload = jwt.decode(auth_token, secret_key, verify=False)\n return TOKEN_EXPIRED, payload['sub']\n except jwt.InvalidTokenError:\n return INVALID_TOKEN", "title": "" }, { "docid": "72694880e29ddb69a9ca29fc3f7a0e92", "score": "0.4750859", "text": "def decode_auth_token(auth_token):\n try:\n payload = jwt.decode(auth_token, key)\n return payload['sub']\n except jwt.ExpiredSignatureError:\n return 'ERROR: Signature expired. Please log in again.'\n except jwt.InvalidTokenError:\n return 'ERROR: Invalid token. Please log in again.'", "title": "" }, { "docid": "92e75dda1353aaa8df11f1c492405a8d", "score": "0.47440428", "text": "def encode_token(claims, private_key):\n return jwt.encode(claims, private_key, algorithm=u'RS256')", "title": "" }, { "docid": "2b607599a5909749e6ae9ce100d927e0", "score": "0.47439227", "text": "def verify(token, access_token=None):\n # get the key id from the header, locate it in the cognito keys\n # and verify the key\n jwks = well_known_jwks()\n\n header = jwt.get_unverified_header(token)\n key = [k for k in jwks if k[\"kid\"] == header['kid']][0]\n id_token = jwt.decode(token, key, audience=blueprint.config['AWS_COGNITO_CLIENT_ID'], access_token=access_token)\n return id_token", "title": "" }, { "docid": "1b03e5e9c7084395eb774b0c8291a981", "score": "0.47381476", "text": "def decode_auth_token(auth_token):\n try:\n payload = jwt.decode(auth_token, os.getenv('SECRET'))\n return payload['sub']\n except jwt.ExpiredSignatureError:\n return 'Signature expired. Please log in again.'\n except jwt.InvalidTokenError:\n return 'Invalid token. Please log in again.'", "title": "" }, { "docid": "dabf89fc633b0fb7366dfaaede53bb88", "score": "0.47351003", "text": "def parse_token(token: str, refetch_public_key_on_failure: bool = False) -> Any:\n try:\n public_key = get_public_key()\n return jwt.decode(token, public_key, algorithms=[\"RS256\"])\n except jwt.ExpiredSignatureError:\n raise ValueError(\"The token has expired.\")\n except Exception:\n if refetch_public_key_on_failure:\n update_public_key()\n return parse_token(token, refetch_public_key_on_failure=False)\n else:\n raise ValueError(\"Invalid token.\")", "title": "" }, { "docid": "539721a22342d38bd3e5ab86ee926eac", "score": "0.47344518", "text": "def decode_token(token):\n try:\n payload = jwt.decode(token, str(current_app.config.get('SECRET')))\n return payload['sub']\n except jwt.ExpiredSignatureError:\n return \"Expired token. Please log in to get a new token\"\n except jwt.InvalidTokenError:\n return \"Invalid token. Please register or login\"", "title": "" }, { "docid": "d930145264415c5759e514329bd1e087", "score": "0.47323814", "text": "def decode_auth_token(auth_token):\n try:\n payload = jwt.decode(auth_token, key, algorithms='HS256')\n is_blacklisted_token = BlacklistToken.check_blacklist(auth_token)\n if is_blacklisted_token:\n return 'Token blacklisted. Please log in again.'\n else:\n return payload['sub']\n except jwt.ExpiredSignatureError:\n return 'Signature expired. 
Please log in again.'\n except jwt.InvalidTokenError:\n return 'Invalid token. Please log in again.'", "title": "" }, { "docid": "cc15f066ad9c2a74b0860d3564ff8d0d", "score": "0.4728266", "text": "def jwt_refresh_token_required(*expected_groups):\n def decorator(fn):\n @wraps(fn)\n def wrapper(*args, **kwargs):\n configs = config\n\n try:\n encoded_token = _get_encoded_token_from_request(configs)\n\n except NoAuthorizationHeaderError:\n abort(400)\n\n except InvalidAuthorizationHeaderError:\n abort(422)\n\n else:\n decoded_token = _decode_token_and_access_control(\n encoded_token,\n configs,\n 'refresh',\n expected_groups\n )\n\n if _is_token_in_blacklist(decoded_token):\n _request_ctx_stack.top.jwt = decoded_token\n else:\n abort(403)\n\n return fn(*args, **kwargs)\n return wrapper\n return decorator", "title": "" }, { "docid": "ce7a04b5b20109deaad625b7d57b9323", "score": "0.47261345", "text": "def decode_id_token(self, id_token):\n if not id_token:\n raise AuthFailed(self, 'Missing id_token parameter')\n\n try:\n kid = jwt.get_unverified_header(id_token).get('kid')\n public_key = RSAAlgorithm.from_jwk(self.get_apple_jwk(kid))\n decoded = jwt.decode(\n id_token,\n key=public_key,\n audience=self.get_audience(),\n algorithms=['RS256'],\n )\n except PyJWTError:\n raise AuthFailed(self, 'Token validation failed')\n\n return decoded", "title": "" }, { "docid": "9b6768008d95d9e95394ee19440238c3", "score": "0.47256038", "text": "def login_key(func):\n def new_func(*args, **kwargs):\n if 'login_key' in kwargs:\n login_key = kwargs['login_key']\n request = args[0]\n \n if login_key is not None:\n result = key_login(request, login_key)\n \n if result['error']:\n # Ditch out of the view with 401 Unauthorized\n return HttpResponse(content=\"Bad login key.\",\n status=401)\n \n del kwargs['login_key']\n\n return func(*args, **kwargs)\n return new_func", "title": "" }, { "docid": "189b7f0c0fbaeb6e4ca37f05a446ad35", "score": "0.4725391", "text": "def jwt_get_user_id_from_payload_handler(payload):\n\twarnings.warn(\n\t\t'The following will be removed in the future. 
'\n\t\t'Use `JWT_PAYLOAD_GET_USERNAME_HANDLER` instead.',\n\t\tDeprecationWarning\n\t)\n\n\treturn payload.get('user_id')", "title": "" }, { "docid": "759a51d7521d3fc0f67827b11f3770fa", "score": "0.4721357", "text": "def verify(\n self, payload: bytes, footer: bytes = b\"\", implicit_assertion: bytes = b\"\"\n ) -> bytes:\n raise NotSupportedError(\"A key for local does not have verify().\")", "title": "" }, { "docid": "c88908a0aea14e226aceb8599ffa0ef0", "score": "0.47052684", "text": "def authenticate(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n response_object = {\n 'status': 'fail',\n 'message': 'Please provide a valid auth token'\n }\n\n # get auth token\n post_data = request.get_json()\n auth_header = post_data.get('auth_token')\n eth_address = post_data.get('eth_address')\n\n # print()\n # print(\"=================================\")\n # print(\"auth_header: \", auth_header)\n # print(\"eth_address: \", eth_address)\n\n if not auth_header:\n return jsonify(response_object), 403\n\n if not eth_address:\n response_object = 'Please provide eth address'\n return jsonify(response_object), 401\n\n auth_token = auth_header.split(\" \")[0]\n eth_address = eth_address.split(\" \")[0]\n\n resp = User.decode_auth_token(auth_token)\n\n if isinstance(resp, str):\n response_object['message'] = resp\n return jsonify(response_object), 401\n\n user = User.query.filter_by(id=resp).first()\n\n if not user or not user.active:\n return jsonify(response_object), 401\n\n if user.eth_address != eth_address.strip().lower():\n return jsonify(response_object), 401\n\n return f(resp, post_data, *args, **kwargs)\n return decorated_function", "title": "" }, { "docid": "4dca47e0eea833827ae27540e95e83f2", "score": "0.46993557", "text": "def jwt_required(func):\n\n # apply the decorator\n @wraps(func)\n def wrapper(*args, **kwargs):\n print('config')\n print(current_app.config['TESTING'])\n if current_app.config['TESTING']:\n # skip for test suite\n auth_required_func = func\n else:\n # use original jwt_required function\n auth_required_func = orig_jwt_required(func)\n\n return auth_required_func(*args, **kwargs)\n\n # add header requirements to API docs (i.e. Swagger)\n parameters = {\n 'name': 'Authorization',\n 'in': 'header',\n 'description': 'Bearer <access_token>',\n 'required': 'true'\n }\n\n wrapper._apidoc = getattr(func, '_apidoc', {})\n wrapper._apidoc.setdefault('parameters', []).append(parameters)\n\n return wrapper", "title": "" }, { "docid": "6806533f25bb4f5d200c98e3b75fa735", "score": "0.46985832", "text": "def authorized_optional(func):\n @wraps(func)\n def decorated_function(*args, **kwargs):\n try:\n auth = JWTAuthorization(secret_key=config.SECRET_KEY)\n token = auth.get_acces_token_from_headers(request.headers)\n user_id = auth.get_user_id_from_acces_token(token)\n except AuthorizationException:\n return func(*args, **kwargs)\n return func(*args, **kwargs, user_id=user_id)\n\n return decorated_function", "title": "" }, { "docid": "88d870cdc4b2ef8e28c29c6a755c49ee", "score": "0.46938646", "text": "def decode_auth_token(auth_token):\n from api.users.models import User\n try:\n payload = jwt.decode(auth_token, Config.SECRET_KEY)\n # is_blacklisted_token = BlacklistToken.check_blacklist(auth_token)\n # if is_blacklisted_token:\n # return 'Token blacklisted. Please log in again.'\n # else:\n msg = {\"error\": \"Invalid Token! 
Please login to continue.\"}\n user = User.get_user(email=payload['sub'])\n return user if user else (jsonify(msg), 403) \n except jwt.ExpiredSignatureError:\n return jsonify({'error': 'Signature expired. Please log in again.'}), 403\n except jwt.InvalidTokenError:\n return jsonify({'error': 'Invalid token. Please log in again.'}), 403", "title": "" }, { "docid": "4a7313d3b30b16260790715cb7fe316d", "score": "0.46921343", "text": "def jwt_claim(request):\n return request.param", "title": "" }, { "docid": "2de8d453680cfd171964543ae698635f", "score": "0.469168", "text": "def _jwt_token(payload, secret, expires_in=30, with_times=True, **kwargs):\n if payload is None:\n payload = {}\n\n headers = kwargs.get('headers', {})\n if 'headers' in kwargs:\n del kwargs['headers']\n\n if secret is None:\n raise ValueError('Bad secret')\n\n if with_times:\n ext_payload = dict(\n exp=_to_numeric(datetime.datetime.utcnow() + datetime.timedelta(seconds=expires_in)),\n nbf=_to_numeric(datetime.datetime.utcnow()),\n )\n\n # Update the extended payload with passed payload\n ext_payload.update(payload)\n payload = ext_payload\n\n # Add 'exp' to headers if not already present\n if 'exp' not in headers:\n headers['exp'] = ext_payload['exp']\n\n return jwt.encode(payload, secret, headers=headers, **kwargs)", "title": "" }, { "docid": "0278464407a6cf16b8b783b12522a75b", "score": "0.4678195", "text": "def revoked_token_loader(self, callback: Callable[..., RT]) -> Callable[..., RT]:\n ...", "title": "" }, { "docid": "8ebe2ab681182eea4ab675665cbbf461", "score": "0.4673034", "text": "def decode_jwt(self, token: bytes):\n\n key = os.getenv(\"JWT_SECRET\")\n\n try:\n decoded = jwt.decode(\n token,\n key,\n algorithms=\"HS256\",\n issuer=AuthSettings.JWT_ISSUER,\n options={\"require\": [\"exp\", \"iss\", \"email\"]},\n )\n except jwt.ExpiredSignatureError:\n self._logger.log(LogEntry(\n LogLevel.INFO,\n __name__,\n \"JWT Token expired for user.\"))\n raise jwt.ExpiredSignatureError(\"Expired token.\")\n\n except jwt.InvalidIssuerError:\n self._logger.log(LogEntry(\n LogLevel.ERROR,\n __name__,\n \"Attempted to decode token with invalid issuer.\"))\n raise jwt.InvalidIssuerError(\"Invalid JWT Issuer.\")\n\n except jwt.InvalidTokenError:\n self._logger.log(LogEntry(\n LogLevel.ERROR,\n __name__,\n \"JWT decoding error when trying to decode token.\"))\n raise jwt.InvalidTokenError(\"Invalid token.\")\n\n return decoded", "title": "" }, { "docid": "6eaee575834994b0101c044385fd78d4", "score": "0.46693385", "text": "def wrap(*args, **kwargs):\n if \"Authorization\" not in request.headers:\n return jsonify({'Error Message': 'Authorization token not provided'}), 400\n\n token = request.headers['Authorization']\n\n try:\n auth.verify_id_token(token, check_revoked=True)\n\n except auth.ExpiredIdTokenError:\n return jsonify({'Error message': 'Token expired'}), 403\n\n except auth.InvalidIdTokenError:\n return jsonify({'Error message': 'Invalid token'}), 403\n\n except auth.RevokedIdTokenError:\n return jsonify({'Error message': 'Token revoked'}), 403\n \n except:\n return jsonify({'Error message': 'Invalid Token'}), 403\n\n return f(*args, **kwargs)", "title": "" }, { "docid": "93e48968c9ea2be26bb54088bc39ac2b", "score": "0.466662", "text": "def decode_jwt(self, auth_token):\n try:\n payload = jwt.decode(auth_token, jwt_key, [jwt_alg])\n return payload[\"user_info\"]\n except jwt.ExpiredSignatureError:\n return make_response(\"Signature expired. 
Please log in again.\", 403)\n except jwt.InvalidTokenError:\n return make_response(\"Invalid token. Please log in again.\", 403)", "title": "" }, { "docid": "b36023d5e2b6dc58a5ba5ea9a38c4d73", "score": "0.46582806", "text": "def test_jwt_requires_jwt_no_identity(self, live_testapp_no_identity):\n with mock.patch('flask_jwt_consumer.decorators.get_jwt_raw',\n return_value=good_token):\n with mock.patch('flask_jwt_consumer.decorators._brute_force_key',\n return_value=JWT_PUBLIC_KEY):\n protected = requires_jwt(identity)\n with pytest.raises(AuthError) as err:\n protected('De nada')\n assert err.value.code == 401\n assert err.value.content == {'code': 'invalid_claims',\n 'description': 'Incorrect claims, please check the issued at, audience or issuer.'} != {'description': 'Missing claims, please check the audience.'}", "title": "" }, { "docid": "c961bef897d82da0443bae53a62ee4ca", "score": "0.4647734", "text": "def decode_token(access_token):\n try:\n payload = jwt.decode(access_token, current_app.config['SECRET'])\n # blacklisted_token = BlackList.check_token(access_token)\n # if blacklisted_token:\n # return \"Kindly login to perform action.\"\n return payload\n except jwt.ExpiredSignatureError:\n raise UserErrors.Unauthorized(\"Signature Expired. Please login!\")\n except jwt.InvalidTokenError:\n raise UserErrors.Unauthorized(\"Invalid Token. Please Register or Login\")", "title": "" }, { "docid": "e131b3fac9a5bd361290f94ed8b422d3", "score": "0.46457383", "text": "def encode_key_loader(self, callback: Callable) -> Callable:\n self._encode_key_callback = callback\n return callback", "title": "" }, { "docid": "1269124e74c955c723414c53ebfc8da1", "score": "0.4641849", "text": "def __decode_auth_token(self, auth_token: bytes) -> str:\n try:\n payload = jwt.decode(auth_token, config.token['key'])\n\n return payload['sub']\n \n except jwt.ExpiredSignatureError as error:\n raise ExpiredToken\n\n except jwt.InvalidTokenError:\n raise InvalidToken", "title": "" }, { "docid": "dbc54a98ccba4e7be782c2170d062e8f", "score": "0.46360147", "text": "def decode(token: str) -> typing.Dict[str, typing.Any]:\n # Assume that the token has already been verified\n return jwt.decode(token, algorithms=\"RS256\", options={\"verify_signature\": False})", "title": "" }, { "docid": "876bf20f03266915b5022482a6e7cd89", "score": "0.46359658", "text": "def mock_verify_token(*args, **kwargs):\n return {\"error\": \"some error\", \"error_description\": 'A bad error occurred'}", "title": "" }, { "docid": "58946b27b238790b1efd29f1cc734b8d", "score": "0.4630635", "text": "def signature_required(secret_key_func):\r\n def actual_decorator(obj):\r\n\r\n def test_func(request, *args, **kwargs):\r\n secret_key = secret_key_func(request, *args, **kwargs)\r\n return validate_signature(request, secret_key)\r\n\r\n decorator = request_passes_test(test_func)\r\n return wrap_object(obj, decorator)\r\n\r\n return actual_decorator", "title": "" }, { "docid": "fb9543ea8b214480e5bc8a6857fe1efa", "score": "0.46300524", "text": "def decoded_and_verified_token_from_headers(headers):\n encoded_token = get_encoded_token_from_headers(headers)\n decoded_token = decoded_and_verified_token(encoded_token)\n return decoded_token", "title": "" }, { "docid": "0df0349a411b8c56a397ed1782d5876f", "score": "0.4629795", "text": "def test_decode_token_fail_none(self):\n test_data = {'email': '[email protected]', 'exp': 100500}\n token = jwt.encode(test_data, SECRET_KEY, ALGORITHM)\n returned_dict = decode_token(token)\n self.assertIsNone(returned_dict)", 
"title": "" } ]
46dd7a0b6465ddc360187dce1af3fe54
Given the list containing the parameter dicts for all available rewards, this method returns a dict, whose keys are the rarity identifiers and whose values are lists with all the parameter dicts of the rewards with that rarity CHANGELOG Added 12.06.2019
[ { "docid": "48a7d0face9d02b8b5b0d2d4553e535f", "score": "0.6307732", "text": "def _available_rewards_by_rarity(cls, available_rewards: List[Dict]) -> Dict:\n mapping = {}\n for rarity in Rarity.RARITIES:\n rewards = list(filter(rarity_filter(rarity), available_rewards))\n mapping[rarity] = rewards\n\n return mapping", "title": "" } ]
[ { "docid": "8cd00bdd78f7c3713d53f17a1c7ea04d", "score": "0.58022445", "text": "def get_rewards_by_name(self) -> Dict[str, List]:\n rewards_dict = defaultdict(list)\n for reward in self.rewards:\n rewards_dict[reward.name].append(reward)\n return rewards_dict", "title": "" }, { "docid": "812b044fa0bcfb0f3a4c9f9a405469f8", "score": "0.5524794", "text": "def getAwards(self):\n awardsDict = {teamKey: set([]) for teamKey in self.getRawTeamList()}\n awards = self.getAwardsObj()\n if awards is None:\n return {}\n for award in awards:\n awardType = award[\"award_type\"]\n for recipient in award[\"recipient_list\"]:\n teamKey = recipient[\"team_key\"]\n if teamKey is not None and teamKey in awardsDict:\n awardsDict[teamKey].add(awardType)\n return awardsDict", "title": "" }, { "docid": "be4354e33d00957c3b62dc5b8955da22", "score": "0.5412112", "text": "def bookdict(rating_name = names(),scores = ratings()):\n # Combining the lists into a dictionary\n ratings = dict(zip(rating_name,scores))\n return ratings", "title": "" }, { "docid": "6319493b7359670bc015d485960bfe44", "score": "0.53763264", "text": "def inspect(self) -> dict:\n #return dict({k:v for k,v in zip(list(self.__dict__.values())[-1], list(self.__dict__.values())[0:-2])})\n return dict({k:v for k,v in self.__dict__.items()})\n #return list(zip(list(self.__dict__.values())[-1], list(self.__dict__.values())[0:-2]))", "title": "" }, { "docid": "3f0b50792855e3c6be1d7cf8f1005ea0", "score": "0.5295973", "text": "def to_dict(self):\n # TODO: test\n return {'corrector_species': self.corrector_species,\n 'params': self.bleach_params.valuesdict()}", "title": "" }, { "docid": "18d0aa87b8acb5106049ddfbfadf4caf", "score": "0.5264429", "text": "def params(self) -> Dict[str, Any]:\n return { \"eta\": self._eta_init, \"type\":self._type, \"T\": self._T, \"B\": [ b.family for b in self._base_learners ], \"seed\":self._random_pick._seed }", "title": "" }, { "docid": "fa56b02a270ea703b317286900186c9d", "score": "0.5255655", "text": "def get_block_rarity_dict(self):\n return {block: len(self.get_traces_from_block(block)) for block in self.total_coverage}", "title": "" }, { "docid": "29b6a76877f9ad4f6c8c9966419109f4", "score": "0.52327126", "text": "def reward_distribution_per_step(info_hists_lst):\n step_rewards = [defaultdict(list) for _ in range(cfg.MAX_NUM_OF_STEPS)]\n\n for info_hists in info_hists_lst:\n for info_hist in info_hists:\n for step, step_info in enumerate(info_hist):\n for reward_type, value in step_info.info[\"reward_info\"].items():\n if value != 0:\n step_rewards[step][reward_type].append(abs(value))\n\n return step_rewards", "title": "" }, { "docid": "305f1d41a649fdf6d1ab21fa895fe993", "score": "0.5227123", "text": "def get_args(self):\n\n init_args = {\n 'name': self.name,\n 'ferment_list': [\n ferment.get_args() for ferment in self.ferments\n ]\n }\n\n return init_args", "title": "" }, { "docid": "61829d55fa194650e6073f9b2e1c0f6e", "score": "0.52245694", "text": "def get_params(self):\n params = {}\n for i, m in enumerate(self.marginals):\n for key, value in m.get_params().items():\n params[key + '_' + str(i)] = value\n for key, value in self.copula.get_params().items():\n params[key + '_c'] = value\n return params", "title": "" }, { "docid": "73557bcb6ac039c5de4676e3d08ffe20", "score": "0.5207043", "text": "def LS_ParameterList():\n parameters = {}\n parameters[\"c_armijo\"] = [1e-4, \"Armijo constant for sufficient reduction\"]\n parameters[\"max_backtracking_iter\"] = [10, \"Maximum number of backtracking iterations\"]\n \n return 
ParameterList(parameters)", "title": "" }, { "docid": "0490f956fddd5f3daf86e162a1e7c9ae", "score": "0.520617", "text": "def _get_approvals(self, signers: List[dict]) -> List[dict]:\n\n fields = [\n {\n \"type\": \"SIGNATURE\",\n \"subtype\": \"FULLNAME\",\n \"extractAnchor\": {\n \"anchorText\": \"Capture Signature\",\n \"index\": 0,\n \"characterIndex\": 0,\n \"anchorPoint\": \"BOTTOMLEFT\",\n \"leftOffset\": 0,\n \"topOffset\": 0,\n \"width\": 150,\n \"height\": 50,\n },\n }\n ]\n\n approvals = []\n for signer in signers:\n approvals.append({\"role\": f\"{signer['id']}\", \"fields\": fields})\n\n return approvals", "title": "" }, { "docid": "d29386e4f807c18e69af747e9be6a873", "score": "0.5172067", "text": "def _export_qualifiers_to_list(self) -> Optional[Dict[Hashable, List[str]]]:\n if self.qualifiers:\n return {key: sorted(vals) for key, vals in self.qualifiers.items()}", "title": "" }, { "docid": "00ab864f47c6d759a4b660d368b119b9", "score": "0.5163818", "text": "def make_reward_opts(trainer):\n return {\n 'K': trainer.reward_top_k,\n 'target': trainer.reward_episode_target,\n 's': trainer.reward_target_weight,\n }", "title": "" }, { "docid": "268e6301c38f86021548e639d2e8ff32", "score": "0.5161237", "text": "def TR_ParameterList():\n parameters = {}\n parameters[\"eta\"] = [0.05, \"Reject step if (actual reduction)/(predicted reduction) < eta\"]\n \n return ParameterList(parameters)", "title": "" }, { "docid": "9b5192213e47c3d46552ddb7e95e3600", "score": "0.51580375", "text": "def get_lr_params():\n params = {\n \"penalty\": [\"l1\", \"l2\"],\n \"C\": [0.001, 0.01, 0.1, 1.0, 10.0, 100.0],\n }\n return params", "title": "" }, { "docid": "ded9920eeea64680a390f263de0e98cf", "score": "0.5148432", "text": "def args(self):\n return {'theta': self.theta, 'beta': self.theta[:-3], 'sigma': self.theta[-3], 'pl': self.theta[-2], 'ph': self.theta[-1]}", "title": "" }, { "docid": "2674517780c9f43b5d629d74cbd7ddad", "score": "0.5098139", "text": "def get_reward_dict(\n self, action: np.ndarray, obs_dict: Dict[str, np.ndarray],\n ) -> Dict[str, np.ndarray]:\n reward_dict = collections.OrderedDict(())\n return reward_dict", "title": "" }, { "docid": "f5a0541456ffe187d610a6132471c707", "score": "0.5087619", "text": "def get_detail_funded_awards_vps(self):\n funded_awards_resources = self.get_funded_awards_resources()\n detail_funded_awards_vps = {}\n for funded_award in funded_awards_resources:\n sorted_gamers_by_awards_resources = sort_keys_by_value(funded_awards_resources[funded_award])\n gamers, awards_resources = dict_keys_values_into_two_lists(sorted_gamers_by_awards_resources)\n awards_resources_matrix = create_list_matrix(awards_resources)\n if len(gamers) > 2:\n gamer_award_vps = {}\n for position in range(0, len(awards_resources_matrix)):\n if awards_resources_matrix[position] == 1:\n gamer_award_vps[gamers[position]] = self.AWARDS[funded_award][0]\n detail_funded_awards_vps[funded_award] = gamer_award_vps\n elif awards_resources_matrix[position] == 2:\n gamer_award_vps[gamers[position]] = self.AWARDS[funded_award][1]\n detail_funded_awards_vps[funded_award] = gamer_award_vps\n else:\n gamer_award_vps[gamers[position]] = 0\n detail_funded_awards_vps[funded_award] = gamer_award_vps\n elif len(gamers) == 2:\n gamer_award_vps = {}\n for position in range(0, len(awards_resources_matrix)):\n if awards_resources_matrix[position] == 1:\n gamer_award_vps[gamers[position]] = self.AWARDS[funded_award][0]\n detail_funded_awards_vps[funded_award] = gamer_award_vps\n else:\n 
gamer_award_vps[gamers[position]] = 0\n detail_funded_awards_vps[funded_award] = gamer_award_vps\n return detail_funded_awards_vps", "title": "" }, { "docid": "e12eb5b2d1a9a10b83577d97325a1aaa", "score": "0.505004", "text": "def get_params_dict(self) -> StateDict:\n params = {\n \"id_walkers\": {\"dtype\": hash_type},\n \"rewards\": {\"dtype\": float_type},\n \"observs\": {\"dtype\": float_type},\n \"states\": {\"dtype\": float_type},\n }\n return params", "title": "" }, { "docid": "eb9423bf5eddbb6dc95ad8faf7b1018e", "score": "0.5048453", "text": "def as_dict(self) -> Dict[str, List[str]]:\n return {\n 'and_': sorted(self.and_),\n 'or_': sorted(self.or_),\n 'not_': sorted(self.not_),\n 'include': sorted(self.include),\n 'exclude': sorted(self.exclude),\n 'flags': sorted(self.flags),\n }", "title": "" }, { "docid": "3e16657e3b0bf06560ee112764a922ee", "score": "0.50431263", "text": "def to_dict(self):\n # TODO: test\n return {'corrector_species': self.corrector_species,\n 'params': self.bkg_params.valuesdict()}", "title": "" }, { "docid": "28b3c70563f35e8672f1baf819c5d4c1", "score": "0.5028199", "text": "def to_dict(self, paramlist=None):\n out = dict()\n out['Time'] = self.time_stamps\n if paramlist is None:\n for ctr, (key, item) in enumerate(self.params_bid.items()):\n out[key + \"_bid\"] = item\n out[key + \"_ask\"] = self.params_ask[key]\n else:\n for key in paramlist:\n out[key + \"_bid\"] = self.params_bid[key]\n out[key + \"_ask\"] = self.params_ask[key]\n out[CORRELATION_PAR_STR] = self.params_correlation\n return out", "title": "" }, { "docid": "0ce4c84c3e84208c823cb0b82a301650", "score": "0.5026454", "text": "def getParameters(self):\n\n current_params={ 'w0':self.w0, 'K':self.K,'gamma':self.gamma, 'Beta1':self.Beta1, 'A1':self.A1, 'A2':self.A2, 'BetaL1':self.BetaL1, 'BetaL2':self.BetaL2, 'sigma':self.sigma, 'G':self.G, 'alpha_0':self.alpha_0, 'delta':self.delta, 'p':self.p, 'I0':self.I0}\n\n return(current_params)", "title": "" }, { "docid": "c714b1e9073ea082b53f71955a33814c", "score": "0.49862963", "text": "def get_param_helps(self):\n return {k: v[3].strip() for k, v in self._params.items()}", "title": "" }, { "docid": "a2364e16888ca249f51eb1a56724ef3e", "score": "0.49772447", "text": "def parameters(self):\n return dict()", "title": "" }, { "docid": "97693b4eb8261216821331d9fa0524dc", "score": "0.49581233", "text": "def get_rewards(self):\n return self.rewards", "title": "" }, { "docid": "1d24fad29d167144639e5c83e5aebb2f", "score": "0.49540105", "text": "def params_dict(self):\n pdict = {}\n for p in self.aps:\n pdict[str(p)] = p.get_value()\n return pdict", "title": "" }, { "docid": "a2ef589b4b7e39e7c14adf91d2b21c56", "score": "0.49436495", "text": "def rewards(session, type: engagement.RewardType, au: usermgr.AnonUser) -> dict:\n d_rewards = {}\n try:\n highest_rated_photo = RewardManager.max_score_photo(session, au)\n if highest_rated_photo is not None:\n d_rewards['HighestRatedPhotoURL'] = \"preview/{0}\".format(highest_rated_photo.id)\n\n q = session.query(engagement.UserReward). 
\\\n filter(engagement.UserReward.user_id == au.id)\n ur_l = q.all()\n if ur_l is not None:\n for ur in ur_l:\n total_bulbs = 0\n current_bulbs = 0\n if ur.rewardtype == str(engagement.RewardType.LIGHTBULB):\n total_bulbs = ur.total_balance\n current_bulbs = ur.current_balance\n d_rewards['totalLightbulbs'] = total_bulbs\n d_rewards['unspentBulbs'] = current_bulbs\n\n max_reward = RewardManager.max_reward_day(session, type, au)\n if max_reward is not None:\n d_rewards['mostBulbsInADay'] = max_reward.quantity\n else:\n d_rewards['mostBulbsInADay'] = 0\n\n d_rewards = RewardManager.add_reward_types(ur_l, d_rewards)\n\n if len(d_rewards) == 0:\n return None\n return d_rewards\n except Exception as e:\n logger.exception(msg='[rewardmgr] error reading rewards')\n raise", "title": "" }, { "docid": "6ea00b4f386339a0d0d1d62ef1185e4a", "score": "0.4936266", "text": "def get_reward_list(self):\n return self._gameRewards", "title": "" }, { "docid": "6ea00b4f386339a0d0d1d62ef1185e4a", "score": "0.4936266", "text": "def get_reward_list(self):\n return self._gameRewards", "title": "" }, { "docid": "6ea00b4f386339a0d0d1d62ef1185e4a", "score": "0.4936266", "text": "def get_reward_list(self):\n return self._gameRewards", "title": "" }, { "docid": "8f1d96191b57583094aae9560f803c4f", "score": "0.493253", "text": "def get_params():\n poisson_ratio = okada_pr[10]\n mu = 30\n lmda = (2 * mu * poisson_ratio) / (1 - 2 * poisson_ratio)\n alpha = (lmda + mu) / (lmda + 2 * mu)\n \n x0 = [ okada_pr[0], okada_pr[1], - okada_pr[2] ]\n depth = okada_pr[2]\n dip = okada_pr[4]\n strike_width = [ -okada_pr[5]/2, okada_pr[5]/2 ]\n dip_width = [ -okada_pr[6]/2, okada_pr[6]/2 ]\n dislocation = [ okada_pr[7], okada_pr[8], okada_pr[9] ]\n \n return alpha, x0, depth, dip, strike_width, dip_width, dislocation", "title": "" }, { "docid": "3efade939530d25c5284b2a005015d9e", "score": "0.49295306", "text": "def parameters(self):\n return []", "title": "" }, { "docid": "d19808380453b18d2b747413bfbc67da", "score": "0.49202985", "text": "def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]:\n\n return {\"complain_type\": [self.from_entity(\"complain_type\"), self.from_text()], \"complain_text\": [self.from_entity(entity=\"navigation\"), self.from_text()]}\n\n # return {\"complain_type\": self.from_entity(\"complain_type\"),\"complain_text\": self.from_entity(entity=\"any_thing\")}", "title": "" }, { "docid": "4487f4c3daf3094d8458ee9ab5f804db", "score": "0.49195823", "text": "def captures(self) -> collections.OrderedDict:\n return self._captures", "title": "" }, { "docid": "b8794badb34ecac3831f16958de46654", "score": "0.49157736", "text": "def translate_runtime_parameters_todict(self, r_text):\n translated = {}\n entries = r_text.split('\\n')\n if self.verbose>1:\n print(\"RUNTIME PARAMETERS RAW:\")\n print(r_text)\n idx={} # Dictionary of indices.\n for entry in entries:\n if entry and (entry.find(':') != -1): # valid entries look like 'key: value', the rest are headers or blank\n key, value = entry.split(':')\n\n # Kongsberg sometimes repeats a key to provide additional information\n # which of course over-writes anything we'd previously stored in our\n # translated dictionary. This bit of code checks to see if we've \n # seen the code before. If not. we make no changes to the key and \n # just note it's the first time we saw it in idx. If the key is \n # already in translated, then we modify the key with \"_X\" where X\n # indicates the index of the additional data. 
\n if key not in translated.keys():\n idx[key] = 0\n else:\n idx[key] += 1\n key = key + '_' + str(idx[key])\n\n\n translated[key] = value.lstrip().rstrip()\n\n # When Yaw Stabilisation is off, Yaw Stabilization Heading Filter is not \n # written. Its absense can break code that is comparing runtime \n # parameters across files. So we give it a value of None here. This is \n # kind of tricky because we want it in the same location relative to the \n # other fields, so we have to convert to/from an OrderedDict. \n if 'Yaw Stabilisation Heading Filter' not in translated.keys():\n translatedO = OrderedDict(translated)\n translatedkeys = [k for k,v in translatedO.items()]\n translatedvalues = [v for k,v in translatedO.items()]\n for k in translatedkeys:\n if k == 'Yaw Stabilisation Mode':\n translatedkeys.insert(\n translatedkeys.index('Yaw Stabilisation Mode')+1,\n 'Yaw Stabilisation Heading Filter')\n translatedvalues.insert(\n translatedkeys.index('Yaw Stabilisation Mode')+1,\n None)\n translated = dict(zip(translatedkeys,translatedvalues))\n\n\n return translated", "title": "" }, { "docid": "009efb2a3fd7b89928c202a92e02f497", "score": "0.49152505", "text": "def list_embed_params(embed_params):\n\n attributes = {}\n for name in dir(embed_params):\n if '__' not in name: # not a python related attribute\n attr = getattr(embed_params, name)\n if not callable(attr):\n attributes[name] = attr\n\n return attributes", "title": "" }, { "docid": "e7fa89c6606579bc914bf3bc897cb6f0", "score": "0.49051553", "text": "def reward_distribution_per_step_per_type(info_hists_lst):\n step_rewards = [defaultdict(list) for _ in range(cfg.MAX_NUM_OF_STEPS)]\n\n # a set containing strings for each type of reward\n reward_types = set()\n\n for info_hists in info_hists_lst:\n for info_hist in info_hists:\n for step, step_info in enumerate(info_hist):\n for reward_type, value in step_info.info[\"reward_info\"].items():\n step_rewards[step][reward_type].append(abs(value))\n reward_types.add(reward_type)\n\n # reverse dict and take average of rewards for each reward_type and step\n reversed_dict = defaultdict(list)\n for step in range(len(step_rewards)):\n for reward_type in reward_types:\n reward_type_lst = step_rewards[step].get(reward_type, [0])\n reversed_dict[reward_type].append(sum(reward_type_lst) / len(reward_type_lst))\n\n return reversed_dict", "title": "" }, { "docid": "4f369a276a5fdbc417038fe73bd134b0", "score": "0.49009642", "text": "def map_efforts_to_rewards(list_of_efforts, map_to_rewards):\n rewards = []\n for e in list_of_efforts:\n rewards.append(map_to_rewards[e])\n \n return rewards", "title": "" }, { "docid": "156492d93bb7af63d606c036eb6871eb", "score": "0.48702434", "text": "def params(self) -> Dict[Text, Any]:\n return {\n \"type\": self.type,\n \"use_energy\": self.use_energy,\n \"frame_shift\": self.frame_shift,\n \"snip_edges\": self.snip_edges,\n \"low_frequency\": self.low_frequency,\n \"high_frequency\": self.high_frequency,\n \"sample_frequency\": self.sample_frequency,\n \"allow_downsample\": self.allow_downsample,\n \"allow_upsample\": self.allow_upsample,\n \"pitch\": self.pitch,\n \"fmllr\": self.fmllr,\n \"splice_left_context\": self.splice_left_context,\n \"splice_right_context\": self.splice_right_context,\n }", "title": "" }, { "docid": "7feda57e5e13bb675e096d978cb1c607", "score": "0.48666477", "text": "def parameters(self):\r\n return self.values.keys()", "title": "" }, { "docid": "bc1e5422fff179e2bd88d19c56fc3778", "score": "0.4866115", "text": "def get_recommend_params(self, 
learner, valid_activities, valid_kcs):\n # retrieve or calculate features\n last_attempted_activity = self.get_last_attempted_activity(learner)\n if last_attempted_activity:\n # convert to queryset to avoid dimension mismatch between output and tagging matrix in get_tagging_parameter_values()\n last_attempted_activity_qs = Activity.objects.filter(pk=last_attempted_activity.pk)\n\n # construct param dict\n return {\n 'guess': self.get_guess(valid_activities, valid_kcs),\n 'slip': self.get_slip(valid_activities, valid_kcs),\n 'difficulty': self.get_difficulty(valid_activities),\n 'prereqs': self.get_prereqs(valid_kcs),\n 'last_attempted_guess': self.get_guess(last_attempted_activity_qs, valid_kcs)[0] if last_attempted_activity else None,\n 'last_attempted_slip': self.get_slip(last_attempted_activity_qs, valid_kcs)[0] if last_attempted_activity else None,\n 'learner_mastery': self.get_learner_mastery(learner, valid_kcs),\n 'r_star': self.engine_settings.r_star,\n 'L_star': self.engine_settings.L_star,\n 'W_p': self.engine_settings.W_p,\n 'W_r': self.engine_settings.W_r,\n 'W_d': self.engine_settings.W_d,\n 'W_c': self.engine_settings.W_c,\n }", "title": "" }, { "docid": "72c00f86138236e4aaa3d3afde2d4385", "score": "0.48626068", "text": "def get_score_dict(\n self, obs_dict: Dict[str, np.ndarray], reward_dict: Dict[str, np.ndarray],\n ) -> Dict[str, np.ndarray]:\n return collections.OrderedDict(())", "title": "" }, { "docid": "a4176aca44d1a71cc3123f25f7e85b9e", "score": "0.48598146", "text": "def getReadableParameters(self):\n params = []\n rawParams = self.getRawParameters()\n filteredParams = []\n for param in rawParams:\n param_id = param['param_id']\n description = \"description missing\"\n attributes = param['string_attributes']\n for attribute in attributes:\n if 'name' in attribute and attribute['name'] == 'description':\n description = attribute['value']\n break\n params.append({param_id: description})\n return params", "title": "" }, { "docid": "701b9fa0f8dc61349f478d52fca5907d", "score": "0.48461488", "text": "def lenstronomy_params(self):\n\n r_eff, k_eff = self.normalization(self.mass, self.z) # [Mpc, unitless]\n R_sersic_angle = r_eff / self._lens_cosmo.D_d / self._lens_cosmo.cosmo.arcsec\n x, y = np.round(self.x, 4), np.round(self.y, 4)\n\n kwargs = [{'k_eff':k_eff, 'R_sersic':R_sersic_angle, 'center_x':x, 'center_y':y}]\n\n return kwargs, None", "title": "" }, { "docid": "f99caedd4423eeff1b0b0e795b1e6713", "score": "0.48416388", "text": "def _format_args():\n if request and request.args.get(\"prettyprint\"):\n return dict(\n indent=2,\n separators=(\", \", \": \"),\n )\n else:\n return dict(\n indent=None,\n separators=(\",\", \":\"),\n )", "title": "" }, { "docid": "ae6f66573fbd4c3504b4078a962c8092", "score": "0.4830317", "text": "def _parameters_to_print(self, parameters: Mapping[str, Any]) -> Mapping[str, Any]:\n params = {}\n for name, tpe in parameters.items():\n # LogicalPlan is not to print, e.g., LogicalPlan\n is_logical_plan = isclass(tpe.annotation) and isinstance(tpe.annotation, LogicalPlan)\n # Look up the string argument defined as a forward reference e.g., \"LogicalPlan\"\n is_forwardref_logical_plan = getattr(tpe.annotation, \"__forward_arg__\", \"\").endswith(\n \"LogicalPlan\"\n )\n # Wrapped LogicalPlan, e.g., Optional[LogicalPlan]\n is_nested_logical_plan = any(\n isclass(a) and issubclass(a, LogicalPlan)\n for a in getattr(tpe.annotation, \"__args__\", ())\n )\n # Wrapped forward reference of LogicalPlan, e.g., Optional[\"LogicalPlan\"].\n 
is_nested_forwardref_logical_plan = any(\n getattr(a, \"__forward_arg__\", \"\").endswith(\"LogicalPlan\")\n for a in getattr(tpe.annotation, \"__args__\", ())\n )\n if (\n not is_logical_plan\n and not is_forwardref_logical_plan\n and not is_nested_logical_plan\n and not is_nested_forwardref_logical_plan\n ):\n # Searches self.name or self._name\n try:\n params[name] = getattr(self, name)\n except AttributeError:\n try:\n params[name] = getattr(self, \"_\" + name)\n except AttributeError:\n pass # Simpy ignore\n return params", "title": "" }, { "docid": "f78f6134953b999f4b4b3cb75ab4dbc8", "score": "0.4827424", "text": "def get_parameters(self) -> Dict[str, Any]:\n return {}", "title": "" }, { "docid": "19179c1caf83de5ef098ad7dcd750bfb", "score": "0.48261356", "text": "def get_signature():\n return {'policy_evaluate': {'placeholders': {'decoder_type': ([SAMPLE_DECODER], np.uint8)},\n 'outputs': ['selected_tokens',\n 'log_probs',\n 'draw_prob']},\n 'policy_beam_search': {'placeholders': {'decoder_type': ([GREEDY_DECODER], np.uint8)},\n 'outputs': ['beam_tokens',\n 'beam_log_probs',\n 'draw_prob']},\n 'policy_evaluate_with_state_value': {'placeholders': {'decoder_type': ([SAMPLE_DECODER], np.uint8)},\n 'outputs': ['selected_tokens',\n 'log_probs',\n 'draw_prob',\n 'state_value']},\n 'policy_beam_search_with_state_value': {'placeholders': {'decoder_type': ([GREEDY_DECODER], np.uint8)},\n 'outputs': ['beam_tokens',\n 'beam_log_probs',\n 'draw_prob',\n 'state_value']},\n 'policy_expand': {'placeholders': {'decoder_type': ([TRAINING_DECODER], np.uint8)},\n 'outputs': ['logits']},\n 'policy_log_probs': {'placeholders': {'decoder_type': ([TRAINING_DECODER], np.uint8)},\n 'outputs': ['log_probs', 'draw_prob']},\n 'policy_get_value': {'placeholders': {'decoder_type': ([GREEDY_DECODER], np.uint8)},\n 'outputs': ['state_value']}}", "title": "" }, { "docid": "ec20bb0207b388bab32cbe7bffe33792", "score": "0.48226976", "text": "def profile_args(self):\n if not hasattr(self, '_profile_args'):\n R_sersic = self.R_Lazar(self.mass,self.z)\n self._profile_args = (R_sersic)\n\n return self._profile_args", "title": "" }, { "docid": "37df23ce5ce4866337a5993395808a89", "score": "0.48198253", "text": "def get_rewards_dict(self):\n final_dict = {}\n j = 0\n for colName, _ in self.df.iteritems():\n # j+=1\n # if j ==7:\n # break\n if colName not in ['t', 'anomalous']:\n\n df_temp = self.df[[colName, 'anomalous']]\n map_dict = dict(df_temp.groupby(by=[df_temp[colName], df_temp['anomalous']]).count().reset_index()[\n colName].value_counts())\n df_temp['segment'] = df_temp.apply(lambda x: 2 if map_dict[x[colName]] > 1 else x['anomalous'], axis=1)\n\n df_temp = df_temp.sort_values(by=[colName])\n seg_entropy = self.calc_seg_entropy(df_temp)\n\n reward = self.class_entropy / seg_entropy\n final_dict.update({colName: reward})\n final_dict = {k: v for k, v in sorted(final_dict.items(), key=lambda item: item[1], reverse=True)}\n return final_dict", "title": "" }, { "docid": "b03f8c102bd4f0fec1b3859fabd2c854", "score": "0.48178068", "text": "def reward_type_distribution_per_action_type(info_hists_lst):\n action_type_rewards = {action_type: defaultdict(list) for action_type in {'back', 'filter', 'group'}}\n\n # a set containing strings for each type of reward\n reward_types = set()\n\n for info_hists in info_hists_lst:\n for info_hist in info_hists:\n for step, step_info in enumerate(info_hist):\n action_type = ATENAUtils.OPERATOR_TYPE_LOOKUP[step_info.info[\"raw_action\"][0]]\n for reward_type, value in 
step_info.info[\"reward_info\"].items():\n action_type_rewards[action_type][reward_type].append(abs(value))\n reward_types.add(reward_type)\n\n # reverse dict and take average of rewards for each reward_type and step\n reversed_dict = defaultdict(float)\n for action_type in action_type_rewards.keys():\n for reward_type in reward_types:\n reward_type_lst = action_type_rewards[action_type].get(reward_type, [0])\n reversed_dict[(reward_type, action_type)] = sum(reward_type_lst) / len(reward_type_lst)\n\n return reversed_dict", "title": "" }, { "docid": "5897852fbd985d4cfc157c886dd1e641", "score": "0.48147434", "text": "def to_dict_list(self):\n out_dicts = []\n for contig in self.chains:\n new_contig = contig.copy()\n new_contig.info_dict.update(dict(self.info_dict))\n contig_out = new_contig.to_dict()\n contig_out['barcode'] = self.name\n out_dicts.append(contig_out)\n return out_dicts", "title": "" }, { "docid": "d75ee153e5a42817436422654940a4cd", "score": "0.4814237", "text": "def params_as_dict(self):\n #print('DEBUG get params for func -- params_as_dict : ',self._name)\n subFuncs = [ self.__dict__[key] for key in self.__dict__ if isinstance(self.__dict__[key], DetObjectFunc) ]\n funcPars={}\n for tfunc in subFuncs:\n sfuncDict = tfunc.params_as_dict()\n for key in sfuncDict:\n #print ('DEBUG get params for func - subfunc - key: ',self._name, tfunc._name, key)\n #funcPars['%s_%s_%s'%(self._name,tfunc._name,key)] = sfuncDict[key]\n funcPars['%s'%(key)] = sfuncDict[key]\n\n parList = {key: self.__dict__[key] for key in self.__dict__ if (isinstance(getattr(self,key), (basestring, int, float, np.ndarray)) and key[0]!='_')}\n parList.update({key: np.array(self.__dict__[key]) for key in self.__dict__ if (isinstance(getattr(self,key), list) and key[0]!='_')})\n remKeys = [key for key in self.__dict__ if (key not in parList)]\n for key, value in iteritems(parList):\n funcPars['%s_%s'%(self._name, key)] = value\n if self._debug: print('DEBUG: keys which are not parameters:',self._name, remKeys)\n #print 'return for ',self._name, funcPars.keys()\n return funcPars", "title": "" }, { "docid": "4009bd310003a2fd617ff137ea1ffd9d", "score": "0.48117903", "text": "def mrz_improver(mrz_list, mrz_format):\n mrz = {}\n for key in mrz_format.keys():\n info_sample_len = []\n info_sample = []\n for i in range(len(mrz_list)):\n info_sample_len.append(len(mrz_list[i][key]))\n info_sample.append(mrz_list[i][key])\n \n while('' in info_sample):\n info_sample.remove('')\n\n #if key == 'surname':\n #print(info_sample)\n\n info_maximized = \"\"\n text_lenght = max(info_sample_len)\n \n for i in range(text_lenght):\n character_list = []\n for j in range(len(info_sample)):\n try:\n character_list.append(info_sample[j][i])\n except:\n character_list.append('<')\n\n max_occ = max(character_list, key = character_list.count)\n info_maximized = info_maximized + max_occ\n mrz[key] = info_maximized\n \n return mrz", "title": "" }, { "docid": "1b4f4c1031126b6e6685d646c9ea40e7", "score": "0.48027864", "text": "def parameters(self):\n raise NotImplementedError\n return {}", "title": "" }, { "docid": "1032463f664e5a9fac87ab0e382f4c4d", "score": "0.48015478", "text": "def reward_params(self, moved):\n\t\tpass", "title": "" }, { "docid": "2804ef2d7a4e23a149386d7c28637885", "score": "0.47941333", "text": "def params(self):\n return [self.d1, self.a2, self.a3, self.d4, self.d5, self.d6]", "title": "" }, { "docid": "02c3bcfefcbe54972debd49587ac46fd", "score": "0.4791272", "text": "def supported_parameters(cls) -> list[str]:\n return 
[\n \"type\",\n \"name\",\n \"id\",\n \"match\",\n \"smirks\",\n \"sigma\",\n \"epsilon\",\n \"rmin_half\",\n \"charge_increment\",\n \"distance\",\n \"outOfPlaneAngle\",\n \"inPlaneAngle\",\n ]", "title": "" }, { "docid": "f5d3bc76de5ecbd9f60006553ac345e0", "score": "0.47814903", "text": "def mech_info(rct_names_lst, prd_names_lst,ich_dct):\n # Sort reactant and product name lists by formula to facilitate\n # multichannel, multiwell rate evaluations\n formula_str = ''\n rxn_name_lst = []\n formula_str_lst = []\n formula_dct_lst = []\n\n for rct_names, prd_names in zip(rct_names_lst, prd_names_lst):\n rxn_name = '='.join(['+'.join(rct_names), '+'.join(prd_names)])\n rxn_name_lst.append(rxn_name)\n rct_ichs = list(map(ich_dct.__getitem__, rct_names))\n formula_dct = ''\n for rct_ich in rct_ichs:\n formula_i_dct = automol.inchi.formula(rct_ich)\n formula_dct = automol.formula.join(formula_dct, formula_i_dct)\n formula_str = automol.formula.string2(formula_dct)\n formula_dct_lst.append(formula_dct)\n formula_str_lst.append(formula_str)\n \n return formula_dct_lst, formula_str_lst, rxn_name_lst", "title": "" }, { "docid": "d40ba5b52b25ad157429a1ea4ad4f757", "score": "0.47793195", "text": "def skill_defaults():\n return {\"languages\": [], \"technologies\": []}", "title": "" }, { "docid": "eeac2e9a176e8f45ab15f13c85c3b409", "score": "0.4778028", "text": "def get_parameters():\n param_list = ora_db.execute_select('select name, value, isdefault from v$parameter order by name')\n names, values, isdefaults = zip(*param_list) # Splits...\n return {names[i]: {'value': values[i], 'isdefault': isdefaults[i]} for i in range(0, len(names))} # ... and groups.", "title": "" }, { "docid": "77161d317cc4171a0a16544a75e11073", "score": "0.47735962", "text": "def _refactor_arguments(self, header):\n items = self.extract_items(item_class=ListItem)\n lines = [':{0}:'.format(header.lower())]\n prefix = None if len(items) == 1 else '-'\n for item in items:\n lines += add_indent(item.to_rst(prefix))\n return lines", "title": "" }, { "docid": "a5a82d16f3264ff7429f7618452d1cde", "score": "0.4773565", "text": "def get_dict(self):\n\n return {'kernel': self.kernel_list,\n 'tail': self.tail_list,\n 'optimization_problem': self.optimization_problem_dict,\n 'experimental_design': self.experimental_design_dict,\n 'surrogate_model': self.surrogate_model_dict,\n 'adaptive_sampling': self.adaptive_sampling_dict,\n 'strategy': self.strategy_dict,\n 'controller': self.controller_dict,\n 'delay': self.delay_list,\n 'proj_fun': self.projection_list}", "title": "" }, { "docid": "f87e9aa484496f9d6cdf5482453ae34f", "score": "0.47731858", "text": "async def get_allowances(self) -> Dict[str, Decimal]:\n ret_val = {}\n approval_lists: List[str] = await safe_gather(*[\n self._get_gateway_instance().get_allowances(\n self.chain, self.network, self.address, list(self._tokens), spender\n ) for spender in self._all_spenders\n ])\n\n for spender, approval_list in zip(self._all_spenders, approval_lists):\n for token, amount in approval_list[\"approvals\"].items():\n ret_val[f\"{spender}_{token}\"] = Decimal(str(amount))\n return ret_val", "title": "" }, { "docid": "e594904a416b60bb5a000bc57a1d6082", "score": "0.4771714", "text": "def to_rst(self, **kwards):\n item = self.item\n postfix = ' --' if (len(item.definition) > 0) else ''\n lines = []\n lines += [item.term]\n lines += [NEW_LINE]\n number_of_classifiers = len(item.classifiers)\n if number_of_classifiers == 1:\n lines += [' *({0[0]})*{1}'.format(item.classifiers, postfix)]\n elif 
number_of_classifiers == 2:\n lines += [\n ' *({0[0]} or {0[1]})*{2}'.format(\n item.classifiers, postfix)]\n lines += add_indent(item.definition) # definition is already a list\n lines += [NEW_LINE]\n return lines", "title": "" }, { "docid": "c9166bbd6b4e1b7671476a3dc9eb396d", "score": "0.4763709", "text": "def as_dict(self):\n\n return {\n \"likelihood\": self.likelihood,\n \"z_cc\": self.z_cc,\n \"cc\": self.cc,\n \"stars\": self.stars,\n }", "title": "" }, { "docid": "2400954b2ece08671488338c1a9897ca", "score": "0.47581655", "text": "def getRandomSkillsAndRole(careerSkillPoints):\r\n roleName = choice(getRoleNames())\r\n return {\r\n \"role_name\": roleName,\r\n \"skills\": getRandomSkillsByRole(roleName, careerSkillPoints),\r\n }", "title": "" }, { "docid": "a6fde847b33570c7db055f5121a965dd", "score": "0.4745011", "text": "def all_input_parameters(self):\n d = OrderedDict()\n # import type\n for i in self._data[\"modeling\"][\"parameters\"][\"separator\"]:\n for j, k in i.items():\n if j == \"i\":\n for m in k:\n if \"#text\" in m:\n d[m[\"@name\"]] = m[\"#text\"]\n else:\n if type(k) is list:\n for n in k:\n for p, q in n.items():\n if p == \"i\":\n for r in q:\n if \"#text\" in r:\n d[r[\"@name\"]] = r[\"#text\"]\n else:\n if type(q) is list:\n for s in q:\n if \"#text\" in s:\n d[s[\"@name\"]] = s[\"#text\"]\n return d", "title": "" }, { "docid": "5cfc0e1601a526e4c763249311241997", "score": "0.47434837", "text": "def get_parameters(self):\n return {}", "title": "" }, { "docid": "1277df42dabdfae2369294deeae3e64d", "score": "0.47415686", "text": "def potential_parameters(cls) -> Iterable[str]:\n return \"k\", \"angle\"", "title": "" }, { "docid": "7a3258ce872d008ee4231e1429503651", "score": "0.47294676", "text": "def revealed_attrs(proof: dict) -> dict:\n\n revealed = proof['proofs'][set(proof['proofs']).pop()]['proof']['primary_proof']['eq_proof']['revealed_attrs']\n rv = {attr: [decode(revealed[attr]), revealed[attr]] for attr in revealed}\n return rv", "title": "" }, { "docid": "b4cc1d470382ef0e31987618b9e5935c", "score": "0.4728944", "text": "def GetDispParms(self):\n dispParms = {key:self.GetParent().Params[key] for key in ['lineThickness', 'show_xlabel', \n 'show_ylabel', 'show_title', 'show_fractions', \n 'plototherWith', 'show_legend','eof', 'xticks', \n 'xticklabel', 'yticks', 'yticklabel', 'linscale', 'title']}\n return dispParms", "title": "" }, { "docid": "4f2a24c6263353dfd1b5f9cc756c21e0", "score": "0.4712898", "text": "def get_symbols(arities: Dict[str, int]) -> Dict[str, Any]:\n assert len(arities) > 0\n\n # List of terminal symbols\n terminals = []\n # List of functions\n functions = []\n # Append symbols to terminals or functions by looping over the arities items\n for key, value in arities.items():\n # A symbol with arity 0 is a terminal\n if value == 0:\n # Append the symbols to the terminals list\n terminals.append(key)\n else:\n # Append the symbols to the functions list\n functions.append(key)\n\n assert len(terminals) > 0\n\n return {\"arities\": arities, \"terminals\": terminals, \"functions\": functions}", "title": "" }, { "docid": "429ae797a70f420b1fe94f58adfb373b", "score": "0.47064617", "text": "def _clone_args(self):\n keys = list(self.keys)\n kw = {}\n if self.allow_any or self.extras:\n kw['allow_extra'] = list(self.extras)\n if self.allow_any:\n kw['allow_extra'].append('*')\n kw['allow_extra_trafaret'] = self.extras_trafaret\n if self.ignore_any or self.ignore:\n kw['ignore_extra'] = list(self.ignore)\n if self.ignore_any:\n 
kw['ignore_any'].append('*')\n return keys, kw", "title": "" }, { "docid": "4a08ec22cee71af656a2e222ad8487cb", "score": "0.4703376", "text": "def informative_pairs(self):\n return []", "title": "" }, { "docid": "61d79cc366060aec5df7bc5ef58a3484", "score": "0.4701568", "text": "def get_params(self):\n return {\"d\": \"57\"}", "title": "" }, { "docid": "a1093c6a0a01fd281ff53a361d5cb677", "score": "0.4691015", "text": "def params(self):\n ret = {}\n for rd in self.decorators:\n args = rd.args\n kwargs = rd.kwargs\n if param in rd:\n is_required = kwargs.get('required', 'default' not in kwargs)\n ret[args[0]] = {'required': is_required, 'other_names': args[1:], 'options': kwargs}\n\n return ret", "title": "" }, { "docid": "a2266fc741aabaf04acda35fcd50a3f8", "score": "0.4689019", "text": "def paramDetails(cls):\n return {\n 'k': (1, 50, 2, 5),\n }", "title": "" }, { "docid": "577216765bda4bae714a63bf26977c09", "score": "0.46826452", "text": "def parameters(self) -> List[str]:\n return []", "title": "" }, { "docid": "83ad60cf21d99204d5e4d8b776474406", "score": "0.46813694", "text": "def get_params_dict(self, x):\n keys = self.mcmc_version.param_keys\n params_dict = dict.fromkeys(keys)\n\n for i, key in enumerate(keys):\n params_dict[key] = x[i]\n\n for key, val in self.constants.items():\n params_dict[key] = val\n\n return params_dict", "title": "" }, { "docid": "48317061201d67671567e3febf4dd023", "score": "0.4680589", "text": "def make_reward_opts_from_opts_dict(trainer_opts_dict):\n return {\n 'K': trainer_opts_dict['reward_top_k'],\n 'target': trainer_opts_dict['reward_episode_target'],\n 's': trainer_opts_dict['reward_target_weight'],\n }", "title": "" }, { "docid": "aa78fce8cb0a4439453e9293fd0a096a", "score": "0.46797276", "text": "def experimental_design_generate_dict(self):\n\n mod_name = experimental_design\n self.experimental_design_dict = self.obj.get_class_names(mod_name)\n for i in self.experimental_design_dict:\n self.experimental_design_dict[i] = \\\n self.obj.get_arguments_and_default_values(self.experimental_design_dict[i])", "title": "" }, { "docid": "dadaf7d9601b901faebdf5b858e32844", "score": "0.4676164", "text": "def get_run_list_dict(likelihood_list, nrun, **kwargs):\n run_list_dict = {}\n for likelihood_name in likelihood_list:\n run_list_dict[likelihood_name] = get_run_list(\n likelihood_name, nrun, **kwargs)\n return run_list_dict", "title": "" }, { "docid": "a717c3ad6adcbbbd5ad1d4250cc1b025", "score": "0.4675989", "text": "def get_parameters(self):\n parameters = {'tau_m': self.tau_m, 'tau_z_post': self.tau_z_post, 'tau_z_pre': self.tau_z_pre,\n 'tau_p': self.tau_p, 'tau_a': self.tau_a, 'g_a': self.g_a, 'g_w': self.g_w,\n 'g_beta': self.g_beta, 'g_I':self.g_I, 'sigma':self.sigma, 'k': self.k,\n 'g_w_ampa': self.g_w_ampa, 'tau_z_post_ampa': self.tau_z_post_ampa,\n 'tau_z_pre_ampa': self.tau_z_pre_ampa, 'epsilon': self.epsilon, 'G': self.G}\n\n return parameters", "title": "" }, { "docid": "cf53102dca2bbff10b6d9a4adb19da24", "score": "0.46759784", "text": "def slot_mappings(self) -> Dict[Text,Union[Dict, List[Dict]]]:\n\n\n return {\n \"description\": [\n self.from_text(),\n ],\n \"subject\": [\n self.from_text(),\n ],\n \"email\": [\n self.from_text(),\n ],\n \"priority\": [\n self.from_text(),\n ],\n \"status\": [\n self.from_text(),\n ],\n\n\n }", "title": "" }, { "docid": "73ebd77a8b41ae1bb5bd022b30d80bd4", "score": "0.46741757", "text": "def slot_mappings(self) -> Dict[Text,Union[Dict, List[Dict]]]:\n\n\n return {\n \"priority-up\": [\n self.from_text(),\n ],\n 
\"status-up\": [\n self.from_text(),\n ],\n \"ticket_id\": [\n self.from_text(),\n ],\n\n\n }", "title": "" }, { "docid": "2a6fd83341d327138cf16a0a30165175", "score": "0.46732807", "text": "def get_funded_awards_resources(self):\n funded_awards_resources = {}\n for award in self.AWARDS.keys():\n answer = ''\n while (answer != 'y') and (answer != 'n'):\n answer = input(f'Award \"{award}\" was funded [y/n]: ')\n if answer == 'y':\n gamers_resources = {}\n for gamer_name in self.gamers_names:\n if award == 'Landlord':\n tiles_in_play = int(input(f'Enter {gamer_name}\\'s total number of tiles in play: '))\n gamers_resources[gamer_name] = tiles_in_play\n elif award == 'Banker':\n megacredits_production = int(input(f'Enter {gamer_name}\\'s megacredists production: '))\n gamers_resources[gamer_name] = megacredits_production\n elif award == 'Scientist':\n science_tags_in_play = int(input(f'Enter {gamer_name}\\'s science tag in play: '))\n gamers_resources[gamer_name] = science_tags_in_play\n elif award == 'Thermalist':\n heat_resource_cubes = int(input(f'Enter {gamer_name}\\'s heat resource cubes: '))\n gamers_resources[gamer_name] = heat_resource_cubes\n elif award == 'Miner':\n steel_and_titanium_resource_cubes = int(input(f'Enter {gamer_name}\\'s steel and titanium resource cubes: '))\n gamers_resources[gamer_name] = steel_and_titanium_resource_cubes\n funded_awards_resources[award] = gamers_resources\n return funded_awards_resources", "title": "" }, { "docid": "2bf8cee47018ec30f2ffadb0621f5d65", "score": "0.46692625", "text": "def review_reputation(request):\n logger('review_reputation', 'def {}'.format(request.matchdict))\n ui_locales = get_language_from_cookie(request)\n _tn = Translator(ui_locales)\n\n reputation_dict = review_history_helper.get_reputation_history_of(request.authenticated_userid, _tn)\n prep_dict = __main_dict(request, _tn.get(_.reputation))\n prep_dict.update({'reputation': reputation_dict})\n return prep_dict", "title": "" }, { "docid": "4347adcdf92e282891e40a7b87eac44b", "score": "0.46667165", "text": "def _format_modifiable_parameters(self):\n # sort i_inj consts on top of the parameters so they end up on top of the consts block\n self._modifiable_parameters = sorted(self._modifiable_parameters, key=lambda p: p not in self._i_inj_params)\n\n # Add component information\n formatted_params = super()._format_modifiable_parameters()\n for fp, p in zip(formatted_params, self._modifiable_parameters):\n fp['component'] = component_name(p)\n return formatted_params", "title": "" }, { "docid": "b8832d47197670e44ef09c1b1add36bf", "score": "0.46555403", "text": "def get_scores(self, listofparams=None):\n possibleparams = {\n 'thresh': self.depth_thresh,\n 'error': self.depth_errors,\n }\n if listofparams is None:\n listofparams = possibleparams\n\n result = {}\n for param in listofparams:\n if param in possibleparams.keys():\n result.update(possibleparams[param])\n for i in result.keys():\n result[i] = result[i]/self.num_samples\n\n return result", "title": "" }, { "docid": "f6f72658dbb5d80fcf216df233ad4ad4", "score": "0.46518925", "text": "def get_mineral_data(replay):\r\n timeline = replay.timeline\r\n mineral_data = dict()\r\n for player in replay.players:\r\n mineral_data[player] = [\r\n state[player]['resource_collection_rate']['minerals']\r\n for state in timeline\r\n ]\r\n return mineral_data", "title": "" }, { "docid": "af0595dd36152298ee07aa898a7911d0", "score": "0.46428376", "text": "def scopal_arguments(self, scopes=None):\n id_to_lbl = {}\n if scopes is not None:\n for 
label, nodes in scopes.items():\n for node in nodes:\n id_to_lbl[node.id] = label\n\n scargs = {node.id: [] for node in self.nodes}\n for link in self.links:\n if link.post == HEQ_POST:\n relation = scope.LHEQ\n elif link.post == H_POST:\n relation = scope.QEQ\n else:\n continue\n # get the label if scopes was given\n target = id_to_lbl.get(link.end, link.end)\n scargs[link.start].append((link.role, relation, target))\n\n return scargs", "title": "" }, { "docid": "461b402bd097fc5bed59af56c55f4596", "score": "0.46414754", "text": "def param_list(self):\n result = {}\n for param in self.parameter_start_address:\n address = self.parameter_start_address[param]\n if param in self.parameter_end_address:\n end_address = self.parameter_end_address[param]\n length = end_address - address + 1\n address = \"{}/{}\".format(address, length)\n else:\n address = str(address)\n\n result[param] = address\n\n return result", "title": "" }, { "docid": "bc8e5c73f2f80d32eb30039f306d5524", "score": "0.4635504", "text": "def sampler_function_kwargs(self):\n keys = [\"adapt\", \"swap_ratios\"]\n return {key: self.kwargs[key] for key in keys}", "title": "" }, { "docid": "a0b4474b4430a2a1cdb594f7f574e200", "score": "0.4628914", "text": "def get_parameters(self):\r\n return_value = []\r\n for i in self.state_dict():\r\n return_value.append(self.state_dict()[i])\r\n return return_value", "title": "" }, { "docid": "5ed532567ad80627cf31f8d25e640a6b", "score": "0.46274272", "text": "def get_params_info(cls):\n return dict(\n width='Waveguide width in microns',\n length='Waveguide length in microns'\n )", "title": "" }, { "docid": "e5f66e9fea17364d41399fcb4f1f8ee0", "score": "0.4621992", "text": "def get_parameters(self):\n d = AdaptiveBatAlgorithm.get_parameters(self)\n d.update({\n 'min_loudness': self.min_loudness,\n 'max_loudness': self.max_loudness,\n 'min_pulse_rate': self.min_pulse_rate,\n 'max_pulse_rate': self.max_pulse_rate,\n 'tao_1': self.tao_1,\n 'tao_2': self.tao_2\n })\n return d", "title": "" }, { "docid": "7446e6b91e981a1ee57e4900b9319f2a", "score": "0.4618597", "text": "def estimate_rewards(next_states, actions, rewards, reward_action, v = 1):\n avg_reward = estimate_empirical_reward(actions, rewards, reward_action)\n next_states = next_states[actions == reward_action]\n rewards = rewards[actions == reward_action]\n actions = actions[actions == reward_action]\n state_total_rewards ={}\n for i in range(len(next_states)):\n s = tuple(next_states[i])\n r = rewards[i]\n if s in state_total_rewards:\n state_total_rewards[s].append(r)\n else:\n state_total_rewards[s] = [r]\n for s in state_total_rewards.keys():\n r = np.array(state_total_rewards[s])\n #if np.mean(r) > 3 and len(r) > 5:\n # print masked_feat_labels[np.array(s).astype(int) == 1], str(r)\n state_total_rewards[s],_ = update_prior_mean_with_data(0, v, r)\n return state_total_rewards", "title": "" } ]
7ce4c27c26f7eafeb5c9ba17863fd043
Format the elapsed time from the given date to now or the given timedelta. This function is also available in the template context as a filter named `timedeltaformat`.
[ { "docid": "52ff51c9d6ff2c2d1a5b65c3f784e691", "score": "0.68898124", "text": "def format_timedelta(self, datetime_or_timedelta, granularity='second',\n add_direction=False, threshold=0.85):\n if isinstance(datetime_or_timedelta, datetime):\n datetime_or_timedelta = datetime.utcnow() - datetime_or_timedelta\n return dates.format_timedelta(\n datetime_or_timedelta,\n granularity,\n threshold=threshold,\n add_direction=add_direction,\n locale=self.get_locale())", "title": "" } ]
[ { "docid": "2a9b5adbd756d4fe3c4ef0cc217dda0d", "score": "0.6974653", "text": "def format_timedelta(\r\n datetime_or_timedelta,\r\n granularity=\"second\",\r\n add_direction=False,\r\n threshold=0.85,\r\n request=None,\r\n):\r\n if isinstance(datetime_or_timedelta, datetime):\r\n datetime_or_timedelta = datetime.utcnow() - datetime_or_timedelta\r\n\r\n return dates.format_timedelta(\r\n datetime_or_timedelta,\r\n granularity,\r\n threshold=threshold,\r\n add_direction=add_direction,\r\n locale=get_locale(request),\r\n )", "title": "" }, { "docid": "675b00e17c5dca0a6b90f862b2dbe4a8", "score": "0.67092055", "text": "def format_timedelta(datetime_or_timedelta, granularity='second'):\r\n if isinstance(datetime_or_timedelta, datetime):\r\n datetime_or_timedelta = datetime.utcnow() - datetime_or_timedelta\r\n return dates.format_timedelta(datetime_or_timedelta, granularity,\r\n locale=get_locale())", "title": "" }, { "docid": "7aa0142ae71aa51cfe3f2c72cac72571", "score": "0.6279847", "text": "def FormatTimedelta(time_delta):\n seconds = int(time_delta.total_seconds())\n days, time_left = divmod(seconds, 86400) # 86400: seconds in a day = 24*60*60\n hours, time_left = divmod(time_left, 3600) # 3600: seconds in an hour = 60*60\n minutes, seconds = divmod(time_left, 60) # 60: seconds in a minute\n\n pretty_label = '%ss ago' % seconds\n if days > 0:\n pretty_label = '%sd, %sh, %sm ago' % (days, hours, minutes)\n elif hours > 0:\n pretty_label = '%sh, %sm ago' % (hours, minutes)\n elif minutes > 0:\n pretty_label = '%sm, %ss ago' % (minutes, seconds)\n return pretty_label", "title": "" }, { "docid": "846320db21573c35eb0f1b329703cf65", "score": "0.60039836", "text": "def strfdelta(tdelta, fmt):\n d = {\"days\": tdelta.days}\n d[\"hours\"], rem = divmod(tdelta.seconds, 3600)\n d[\"minutes\"], d[\"seconds\"] = divmod(rem, 60)\n d[\"ms\"] = tdelta.microseconds\n return fmt.format(**d)", "title": "" }, { "docid": "fcb3bf833a7892ec3d96026e1c5fd9eb", "score": "0.5943049", "text": "def format_time_delta(t: datetime.timedelta) -> str:\n\n remaining_secs = t.seconds % 3600\n time_dict = {\n 'days' : t.days,\n 'hours' : int(t.seconds / 3600),\n 'minutes' : int(remaining_secs / 60),\n 'seconds' : remaining_secs % 60\n }\n\n new_timedict = {}\n # create new dictionary with the keys being the right numeric textual value\n for element in time_dict:\n if time_dict[element] == 1:\n new_timedict[element[0:len(element)-1]] = time_dict[element]\n else:\n new_timedict[element] = time_dict[element]\n\n # store keys and values in a list\n # for easier access\n measures = list(new_timedict.keys())\n values = list(new_timedict.values())\n \n timedelta = ''\n for i in range(len(values)):\n timedelta += f'{values[i]} {measures[i]}, '\n\n # ignore the last 2 characters ', '\n return timedelta[:-2]", "title": "" }, { "docid": "75a3a3d607e4f94f93595af707e31025", "score": "0.56860155", "text": "def _format_as_timestr(td: datetime.timedelta) -> str:\n if td < datetime.timedelta(minutes=1):\n return 'under a minute'\n else:\n return timestr.timedelta_to_str(td)", "title": "" }, { "docid": "b5ba443c93f7560a13f8273e88838842", "score": "0.5671875", "text": "def FormatTimedelta(delta):\n if delta is None:\n return None\n hours, remainder = divmod(delta.total_seconds(), 3600)\n minutes, seconds = divmod(remainder, 60)\n return '%02d:%02d:%02d' % (hours, minutes, seconds)", "title": "" }, { "docid": "64c72d29a2b026461d8d8d95be0559d8", "score": "0.566458", "text": "def human_delta(tdelta):\n d = dict(days=tdelta.days)\n d['hrs'], rem = 
divmod(tdelta.seconds, 3600)\n d['min'], d['sec'] = divmod(rem, 60)\n if d['min'] is 0:\n fmt = '{sec} sec'\n elif d['hrs'] is 0:\n fmt = '{min} min {sec} sec'\n elif d['days'] is 0:\n fmt = '{hrs} hr(s) {min} min {sec} sec'\n else:\n fmt = '{days} day(s) {hrs} hr(s) {min} min {sec} sec'\n return fmt.format(**d)", "title": "" }, { "docid": "0582691c692b0c9fccdefb56dc3d11e8", "score": "0.56279296", "text": "def timerepr(deltat):\n if deltat.days<0:\n return \"now\"\n hours, seconds=divmod(deltat.seconds, 60*60)\n minutes, seconds=divmod(seconds, 60)\n if deltat.days: return \"in {0}d {1}h {2}m {3}s\".format(deltat.days, hours, minutes, seconds)\n if hours: return \"in {0}h {1}m {2}s\".format(hours, minutes, seconds)\n if minutes: return \"in {0}m {1}s\".format(minutes, seconds)\n return \"in {0}s\".format(seconds)", "title": "" }, { "docid": "bfbbbb07010a53087142006afb579ee3", "score": "0.5626832", "text": "def pretty_timedelta(fmt=\"%d:%02d:%02d\", since=None, until=None):\n since = since or time.time()\n until = until or time.time()\n delta_s = until - since\n hours, remainder = divmod(delta_s, 3600)\n minutes, seconds = divmod(remainder, 60)\n return fmt % (hours, minutes, seconds)", "title": "" }, { "docid": "193b5b8856e719f083e3d2e0535a2e2b", "score": "0.55961984", "text": "def smooth_timedelta(timedeltaobj):\n if not timedeltaobj:\n return None\n secs = timedeltaobj.total_seconds()\n timetot = \"\"\n if secs > 86400: # 60sec * 60min * 24hrs\n days = secs // 86400\n timetot += f\"{int(days)} dager\"\n secs = secs - days*86400\n\n if secs > 3600:\n hrs = secs // 3600\n timetot += f\" {int(hrs)} timer\"\n secs = secs - hrs*3600\n\n if secs > 60:\n mins = secs // 60\n timetot += f\" {int(mins)} minutter\"\n secs = secs - mins*60\n\n if secs > 0:\n timetot += f\" {secs} sekunder\"\n return timetot", "title": "" }, { "docid": "aeb7a40631b89d6300d368bf8dc61357", "score": "0.5568412", "text": "def FormatDuration(datetime_start, datetime_end):\n if not datetime_start or not datetime_end:\n return None\n return FormatTimedelta(datetime_end - datetime_start)", "title": "" }, { "docid": "4bd6362eaba0b096d429d52694e89def", "score": "0.5524054", "text": "def human_timedelta(self, delta):\n reldelta = dateutil.relativedelta.relativedelta(seconds=delta.total_seconds(), microseconds=delta.microseconds)\n attrs = ['years', 'months', 'days', 'hours', 'minutes', 'seconds']\n\n human_readable = lambda delta: ['%d %s' % (getattr(delta, attr), getattr(delta, attr) > 1 and attr or attr[:-1])\n for attr in attrs if getattr(delta, attr)]\n\n # just the first 2 elements\n deltalist = list(human_readable(reldelta))\n if len(deltalist) > 2:\n deltalist = deltalist[:2]\n\n # to string\n return ', '.join(deltalist)", "title": "" }, { "docid": "e78c15e073b9f971511182c0728f83ac", "score": "0.5491043", "text": "def format_date_diff(reld, no_color=False):\n if reld.years > 0:\n return colorize(TCOLORS['FAIL'], \"{y} years and {m} months ago\".format(y=reld.years, m=reld.months), no_color)\n elif reld.months > 0:\n return colorize(TCOLORS['FAIL'], \"{m} months and {d} days ago\".format(m=reld.months, d=reld.days), no_color)\n elif reld.days > 0:\n return colorize(TCOLORS['WARNING'], \"{d} days and {h} hours ago\".format(d=reld.days, h=reld.hours), no_color)\n elif reld.hours > 0:\n return colorize(TCOLORS['WARNING'], \"{h} hours and {m} min. 
ago\".format(h=reld.hours, m=reld.minutes), no_color)\n elif reld.minutes > 0:\n return colorize(TCOLORS['GREEN'], \"{} minutes ago\".format(reld.minutes), no_color)\n else:\n return colorize(TCOLORS['GREEN'], \"just some seconds ago\", no_color)", "title": "" }, { "docid": "dcc1e767dd587db985b21f9199303860", "score": "0.54403174", "text": "def format_elapse(**elapse) -> str:\n\n attrs = ['years', 'months', 'days', 'hours', 'minutes', 'seconds']\n elapse = relativedelta(**elapse)\n return ', '.join(['%d %s' % (getattr(elapse, attr), getattr(elapse, attr) > 1 and attr or attr[:-1]) for attr in attrs if getattr(elapse, attr)])", "title": "" }, { "docid": "0822c1ac18c99b3f55ceeff758c5a0ae", "score": "0.5433698", "text": "def TimeDelta(delta, separator=' '):\n parts = []\n seconds = delta.seconds\n if delta.days:\n parts.append('%dd' % delta.days)\n if seconds >= 3600:\n parts.append('%dh' % (seconds // 3600))\n seconds %= 3600\n if seconds >= 60:\n parts.append('%dm' % (seconds // 60))\n seconds %= 60\n seconds += delta.microseconds / 1e6\n if seconds or not parts:\n parts.append('%gs' % seconds)\n return separator.join(parts)", "title": "" }, { "docid": "04e71137640ec41b0a78c4147b3f2073", "score": "0.5402975", "text": "def format_time_ago(n: datetime) -> str:\n units = {\n \"years\": lambda diff: diff.days / 365,\n \"months\": lambda diff: diff.days / 30.436875, # Average days per month\n \"weeks\": lambda diff: diff.days / 7,\n \"days\": lambda diff: diff.days,\n \"hours\": lambda diff: diff.seconds / 3600,\n \"minutes\": lambda diff: diff.seconds % 3600 / 60,\n }\n diff = datetime.now() - n\n for unit in units:\n dur = int(units[unit](diff))\n if dur > 0:\n if dur == 1: # De-pluralize\n unit = unit[:-1]\n return f\"{dur} {unit} ago\"\n return \"Just now\"", "title": "" }, { "docid": "c3916c872863375b7ab78dbcf7626dd1", "score": "0.5382564", "text": "def readable_time(delta_seconds):\n hours, remainder = divmod(int(delta_seconds), 3600)\n minutes, seconds = divmod(remainder, 60)\n days, hours = divmod(hours, 24)\n\n if days:\n fmt = '{d}d {h}hr {m}min {s}sec'\n elif hours:\n fmt = '{h}hr {m}min {s}sec'\n else:\n fmt = '{m}min {s}sec'\n\n return fmt.format(d=days, h=hours, m=minutes, s=seconds)", "title": "" }, { "docid": "882d309ae8f757670def4d64bed5b685", "score": "0.53601116", "text": "def format_relative_date(date):\r\n\r\n\tnow = datetime.datetime.now()\r\n\tdiff = (now - date).seconds\r\n\r\n\t# Anti-repetition! These simplify the code somewhat.\r\n\tplural = lambda d: 's' if d != 1 else ''\r\n\tfrmt = lambda d: (diff / float(d), plural(diff / float(d)))\r\n\r\n\tif diff < 60:\r\n\t\treturn '%d second%s ago' % frmt(1)\r\n\telif diff < 3600:\r\n\t\treturn '%d minute%s ago' % frmt(60)\r\n\telif diff < 86400:\r\n\t\treturn '%d hour%s ago' % frmt(3600)\r\n\telif diff < 172800:\r\n\t\treturn 'yesterday'\r\n\telse:\r\n\t\treturn date.strftime('M j / y - H:i')", "title": "" }, { "docid": "0c6ab9ed44d40157289ca4c17e8c46fd", "score": "0.5353509", "text": "def format_relative(self, value):\n if isinstance(value, int):\n value = datetime.fromtimestamp(value, pytz.utc)\n #Check if timezone is naive, convert\n if value.tzinfo is None:\n raise ValueError(\"Not possible to use format_relative with timezone naive datetimes.\")\n elif value.tzinfo is not pytz.utc:\n value = self.tz_to_utc(value)\n\n now = self.utcnow()\n diff = now - value\n second_diff = diff.seconds\n day_diff = diff.days\n\n if day_diff < 0:\n #FIXME: Shouldn't future be handled as well? 
:)\n return self.format_dt(value)\n\n if day_diff == 0:\n if second_diff < 10:\n return _(\"Just now\")\n if second_diff < 60:\n return _(\"${diff} seconds ago\", mapping={'diff': str(second_diff)})\n if second_diff < 120:\n return _(\"1 minute ago\")\n if second_diff < 3600:\n return _(\"${diff} minutes ago\", mapping={'diff': str(second_diff / 60)})\n if second_diff < 7200:\n return _(\"1 hour ago\")\n if second_diff < 86400:\n return _(\"${diff} hours ago\", mapping={'diff': str(second_diff / 3600)})\n return self.format_dt(value)", "title": "" }, { "docid": "f3c8fcdc2be4d8f278e3f987d97d9e69", "score": "0.53460836", "text": "def time_elapsed(start_time):\n time_diff = time.time() - start_time\n # Source: tqdm.std.tqdm.format_interval\n mins, s = divmod(int(time_diff), 60)\n h, m = divmod(mins, 60)\n if h:\n return \"{0:d}:{1:02d}:{2:02d}\".format(h, m, s)\n else:\n return \"{0:02d}:{1:02d}\".format(m, s)", "title": "" }, { "docid": "ec5ce9533aab0f2508cbc4734e2ed8b4", "score": "0.52459955", "text": "def formatTime(self, record, datefmt=None):\n # The default value of the following argument is defined here so\n # that Sphinx doesn't embed the default value in the generated\n # documentation (because the result is awkward to read).\n datefmt = datefmt or DEFAULT_DATE_FORMAT\n # Replace %f with the value of %(msecs)03d.\n if '%f' in datefmt:\n datefmt = datefmt.replace('%f', '%03d' % record.msecs)\n # Delegate the actual date/time formatting to the base formatter.\n return logging.Formatter.formatTime(self, record, datefmt)", "title": "" }, { "docid": "3bf1a3c84ce14af5a1224659bf30c5c4", "score": "0.5241587", "text": "def convert_from_timedelta(timedelta_val):\n if timedelta_val.days:\n return \"{days}d\".format(days=timedelta_val.days)\n if timedelta_val.seconds:\n return \"{seconds}s\".format(seconds=timedelta_val.seconds)", "title": "" }, { "docid": "fa9be31d6c654601894e6113a482ea6f", "score": "0.5233914", "text": "def time_string(delta, hours, minutes, seconds, delim, always_show=True):\n t_hours, remainder = divmod(delta, 3600)\n t_minutes, t_seconds = divmod(remainder, 60)\n \n output = []\n if always_show or t_hours > 0: output.append(hours % t_hours)\n if always_show or t_minutes > 0: output.append(minutes % t_minutes)\n if always_show or t_seconds > 0: output.append(seconds % t_seconds)\n \n return delim.join(output)", "title": "" }, { "docid": "a80f4cf1f8f1c9f4773c76b5510c45cc", "score": "0.5204631", "text": "def format_date(self, date, gmt_offset=0, relative=True, shorter=False,\r\n full_format=False):\r\n if self.code.startswith(\"ru\"):\r\n relative = False\r\n if isinstance(date, numbers.Real):\r\n date = datetime.datetime.utcfromtimestamp(date)\r\n now = datetime.datetime.utcnow()\r\n if date > now:\r\n if relative and (date - now).seconds < 60:\r\n # Due to click skew, things are some things slightly\r\n # in the future. 
Round timestamps in the immediate\r\n # future down to now in relative mode.\r\n date = now\r\n else:\r\n # Otherwise, future dates always use the full format.\r\n full_format = True\r\n local_date = date - datetime.timedelta(minutes=gmt_offset)\r\n local_now = now - datetime.timedelta(minutes=gmt_offset)\r\n local_yesterday = local_now - datetime.timedelta(hours=24)\r\n difference = now - date\r\n seconds = difference.seconds\r\n days = difference.days\r\n\r\n _ = self.translate\r\n format = None\r\n if not full_format:\r\n if relative and days == 0:\r\n if seconds < 50:\r\n return _(\"1 second ago\", \"%(seconds)d seconds ago\",\r\n seconds) % {\"seconds\": seconds}\r\n\r\n if seconds < 50 * 60:\r\n minutes = round(seconds / 60.0)\r\n return _(\"1 minute ago\", \"%(minutes)d minutes ago\",\r\n minutes) % {\"minutes\": minutes}\r\n\r\n hours = round(seconds / (60.0 * 60))\r\n return _(\"1 hour ago\", \"%(hours)d hours ago\",\r\n hours) % {\"hours\": hours}\r\n\r\n if days == 0:\r\n format = _(\"%(time)s\")\r\n elif days == 1 and local_date.day == local_yesterday.day and \\\r\n relative:\r\n format = _(\"yesterday\") if shorter else \\\r\n _(\"yesterday at %(time)s\")\r\n elif days < 5:\r\n format = _(\"%(weekday)s\") if shorter else \\\r\n _(\"%(weekday)s at %(time)s\")\r\n elif days < 334: # 11mo, since confusing for same month last year\r\n format = _(\"%(month_name)s %(day)s\") if shorter else \\\r\n _(\"%(month_name)s %(day)s at %(time)s\")\r\n\r\n if format is None:\r\n format = _(\"%(month_name)s %(day)s, %(year)s\") if shorter else \\\r\n _(\"%(month_name)s %(day)s, %(year)s at %(time)s\")\r\n\r\n tfhour_clock = self.code not in (\"en\", \"en_US\", \"zh_CN\")\r\n if tfhour_clock:\r\n str_time = \"%d:%02d\" % (local_date.hour, local_date.minute)\r\n elif self.code == \"zh_CN\":\r\n str_time = \"%s%d:%02d\" % (\r\n (u('\\u4e0a\\u5348'), u('\\u4e0b\\u5348'))[local_date.hour >= 12],\r\n local_date.hour % 12 or 12, local_date.minute)\r\n else:\r\n str_time = \"%d:%02d %s\" % (\r\n local_date.hour % 12 or 12, local_date.minute,\r\n (\"am\", \"pm\")[local_date.hour >= 12])\r\n\r\n return format % {\r\n \"month_name\": self._months[local_date.month - 1],\r\n \"weekday\": self._weekdays[local_date.weekday()],\r\n \"day\": str(local_date.day),\r\n \"year\": str(local_date.year),\r\n \"time\": str_time\r\n }", "title": "" }, { "docid": "b08feaf86e5f499fd160fb746295f4f2", "score": "0.51617175", "text": "def precise_format_timedelta(delta, locale, threshold=.85, decimals=2):\r\n seconds = delta.total_seconds()\r\n\r\n for unit, secs_per_unit in TIMEDELTA_UNITS:\r\n value = abs(seconds) / secs_per_unit\r\n if value >= threshold:\r\n plural_form = locale.plural_form(value)\r\n pattern = None\r\n for choice in (unit + ':medium', unit):\r\n patterns = locale._data['unit_patterns'].get(choice)\r\n if patterns is not None:\r\n pattern = patterns[plural_form]\r\n break\r\n if pattern is None:\r\n return u''\r\n decimals = int(decimals)\r\n format_string = \"%.\" + str(decimals) + \"f\"\r\n return pattern.replace('{0}', format_string % value)\r\n return u''", "title": "" }, { "docid": "2078c11c3547e963ff550f8a261ace45", "score": "0.515791", "text": "def pretty_date(time=False):\n now = datetime.now()\n if type(time) is int:\n diff = now - datetime.fromtimestamp(time)\n elif isinstance(time,datetime):\n diff = now - time\n elif not time:\n diff = now - now\n second_diff = diff.seconds\n day_diff = diff.days\n\n if day_diff < 0:\n return ''\n\n if day_diff == 0:\n if second_diff < 10:\n return \"just 
now\"\n if second_diff < 60:\n return str(second_diff) + \" seconds ago\"\n if second_diff < 120:\n return \"a minute ago\"\n if second_diff < 3600:\n return str(second_diff / 60) + \" minutes ago\"\n if second_diff < 7200:\n return \"an hour ago\"\n if second_diff < 86400:\n return str(second_diff / 3600) + \" hours ago\"\n if day_diff == 1:\n return \"Yesterday\"\n if day_diff < 7:\n return str(day_diff) + \" days ago\"\n if day_diff < 31:\n return str(day_diff / 7) + \" weeks ago\"\n if day_diff < 365:\n return str(day_diff / 30) + \" months ago\"\n return str(day_diff / 365) + \" years ago\"", "title": "" }, { "docid": "32f50b3b380e1a570a82ca6ef28ac92b", "score": "0.5140887", "text": "def render(self, context):\n\n wedding_date = date(2009, 10, 17)\n diff = wedding_date - date.today()\n return \"%s days until wedding\" % (diff.days)", "title": "" }, { "docid": "4aff73b4cf374c88bc3c9cffa41cdc56", "score": "0.5136714", "text": "def _delta_time_str(days, seconds, microseconds, use_microseconds=False):\n # Conversion\n remainder = divmod(seconds, 3600)[1]\n temp = {\n 'days': days,\n 'hours': divmod(seconds, 3600)[0],\n 'minutes': divmod(remainder, 60)[0],\n 'seconds': divmod(remainder, 60)[1],\n 'microseconds': microseconds\n }\n # Formatter\n formatter = \"{hours:02d}:{minutes:02d}:{seconds:02d}\"\n if use_microseconds:\n formatter += \":{microseconds:d}\"\n if days == 1:\n formatter = \"1 day, \" + formatter\n elif days > 1:\n formatter = \"{days} days, \" + formatter\n\n string = formatter.format(**temp)\n string = _precision_on_microseconds(string, use_microseconds)\n return string", "title": "" }, { "docid": "8e7b272e62d24668516b1bb6133ea0a7", "score": "0.51242286", "text": "def pretty_date(d):\n from math import fabs\n from datetime import datetime\n\n now = datetime.now()\n diff = now - d\n total_seconds = diff.seconds + diff.days * 24 * 60 * 60\n sec = int(fabs(total_seconds))\n\n if sec < 60:\n v = sec\n unit = 'second' + ('s' if v != 1 else '')\n elif sec < 60 * 60:\n v = sec / 60\n unit = 'minute' + ('s' if v != 1 else '')\n elif sec < 60 * 60 * 24:\n v = sec / 60 / 60\n unit = 'hour' + ('s' if v != 1 else '')\n else:\n v = sec / 60 / 60 / 24\n unit = 'day' + ('s' if v != 1 else '')\n\n if total_seconds < 0:\n return 'in %i %s' % (v, unit) # future\n else:\n return '%i %s ago' % (v, unit) # past", "title": "" }, { "docid": "d9f4e68ab3ebfb3919eec3396ee39dc3", "score": "0.5118705", "text": "def format_seconds(value, arg=None):\n from django.utils import dateformat\n #from django.utils import formats\n if value in (None, u''):\n return u''\n if arg is None:\n arg = settings.TIME_FORMAT\n data = Duration(value)\n \n try:\n return dateformat.time_format(data, arg)\n except AttributeError:\n return ''", "title": "" }, { "docid": "b98ee88e1adca61b9d5f9ed3649e69f4", "score": "0.5106905", "text": "def __formatDuration(self, time, showHours):\n hours, remainder = divmod(time.seconds, 3600)\n minutes, seconds = divmod(remainder, 60)\n if showHours:\n return \"%d:%02d:%02d\" % (hours, minutes, seconds)\n return \"%d:%02d\" % (minutes, seconds)", "title": "" }, { "docid": "e1e0b98e20546d9fde5a8b5d3fb7c980", "score": "0.5073348", "text": "def format_current_time(to_format: datetime, date=False, time=False, mil=False, micro=False, save=False):\n logger.debug(\"running\")\n if save:\n logger.debug(\"done with save\")\n return to_format.strftime(\"%Y-%m-%d-%H-%M-%S\")\n fmt_str = str()\n date_str = \"%Y-%m-%d\"\n time_str = \"%H:%M:%S\"\n micro_str = \".%f\"\n spacer = \" \"\n if date:\n 
fmt_str += date_str\n if time or mil or micro:\n fmt_str += spacer\n if time:\n fmt_str += time_str\n if mil or micro:\n fmt_str += micro_str\n if mil:\n logger.debug(\"done with mil\")\n return to_format.strftime(fmt_str)[:-3]\n else:\n logger.debug(\"done\")\n return to_format.strftime(fmt_str)", "title": "" }, { "docid": "2ab49bf2d4a0feed8ee3b5a21d9fbee2", "score": "0.5068368", "text": "def format_date(dt):\n now = utcnow()\n today = now.date()\n date = dt.date()\n delta = now - dt\n day_delta = today - date\n days = day_delta.days\n seconds = delta.total_seconds()\n if dt > now:\n print(\"???Future: %s, %s???\" % (dt, now))\n return \"In the future somehow\"\n if dt + timedelta(minutes=9) >= now:\n return \"just now\"\n elif dt + timedelta(minutes=90) >= now:\n return \"%i minutes ago\" % (seconds // 60)\n elif date == today:\n return \"today\"\n elif date + timedelta(days=1) == today:\n return \"yesterday\"\n elif date + timedelta(days=14) >= today:\n return \"%i days ago\" % days\n elif date + timedelta(days=60) >= today:\n return \"%i weeks ago\" % (days // 7)\n elif date + timedelta(days=700) >= today:\n return \"%i months ago\" % (days // 30)\n else:\n return \"%i years ago\" % ((days + 150) // 365)", "title": "" }, { "docid": "54049be46c6dc472e618026aae4927bb", "score": "0.5068333", "text": "def test_argument(self):\n self.assertEqual(self.render(timedelta(seconds=0), self.tpl2), \"0 second\")\n self.assertEqual(self.render(timedelta(seconds=30), self.tpl2), \"30 seconds\")\n self.assertEqual(self.render(timedelta(seconds=60), self.tpl2), \"1 minute\")\n self.assertEqual(self.render(timedelta(seconds=3599), self.tpl2), \"59 minutes, 59 seconds\")\n self.assertEqual(self.render(timedelta(seconds=3600), self.tpl2), \"1 hour\")", "title": "" }, { "docid": "e8169059f7e6a60351d88fdb1294c8af", "score": "0.50228864", "text": "def format_time(start, end):\n hours, rem = divmod(end - start, 3600)\n minutes, seconds = divmod(rem, 60)\n return \"{:0>2}:{:0>2}:{:05.2f}\".format(int(hours), int(minutes), seconds)", "title": "" }, { "docid": "546ebe2b846fcf9a77249f41647e339f", "score": "0.5012365", "text": "def get_time_elapsed(st_dt: datetime) -> str:\n result = relativedelta.relativedelta(datetime.now(), st_dt)\n\n attrs = {\n 'years': 'y',\n 'months': 'mo',\n 'days': 'd',\n 'hours': 'h',\n 'minutes': 'm',\n 'seconds': 's'\n }\n\n result_list = []\n for attr in attrs.keys():\n attr_val = getattr(result, attr)\n if attr_val is not None:\n if attr_val > 0:\n result_list.append('{:d}{}'.format(attr_val, attrs[attr]))\n return ' '.join(result_list)", "title": "" }, { "docid": "3acccf2fae12d588ae4d253db08bd775", "score": "0.49925202", "text": "def _fmt_time(self, date):\n return date.strftime(\"%H:%M, %d %b %Y\")", "title": "" }, { "docid": "13e8af88c09f3bb224d035da757911cc", "score": "0.49540022", "text": "def timesincehumanize(d, now=None, reversed=False):\n # Convert datetime.date to datetime.datetime for comparison.\n if not isinstance(d, datetime.datetime):\n d = datetime.datetime(d.year, d.month, d.day)\n if now and not isinstance(now, datetime.datetime):\n now = datetime.datetime(now.year, now.month, now.day)\n\n if not now:\n now = datetime.datetime.now(utc if is_aware(d) else None)\n\n if reversed:\n d, now = now, d\n delta = now - d\n\n # Deal with leapyears by subtracing the number of leapdays\n leapdays = calendar.leapdays(d.year, now.year)\n if leapdays != 0:\n if calendar.isleap(d.year):\n leapdays -= 1\n elif calendar.isleap(now.year):\n leapdays += 1\n delta -= 
datetime.timedelta(leapdays)\n\n # ignore microseconds\n since = delta.days * 24 * 60 * 60 + delta.seconds\n if since <= 0:\n # d is in the future compared to now, stop processing.\n return avoid_wrapping(ugettext('Just now'))\n for i, (seconds, name) in enumerate(TIMESINCE_CHUNKS):\n count = since // seconds\n if count != 0:\n break\n result = avoid_wrapping(name % count) + (' ago')\n \"\"\"\n if i + 1 < len(TIMESINCE_CHUNKS):\n # Now get the second item\n seconds2, name2 = TIMESINCE_CHUNKS[i + 1]\n count2 = (since - (seconds * count)) // seconds2\n if count2 != 0:\n result += ugettext(', ') + avoid_wrapping(name2 % count2)\n \"\"\"\n return result", "title": "" }, { "docid": "d8f16a8ad23b1727201ca536aeb9dd5f", "score": "0.49506634", "text": "def pretty_time(time: datetime) -> Optional[str]:\n if time is None:\n return None\n\n now = datetime.now()\n diff = now - time\n second_diff = diff.seconds\n day_diff = diff.days\n\n if day_diff == 0:\n if second_diff < 10:\n return \"just now\"\n if second_diff < 60:\n return str(second_diff) + \" seconds ago\"\n if second_diff < 120:\n return \"a minute ago\"\n if second_diff < 3600:\n return str(second_diff // 60) + \" minutes ago\"\n if second_diff < 7200:\n return \"an hour ago\"\n if second_diff < 86400:\n return str(second_diff // 3600) + \" hours ago\"\n if day_diff == 1:\n return \"yesterday\"\n if day_diff < 7:\n return str(day_diff) + \" days ago\"\n if day_diff < 31:\n return str(day_diff // 7) + \" weeks ago\"\n if day_diff < 365:\n return str(day_diff // 30) + \" months ago\"\n return str(day_diff // 365) + \" years ago\"", "title": "" }, { "docid": "e190f695a5eeccfa355c9353f54c8ed8", "score": "0.49500945", "text": "def getDateTimeDelta(date_time, op, form=record_name_format, days=0, hours=0, minutes=0, seconds=0):\n ops = {'+': (lambda x, y: x + y), '-': (lambda x, y: x - y)}\n if op == '+' or op == '-':\n return (ops[op](date_time, relativedelta(days=days, hours=hours, minutes=minutes, seconds=seconds))) \\\n .strftime(form)\n return None", "title": "" }, { "docid": "156c78f2588cae450872a66d18ffb5dd", "score": "0.4949701", "text": "def duration_difference(date_time: datetime) -> str:\n time_delta = date_time.replace(tzinfo=None) - datetime.now()\n time_tuple = seconds_to_duration(time_delta.total_seconds())\n # The returned tuple contains the duration in terms of\n # days, hours, minutes, seconds, and milliseconds.\n time_str = ''\n if time_tuple[0]:\n time_str += '{} days'.format(time_tuple[0])\n time_str += ' {} hrs'.format(time_tuple[1])\n elif time_tuple[1]:\n time_str += '{} hrs'.format(time_tuple[1])\n time_str += ' {} min'.format(time_tuple[2])\n elif time_tuple[2]:\n time_str += '{} min'.format(time_tuple[2])\n time_str += ' {} sec'.format(time_tuple[3])\n elif time_tuple[3]:\n time_str += '{} sec'.format(time_tuple[3])\n return time_str", "title": "" }, { "docid": "63ffd72e2e15f06edf79c7f35536d3db", "score": "0.49265742", "text": "def date_now_formatted():\n\n obp_logger.debug(\"setting nicely formatted datetime\")\n return datetime.datetime.now().strftime('[%d, %h %Y, %H:%M:%S]')", "title": "" }, { "docid": "539dbb6f65f648e5866703047d2ce78f", "score": "0.49173954", "text": "def timedelta_to_str(td): \n td_days = td.days\n td_hours = int(math.floor(td.seconds/60/60))\n td_mins = int(math.floor(td.seconds/60) - math.floor(td.seconds/60/60)*60)\n td_sec = int(round(td.seconds%60,0))\n return td_days,td_hours,td_mins,td_sec", "title": "" }, { "docid": "a7ebf433fbeb7d76c68fcc68a0fdcaaf", "score": "0.49097177", "text": "def 
timesince(date, dformat=\"%A, %d. %B %Y %I:%M%p\"):\n return date.strftime(dformat)", "title": "" }, { "docid": "34365521a4f501efc24ec1f0f0aa14f6", "score": "0.4904528", "text": "def prettydate(d):\n\n if isinstance(d, float):\n d = datetime.utcfromtimestamp(d)\n\n diff = datetime.utcnow() - d\n s = diff.seconds\n if diff.days > 7 or diff.days < 0:\n return d.strftime('%d %b %y')\n elif diff.days == 1:\n return '1 day ago'\n elif diff.days > 1:\n return '{0} days ago'.format(diff.days)\n elif s <= 1:\n return 'just now'\n elif s < 60:\n return '{0} seconds ago'.format(s)\n elif s < 120:\n return '1 minute ago'\n elif s < 3600:\n return '{0} minutes ago'.format(s // 60)\n elif s < 7200:\n return '1 hour ago'\n else:\n return '{0} hours ago'.format(s // 3600)", "title": "" }, { "docid": "d5e8c67c9b40cd30c5912bb4ac492858", "score": "0.4904388", "text": "def elapsed_time(start, end):\r\n hours, rem = divmod(end - start, 3600)\r\n minutes, seconds = divmod(rem, 60)\r\n return \"{:0>2}:{:0>2}:{:05.2f}\".format(int(hours), int(minutes), seconds)", "title": "" }, { "docid": "b2163442e4255a4a20aae133ac9410fd", "score": "0.48991105", "text": "def formatTime(self, record, datefmt=None):\n if datefmt:\n datefmt = datefmt.replace('%f', '%03d' % (record.msecs))\n return logging.Formatter.formatTime(self, record, datefmt)\n else:\n return logging.Formatter.formatTime(self, record, datefmt) # default ISO8601", "title": "" }, { "docid": "2c82d554d5ce8931320a4a4177b09c90", "score": "0.4895531", "text": "def datetimefilter(value, date_format='%Y/%m/%d %H:%M'):\n return value.strftime(date_format)", "title": "" }, { "docid": "7babf099eea720d30997edf3aade18b1", "score": "0.48938733", "text": "def _format_now(options):\n return datetime.datetime.now().strftime(options)", "title": "" }, { "docid": "0bf1e1b6389a0530ab052b6e5f28bfa9", "score": "0.48411903", "text": "def get_ts_formatter(fmt, tz):\n return lambda dt: pendulum.instance(dt).in_timezone(tz).format(fmt) if dt else '-'", "title": "" }, { "docid": "0a4df529dd3a8ec430ec393fe85e17ff", "score": "0.48397315", "text": "def pretty_time(timespan_in_seconds):\n seconds = abs(int(timespan_in_seconds))\n msg = []\n days, seconds = divmod(seconds, 86400)\n if days > 0:\n msg.append(\"%dd\" % days)\n hours, seconds = divmod(seconds, 3600)\n if hours > 0:\n msg.append(\"%dh\" % hours)\n minutes, seconds = divmod(seconds, 60)\n if minutes > 0:\n msg.append(\"%dm\" % minutes)\n msg.append(\"%ds\" % seconds)\n return \" \".join(msg)", "title": "" }, { "docid": "2afea4c234ea587f140f31a313872289", "score": "0.48237303", "text": "def timeElapsed(elapsed, short=False, leadingZeroes=False, years=True,\r\n weeks=True, days=True, hours=True, minutes=True, seconds=True):\r\n ret = []\r\n before = False\r\n def Format(s, i):\r\n if i or leadingZeroes or ret:\r\n if short:\r\n ret.append('%s%s' % (i, s[0]))\r\n else:\r\n ret.append(format('%n', (i, s)))\r\n elapsed = int(elapsed)\r\n\r\n # Handle negative times\r\n if elapsed < 0:\r\n before = True\r\n elapsed = -elapsed\r\n\r\n assert years or weeks or days or \\\r\n hours or minutes or seconds, 'One flag must be True'\r\n if years:\r\n (yrs, elapsed) = (elapsed // 31536000, elapsed % 31536000)\r\n Format(_('year'), yrs)\r\n if weeks:\r\n (wks, elapsed) = (elapsed // 604800, elapsed % 604800)\r\n Format(_('week'), wks)\r\n if days:\r\n (ds, elapsed) = (elapsed // 86400, elapsed % 86400)\r\n Format(_('day'), ds)\r\n if hours:\r\n (hrs, elapsed) = (elapsed // 3600, elapsed % 3600)\r\n Format(_('hour'), hrs)\r\n if minutes or 
seconds:\r\n (mins, secs) = (elapsed // 60, elapsed % 60)\r\n if leadingZeroes or mins:\r\n Format(_('minute'), mins)\r\n if seconds:\r\n leadingZeroes = True\r\n Format(_('second'), secs)\r\n if not ret:\r\n raise ValueError('Time difference not great enough to be noted.')\r\n result = ''\r\n if short:\r\n result = ' '.join(ret)\r\n else:\r\n result = format('%L', ret)\r\n if before:\r\n result = _('%s ago') % result\r\n return result", "title": "" }, { "docid": "2ad80790fbae08dd8e19c61865e41b09", "score": "0.48064327", "text": "def _jinja2_format_date(date, format_=None):\n return date.strftime(format_ or '%b %d, %Y')", "title": "" }, { "docid": "37aacceecbb5b30009dd8b860442f089", "score": "0.47854218", "text": "def __format__(self, formatspec):\n dt = self.as_datetime()\n if dt != None:\n return format(timefmt.display_datetime(dt), formatspec)\n return \"-\"", "title": "" }, { "docid": "d87661e80a1021971e47433690671846", "score": "0.4783467", "text": "def get_fancy_time(d, display_full_version=True, request=None, locale=None):\n\n if request != None:\n localizer = get_localizer(request)\n else:\n localizer = get_custom_localizer(locale)\n\n #some helpers lambda's\n plural = lambda x: 's' if x > 1 else ''\n singular = lambda x: x[:-1]\n #convert pluran (years) --> to singular (year)\n display_unit = lambda unit, name: '%s%s' %\\\n (name, plural(unit)) if unit > 0 else ''\n\n #time units we are interested in descending order of significance\n tm_units = ['years', 'months', 'days', 'hours', 'minutes', 'seconds']\n\n rdelta = get_time_difference_now(d) # capture the date difference\n time_format = localizer.translate(_('meta_time_format_wo_seconds',\n domain='Ondestan'))\n exact_time = convert_from_utc(d, default_timezone).strftime(time_format)\n for idx, tm_unit in enumerate(tm_units):\n first_unit_val = getattr(rdelta, tm_unit)\n if first_unit_val > 0:\n primary_unit = localizer.translate(_(\n display_unit(first_unit_val, singular(tm_unit)),\n domain='Ondestan'))\n if display_full_version and idx < len(tm_units) - 1:\n next_unit = tm_units[idx + 1]\n second_unit_val = getattr(rdelta, next_unit)\n if second_unit_val > 0:\n secondary_unit = localizer.translate(_(\n display_unit(second_unit_val, singular(next_unit)),\n domain='Ondestan'))\n parameters = {\n 'primary_val': first_unit_val,\n 'primary_unit': primary_unit,\n 'secondary_val': second_unit_val,\n 'secondary_unit': secondary_unit,\n 'exact_time': exact_time\n }\n return localizer.translate(_(\"fancy_time_two_units\",\n domain='Ondestan',\n mapping=parameters))\n parameters = {\n 'primary_val': first_unit_val,\n 'primary_unit': primary_unit,\n 'exact_time': exact_time\n }\n return localizer.translate(_(\"fancy_time_one_unit\",\n domain='Ondestan',\n mapping=parameters))\n return format_utcdatetime(d.astimezone(utc_timezone), request=request,\n locale=locale)", "title": "" }, { "docid": "2cbcd36f76d077ad3c972c5f781894e2", "score": "0.47736016", "text": "def _make_timelimit_string(kwargs):\n hours = kwargs.get(\"hours\", range(24))\n months = kwargs.get(\"months\", range(1, 13))\n limit_by_doy = kwargs.get(\"limit_by_doy\", False)\n if len(hours) == 24 and len(months) == 12 and not limit_by_doy:\n return \"\"\n parts = []\n if limit_by_doy:\n parts.append(f\"{kwargs['sts']:%b %-d} - {kwargs['ets']:%b %-d}\")\n elif months is not None and len(months) < 12:\n for h in months:\n parts.append(f\"{month_abbr[h]}\")\n if hours is not None and len(hours) != 24:\n if len(hours) > 4:\n parts.append(\n f\"{datetime(2000, 1, 1, hours[0]):%-I 
%p}-\"\n f\"{datetime(2000, 1, 1, hours[-1]):%-I %p}\"\n )\n else:\n for h in hours:\n parts.append(f\"{datetime(2000, 1, 1, h):%-I %p}\")\n return f\" ↳ constraints: {', '.join(parts)}\"", "title": "" }, { "docid": "d6ba5edffc6c1a34a6b6d49ca15ded8c", "score": "0.47692364", "text": "def pretty_date(dt, default=None):\n if default is None:\n default = 'just now'\n\n now = datetime.utcnow()\n diff = now - dt\n\n periods = (\n (diff.days / 365, 'year', 'years'),\n (diff.days / 30, 'month', 'months'),\n (diff.days / 7, 'week', 'weeks'),\n (diff.days, 'day', 'days'),\n (diff.seconds / 3600, 'hour', 'hours'),\n (diff.seconds / 60, 'minute', 'minutes'),\n (diff.seconds, 'second', 'seconds'),\n )\n\n for period, singular, plural in periods:\n if not period:\n continue\n if period == 1:\n return u'%d %s ago' % (period, singular)\n else:\n return u'%d %s ago' % (period, plural)\n\n return default", "title": "" }, { "docid": "93225df2625924022baf305698d14644", "score": "0.4768795", "text": "def timedelta_seconds(timedelta):\n\treturn timedelta.days * 86400 + timedelta.seconds", "title": "" }, { "docid": "cef06c768253e75d72898e55b78b534e", "score": "0.47670057", "text": "def make_timedelta_string(delta):\r\n\r\n if not isinstance(delta, np.timedelta64):\r\n raise TypeError(\"Cannot make timedelta string from type '%s'\" % type(delta))\r\n\r\n mag = delta.astype(int)\r\n unit = get_timedelta_unit(delta)\r\n return '%d,%s' % (mag, unit)", "title": "" }, { "docid": "2cac4fc847f2688022fdb35eab194a3a", "score": "0.475223", "text": "def transform_timedelta(time, addtime=0): \n seconds, mseconds = time.split(\".\") \n mseconds = float(mseconds) + 1000 * (float(seconds)+addtime)\n hours, mseconds = divmod(mseconds,3600000)\n minutes, mseconds = divmod(mseconds, 60000)\n seconds = float(mseconds) / 1000\n str_dt = (\"%02i:%02i:%06.3f\" % (hours, minutes, seconds))\n return str_dt", "title": "" }, { "docid": "fe8f40cd6c020c1173a27ce633717017", "score": "0.4739745", "text": "def _strftime(cls, date):\n return date.strftime(cls.TIME_FORMAT)", "title": "" }, { "docid": "2b8e59b2ac07d80da82d836f6fd4d572", "score": "0.47364262", "text": "def formatTime(self, record, datefmt=None):\n if datefmt:\n datefmt = datefmt.replace('%f', '%03d' % (record.msecs))\n return Formatter.formatTime(self, record, datefmt)\n else:\n return Formatter.formatTime(self, record, datefmt) # default ISO8601", "title": "" }, { "docid": "42474d6a2d37f1b9c253885fa79003eb", "score": "0.47305062", "text": "def __str__(self):\n now = datetime.now(timezone.utc)\n days = self.start - now\n result = ''\n if days.days > 0:\n result = 'J+{0} '.format(days.days)\n return '{0}{1} - {2} {3}'.format(result, self.start.strftime(\"%H:%M\"), self.end.strftime(\"%H:%M\"), self.summary)", "title": "" }, { "docid": "2a6f4b89b09e23c0a9551a7c9bed146b", "score": "0.47178566", "text": "def get_elapsed_time(start_time, end_time=None):\n if end_time == None:\n end_time = get_time()\n seconds = end_time - start_time\n negative_prefix = ''\n if seconds < 0:\n negative_prefix = '-'\n seconds = -seconds\n if seconds < 120:\n if int(seconds) == 0:\n milli_seconds = seconds * 1000\n timestr = \"%d\" % int(milli_seconds)\n suffix = \" msecs\"\n else:\n timestr = \"%d\" % int(seconds)\n suffix = \" secs\"\n else:\n minutes = int(seconds/60.0)\n remainder_seconds = int(seconds - (minutes*60))\n timestr = \"%.d:%02d\" % (minutes,remainder_seconds)\n suffix = \" min:sec\"\n return \"\".join([negative_prefix, timestr, suffix])", "title": "" }, { "docid": 
"91642265081b0828cec023a8f65de012", "score": "0.47118384", "text": "def format_day(self, date, gmt_offset=0, dow=True):\r\n local_date = date - datetime.timedelta(minutes=gmt_offset)\r\n _ = self.translate\r\n if dow:\r\n return _(\"%(weekday)s, %(month_name)s %(day)s\") % {\r\n \"month_name\": self._months[local_date.month - 1],\r\n \"weekday\": self._weekdays[local_date.weekday()],\r\n \"day\": str(local_date.day),\r\n }\r\n else:\r\n return _(\"%(month_name)s %(day)s\") % {\r\n \"month_name\": self._months[local_date.month - 1],\r\n \"day\": str(local_date.day),\r\n }", "title": "" }, { "docid": "378638f89f100a69fc73e39f7a2996e6", "score": "0.4707171", "text": "def custom_strftime(format, t):\n def suffix(d):\n return 'th' if 11 <= d <= 13 else \\\n {1: 'st', 2: 'nd', 3: 'rd'}.get(d % 10, 'th')\n\n return t.strftime(format).replace('{S}', str(t.day) + suffix(t.day))", "title": "" }, { "docid": "15179fe913379f847b5545888db61661", "score": "0.47048432", "text": "def print_elapsed_time(start_time, end_time=None, prefix=None, current=False):\n if end_time == None:\n end_time = get_time()\n ets = \"ELAPSED TIME\"\n if prefix:\n s = \"%s %s\" % (prefix, ets)\n else:\n s = ets\n\n t = get_elapsed_time(start_time, end_time)\n if current:\n t = t + \" / NOW: \" + get_time_str()\n vmsgb(1,s,t)", "title": "" }, { "docid": "82a0fda086028ef6b3e0cac4f8081505", "score": "0.47013095", "text": "def format_display_timerange(fromdate, todate):\n if fromdate.tzname() == todate.tzname():\n if fromdate.year == todate.year:\n if fromdate.month == todate.month and fromdate.day == todate.day:\n return fromdate.strftime('From %T') + todate.strftime(' To %T %b %d %Y timezone GMT %Z')\n else:\n return fromdate.strftime('From %b %d %T') + todate.strftime(' To %b %d %T %Y timezone GMT %Z')\n else:\n return fromdate.strftime('From %b %d %Y %T') + todate.strftime(' To %b %d %Y %T timezone GMT %Z')\n else: \n return fromdate.strftime('From %b %d %Y %T timezone GMT %Z') + todate.strftime(' To %b %d %Y %T timezone GMT %Z')", "title": "" }, { "docid": "c61716b893c5d410355830e7dfa1e020", "score": "0.46910354", "text": "def get_formatted_now(request):\n now = time.gmtime()\n formatter = get_locale_dates(request).getFormatter('dateTime', 'full')\n return formatter.format(datetime(*now[:6]))", "title": "" }, { "docid": "4c14496bca9247aec052d5eded37734c", "score": "0.46840936", "text": "def print_elapsed(\n events=_EVENTS,\n label='\\nElapsed Time(s): ',\n only_last=False):\n import datetime\n\n if not only_last:\n print(label, end='\\n' if len(events) > 2 else '')\n first_elapsed = events[0][1]\n for i in range(len(events) - 1):\n _id = i + 1\n name = events[_id][0]\n curr_elapsed = events[_id][1]\n prev_elapsed = events[_id - 1][1]\n diff_first = curr_elapsed - first_elapsed\n diff_last = curr_elapsed - prev_elapsed\n if diff_first == diff_last:\n diff_first = '-'\n print('{!s:24s} {!s:>24s}, {!s:>24s}'.format(\n name, diff_last, diff_first))\n else:\n _id = -1\n name = events[_id][0]\n curr_elapsed = events[_id][1]\n prev_elapsed = events[_id - 1][1]\n diff_last = datetime.timedelta(curr_elapsed - prev_elapsed)\n print('{!s}: {!s:>24s}'.format(name, diff_last))", "title": "" }, { "docid": "3b8a1cfcf22b040f3ee9d4ac7447b715", "score": "0.4670579", "text": "def timesince(d, now=None, pos=True, flag=False):\n if not d:\n if flag:\n return 0, ''\n else:\n return ''\n chunks = (\n (60 * 60 * 24 * 365, lambda n: ungettext('year', 'years', n)),\n (60 * 60 * 24 * 30, lambda n: ungettext('month', 'months', n)),\n (60 * 60 * 24 * 7, 
lambda n : ungettext('week', 'weeks', n)),\n (60 * 60 * 24, lambda n : ungettext('day', 'days', n)),\n (60 * 60, lambda n: ungettext('hour', 'hours', n)),\n (60, lambda n: ungettext('minute', 'minutes', n))\n )\n\n if not now:\n now = date.now()\n else:\n now = date.to_datetime(now)\n d = date.to_datetime(d)\n \n delta = now - (d - datetime.timedelta(0, 0, d.microsecond))\n oldsince = since = delta.days * 24 * 60 * 60 + delta.seconds\n \n suffix = ''\n if pos:\n if since >= 0:\n suffix = ugettext(' ago')\n elif since < 0:\n suffix = ugettext(' later')\n since *= -1\n \n for i, (seconds, name) in enumerate(chunks):\n count = since // seconds\n if count != 0:\n break\n s = ('%(number)d %(type)s') % {'number': count, 'type': name(count)}\n if i + 1 < len(chunks):\n # Now get the second item\n seconds2, name2 = chunks[i + 1]\n count2 = (since - (seconds * count)) // seconds2\n if count2 != 0:\n s += (', %(number)d %(type)s') % {'number': count2, 'type': name2(count2)}\n #if flag==True, then return twe elements (since, message) \n if flag:\n return oldsince, s + suffix\n else:\n return s + suffix", "title": "" }, { "docid": "501105f33ec1eebd6d05d4b0c24f8844", "score": "0.4666763", "text": "def date_format(value, ft='%H:%M / %d-%m-%Y'):\n\n v = datetime.fromtimestamp(value)\n return v.strftime(ft)", "title": "" }, { "docid": "c3fb418c83f25cb3a44f62f69490a2a4", "score": "0.46662012", "text": "def timesince(dt, default=\"just now\"):\n\n now = datetime.utcnow()\n diff = now - dt\n \n periods = (\n (diff.days / 365, \"year\", \"years\"),\n (diff.days / 30, \"month\", \"months\"),\n (diff.days / 7, \"week\", \"weeks\"),\n (diff.days, \"day\", \"days\"),\n (diff.seconds / 3600, \"hour\", \"hours\"),\n (diff.seconds / 60, \"minute\", \"minutes\"),\n (diff.seconds, \"second\", \"seconds\"),\n )\n\n for period, singular, plural in periods:\n \n if period:\n return \"%d %s ago\" % (period, singular if period == 1 else plural)\n\n return default", "title": "" }, { "docid": "58dc242b1f76b45e46146f7e956b470e", "score": "0.46616465", "text": "def time_format(value, format=None, use_l10n=None):\r\n return dateformat.time_format(value, get_format(format or 'TIME_FORMAT', use_l10n=use_l10n))", "title": "" }, { "docid": "d29bf846b5e1ec0e46bd700a8415b694", "score": "0.46598604", "text": "def _fmt_date(d: date, delta: timedelta = timedelta(), na: str = '') -> str:\n if not d or pd.isna(d):\n return na\n else:\n return (d + delta).isoformat()", "title": "" }, { "docid": "0673cc9b1be6ea53085ba753b02e2b84", "score": "0.46482766", "text": "def format_time_difference(later: str, earlier: str) -> str:\n return str(as_datetime(later) - as_datetime(earlier))", "title": "" }, { "docid": "8710a9e0f2879ddda8fd37af787ba2a6", "score": "0.46300632", "text": "def format_date_time(d):\n return d.strftime(\"%Y%m%d%H%M00\")", "title": "" }, { "docid": "226964028103f9d099c5a055a40fb422", "score": "0.46225524", "text": "def timesince(dt, default=\"Just now.\"):\n\n now = datetime.now()\n diff = now - dt\n \n periods = (\n (diff.days / 365, \"year\", \"years\"),\n (diff.days / 30, \"month\", \"months\"),\n (diff.days / 7, \"week\", \"weeks\"),\n (diff.days, \"day\", \"days\"),\n (diff.seconds / 3600, \"hour\", \"hours\"),\n (diff.seconds / 60, \"minute\", \"minutes\"),\n (diff.seconds, \"second\", \"seconds\"),\n )\n\n for period, singular, plural in periods:\n \n if period:\n return \"%d %s ago\" % (period, singular if period == 1 else plural)\n\n return default", "title": "" }, { "docid": "79f01895878d01af60bc3f6d122e09f8", "score": 
"0.4621434", "text": "def pretty_date(date):\n if not isinstance(date, datetime) or date > NOW:\n raise ValueError\n delta = NOW - date\n if delta < timedelta(seconds=(TIME_OFFSETS[0].offset)):\n return TIME_OFFSETS[0].date_str\n elif delta < timedelta(seconds=TIME_OFFSETS[1].offset):\n return TIME_OFFSETS[1].date_str.format(delta.seconds)\n elif delta < timedelta(seconds=TIME_OFFSETS[2].offset):\n return TIME_OFFSETS[2].date_str\n elif delta < timedelta(seconds=TIME_OFFSETS[3].offset):\n return TIME_OFFSETS[3].date_str.format(int(delta.seconds/TIME_OFFSETS[3].divider))\n elif delta < timedelta(seconds=TIME_OFFSETS[4].offset):\n return TIME_OFFSETS[4].date_str.format(int(delta.seconds/TIME_OFFSETS[3].divider))\n elif delta < timedelta(seconds=TIME_OFFSETS[5].offset):\n return TIME_OFFSETS[5].date_str.format(int(delta.seconds/TIME_OFFSETS[5].divider))\n elif delta < timedelta(seconds=TIME_OFFSETS[6].offset):\n return TIME_OFFSETS[6].date_str\n else:\n return (NOW - delta).strftime('%m/%d/%y')", "title": "" }, { "docid": "fc05115ba5c326c125b1d0c9d83292b5", "score": "0.46107835", "text": "def fuzzy_time_diff(begin, end=None):\n if end is None:\n end = datetime.datetime.now()\n timeDiff = end - begin\n days = timeDiff.days\n hours = timeDiff.seconds/3600\n minutes = timeDiff.seconds%3600/60\n seconds = timeDiff.seconds%3600%60\n \n str = u''\n tStr = u''\n if days > 0:\n if days == 1: tStr = u'day'\n else: tStr = u'days'\n str = str + u'%s %s' %(days, tStr)\n return str\n elif hours > 0:\n if hours == 1: tStr = u'hour'\n else: tStr = u'hours'\n str = str + u'%s %s' %(hours, tStr)\n return str\n elif minutes > 0:\n if minutes == 1:tStr = u'minutes'\n else: tStr = u'minutes' \n str = str + u'%s %s' %(minutes, tStr)\n return str\n elif seconds > 0:\n if seconds == 1:tStr = u'second'\n else: tStr = u'seconds'\n str = str + u'%s %s' %(seconds, tStr)\n return str\n else:\n return None", "title": "" }, { "docid": "30f8fb3f1314fdd17261e890e091fbfe", "score": "0.4598162", "text": "def _format(self,\n timeval: float):\n hour = int(timeval / 3600.0)\n timeval = timeval % 3600.0\n minute = int(timeval / 60)\n timeval = timeval % 60\n return self._template.format(hour, minute, timeval)", "title": "" }, { "docid": "4fbbe805838b073e02289c290b46dce2", "score": "0.45973894", "text": "def datetimefilter(value, format=\"%Y/%m/%d %H:%M:%S.%f\"):\n format = \"%H:%M:%S.%f\"\n return value.strftime(format)", "title": "" }, { "docid": "a76225e4721dde38e50dac51b8683568", "score": "0.45970124", "text": "def FormatRelativeDate(timestamp, days_only=False, clock=None):\n if clock:\n now = clock()\n else:\n now = int(time.time())\n\n # TODO(jrobbins): i18n of date strings\n delta = int(now - timestamp)\n d_minutes = delta // 60\n d_hours = d_minutes // 60\n d_days = d_hours // 24\n if days_only:\n if d_days > 1:\n return '%s days ago' % d_days\n else:\n return ''\n\n if d_days > 6:\n return ''\n if d_days > 1:\n return '%s days ago' % d_days # starts at 2 days\n if d_hours > 1:\n return '%s hours ago' % d_hours # starts at 2 hours\n if d_minutes > 1:\n return '%s minutes ago' % d_minutes\n if d_minutes > 0:\n return '1 minute ago'\n if delta > -MAX_CLOCK_SKEW_SEC:\n return 'moments ago'\n return ''", "title": "" }, { "docid": "d92466d54f91183e6b9d27a1eca7dd17", "score": "0.45947543", "text": "def format_time(time=None, format=None, rebase=True, request=None):\r\n format = _get_format(\"time\", format, request)\r\n return _date_format(\r\n dates.format_time, time, format, rebase, request=request\r\n )", "title": "" }, { 
"docid": "c59316426d042bb9f6268597bb74e8fd", "score": "0.45937845", "text": "def format_long_time(timespan):\n\n formatted_time = []\n units = (('d', 24 * 60 * 60), ('h', 60 * 60), ('m', 60), ('s', 1))\n\n for unit, length in units:\n value = int(timespan / length)\n\n if value > 0:\n timespan %= length\n formatted_time.append('%i%s' % (value, unit))\n\n if timespan < 1:\n break\n\n return ' '.join(formatted_time)", "title": "" }, { "docid": "a8b8fe279d862cfe3ed951fa5f928cae", "score": "0.4587374", "text": "def datetime_formatter():\r\n now = datetime.now()\r\n return now.strftime(\"%m-%d-%Y\")", "title": "" }, { "docid": "1a7d86a99595d825c157ebd7543cdc7e", "score": "0.45868284", "text": "def log_elapsed(self, local_label):\n return \"time_log: %s: %.2f elapsed %s\" % (\n self.label, self.timer.elapsed(), local_label)", "title": "" }, { "docid": "412be8790b276850841bce0c890382d5", "score": "0.45859605", "text": "def format_time(total_time, index, total):\n tt = (float(total_time)/(index+1)) * (total-index)\n\n m, s = divmod(tt, 60)\n h, m = divmod(m, 60)\n\n if h > 0:\n return \"{0:.2f}h {1:.2f}m {2:.2f}s\".format(h, m, s)\n elif m > 0:\n return \"{0:.2f}m {1:.2f}s\".format(m, s)\n else:\n return \"{0:.2f}s\".format(s)", "title": "" }, { "docid": "065f893b4bf70a952c8f757a94a1599e", "score": "0.4576687", "text": "def datetimefilter(value, format='%Y/%m/%d %H:%M'):\n return value.strftime(format)", "title": "" }, { "docid": "065f893b4bf70a952c8f757a94a1599e", "score": "0.4576687", "text": "def datetimefilter(value, format='%Y/%m/%d %H:%M'):\n return value.strftime(format)", "title": "" }, { "docid": "065f893b4bf70a952c8f757a94a1599e", "score": "0.4576687", "text": "def datetimefilter(value, format='%Y/%m/%d %H:%M'):\n return value.strftime(format)", "title": "" }, { "docid": "c659df850e40fa448107b7511622b255", "score": "0.45741847", "text": "def _format_datetime(self, date_time):\r\n if date_time is None:\r\n return None\r\n timezone_hrs = time.timezone / 60 / 60 # convert seconds to hours\r\n if timezone_hrs >= 0:\r\n timezone_join = '+'\r\n else:\r\n timezone_join = '' # minus sign comes from number itself\r\n timezone_spec = '%s%s:00' % (timezone_join, timezone_hrs)\r\n return date_time.strftime('%Y-%m-%dT%H:%M:%S') + timezone_spec", "title": "" }, { "docid": "c61ed5ba2af97773901c321e89ada745", "score": "0.45684105", "text": "def timedelta(value, arg):\n return (value - arg).days", "title": "" }, { "docid": "a4cd88ff74bc9eb961c9be3040234fa0", "score": "0.45675895", "text": "def timesince(d, now=None, reversed=False):\n if not now:\n now = datetime.now(utc if is_aware(d) else None)\n \n delta = (d - now) if reversed else (now - d)\n s = django_timesince(d, now=now, reversed=reversed)\n \n if len(s.split(' ')) is 2:\n count, name = s.split(' ')\n if name in ['minutes', 'minute']:\n seconds = delta.seconds % 60\n extension = '%(number)d %(type)s' % {'number': seconds, 'type': 'seconds'}\n if int(count) is 0:\n return extension\n elif int(count) < 2:\n s += ', %s' % extension\n return s", "title": "" }, { "docid": "b025fd8b4ed965cbad7b1ecfb8f5c4b1", "score": "0.456411", "text": "def format_time(current_time):\n seconds = current_time % 60\n minutes = seconds // 60\n\n return \" \" + str(minutes) + \":\" + str(seconds)", "title": "" }, { "docid": "59a69e0ff73aa8ca9dae8578d9b6e84a", "score": "0.45640182", "text": "def format_time(timespan):\n\n if timespan >= 60:\n # If the time is greater than one minute,\n # precision is reduced to a 100th of a second.\n return format_long_time(timespan)\n 
return format_short_time(timespan)", "title": "" }, { "docid": "66ad25951593f06e5906d04013b50fe0", "score": "0.4561305", "text": "def format_day(day, events):\n pass", "title": "" }, { "docid": "fb453cd3d6a6f2f396cc71e95000279a", "score": "0.456018", "text": "def _print_elapsed_time(os_access, start_time, prefix_string):\n end_time = time.perf_counter()\n time_rounded_seconds = round(end_time - start_time)\n time_string = str(datetime.timedelta(seconds=time_rounded_seconds))\n os_access.print_console(\"{0} {1} h:m:s or {2} s\".format(prefix_string, time_string, time_rounded_seconds))", "title": "" }, { "docid": "8c2be3f5ea67ca49c2c5a9e4403f0f52", "score": "0.45577952", "text": "def format_time(n: float) -> str:\n if n > 24 * 60 * 60 * 2:\n d = int(n / 3600 / 24)\n h = int((n - d * 3600 * 24) / 3600)\n return f\"{d}d {h}hr\"\n if n > 60 * 60 * 2:\n h = int(n / 3600)\n m = int((n - h * 3600) / 60)\n return f\"{h}hr {m}m\"\n if n > 60 * 10:\n m = int(n / 60)\n s = int(n - m * 60)\n return f\"{m}m {s}s\"\n if n >= 1:\n return \"%.2f s\" % n\n if n >= 1e-3:\n return \"%.2f ms\" % (n * 1e3)\n return \"%.2f us\" % (n * 1e6)", "title": "" } ]
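
The passages above are all variations on rendering an elapsed time for humans. For orientation, a minimal Python 3 sketch of the same pattern follows (illustrative only, not one of the corpus passages; the name time_ago is invented). The explicit floor division is the one detail worth noting: several of the Python 2 era snippets above write second_diff / 60 and rely on integer division, which under Python 3 would yield strings like "2.5 minutes ago".

from datetime import datetime

def time_ago(then, now=None):
    now = now or datetime.now()
    seconds = int((now - then).total_seconds())
    if seconds < 0:
        return then.isoformat()  # future timestamp: fall back to an absolute date
    for divisor, unit in ((86400, "day"), (3600, "hour"), (60, "minute"), (1, "second")):
        count = seconds // divisor  # floor division keeps the count an int
        if count:
            return "%d %s%s ago" % (count, unit, "s" if count != 1 else "")
    return "just now"
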
7d6335d29da5c277e290e05412518fd1
The permission bits as an octal integer.
[ { "docid": "f57f2a04d9b7b37f1f433b59470bd055", "score": "0.59944504", "text": "def number(self):\n return os.stat(self.path).st_mode & 0o0777", "title": "" } ]
[ { "docid": "6ca6f89c82377ce8483931dc293ae5ba", "score": "0.66869646", "text": "def permissions(self, flag: int) -> None:\n return self.set_integer('permissions', flag)", "title": "" }, { "docid": "45517c7a1400d98cbcbc6a91b4c711c4", "score": "0.65820366", "text": "def permissions(self) -> int:\n return self.get_integer('permissions')", "title": "" }, { "docid": "b7d8ce5bc2071a9c6a1bc7558f6520d1", "score": "0.65032536", "text": "def get_oct_mode(entry):\r\n entry_stat = os.stat(entry)\r\n mode = oct(entry_stat[stat.ST_MODE] & 0777)\r\n return mode", "title": "" }, { "docid": "e4dd78a66255c2aed37502349a8aecb9", "score": "0.6486476", "text": "def get_octal_from_file_permission(rwx: str) -> str:\n\n groups = [rwx[i:i+3] for i in range(0, len(rwx), 3)]\n result = \"\"\n for g in groups:\n result = result + calc_group(g)\n return result", "title": "" }, { "docid": "356b1914b745430378a1bd04d16b7672", "score": "0.6480177", "text": "def testOuradminPlistMode(self):\n mode = self.ouradmin_stat[stat.ST_MODE]\n num_mode = oct(mode & 0777)\n self.assertEqual('0600', num_mode)", "title": "" }, { "docid": "42ee65788248b1ab22868929de34fd91", "score": "0.6469383", "text": "def easy_permissions(permission):\n permission = permission.upper()\n if permission == \"R\":\n return ntsecuritycon.GENERIC_READ\n if permission == \"RX\":\n return ntsecuritycon.GENERIC_READ | ntsecuritycon.GENERIC_EXECUTE\n if permission in [\"RWX\", \"M\"]:\n return (\n ntsecuritycon.GENERIC_READ\n | ntsecuritycon.GENERIC_WRITE\n | ntsecuritycon.GENERIC_EXECUTE\n )\n if permission == \"F\":\n return ntsecuritycon.GENERIC_ALL\n raise ValueError(\"Bogus easy permission\")", "title": "" }, { "docid": "2fb42dffe2e121ff52ef95c1038da3f3", "score": "0.6318699", "text": "def get_flags(self) -> int:\n return int.from_bytes(self.flags, byteorder=\"big\")", "title": "" }, { "docid": "2fb42dffe2e121ff52ef95c1038da3f3", "score": "0.6318699", "text": "def get_flags(self) -> int:\n return int.from_bytes(self.flags, byteorder=\"big\")", "title": "" }, { "docid": "538c97ed188045fa3877fb8e1abb05b4", "score": "0.62807775", "text": "def print_mode_permissions(mode):\n print(\"Mode:\", oct(mode), \"(Decimal: \" + str(mode) + \")\")\n for i in STAT_KEYS:\n if mode & getattr(stat, i) == getattr(stat, i):\n print(\" stat.\" + i)", "title": "" }, { "docid": "aab4893010e10d9a4d2144339e64e921", "score": "0.62620115", "text": "def as_int(self, perm):\n if isinstance(perm, int):\n valid_perm = perm\n elif isinstance(perm, basestring):\n # Look up the attribute, it will raise an error if it doesn't exist\n valid_perm = getattr(self, perm)\n elif isinstance(perm, (list, tuple)):\n valid_perm = 0\n for item in perm:\n valid_perm |= self.as_int(item)\n else:\n raise UnknownPermission(\"'%s' is an unknown permission type.\" % perm)\n return valid_perm", "title": "" }, { "docid": "b65c46e07112a3b169e387bdf8231ea3", "score": "0.6230009", "text": "def get_flags_bit(flag_bit) -> int:\n return int.from_bytes(flag_bit, byteorder=\"big\")", "title": "" }, { "docid": "b65c46e07112a3b169e387bdf8231ea3", "score": "0.6230009", "text": "def get_flags_bit(flag_bit) -> int:\n return int.from_bytes(flag_bit, byteorder=\"big\")", "title": "" }, { "docid": "f2680bb40111047ee303c279c4ab2d1e", "score": "0.6206197", "text": "def as_bits(self):\r\n return self.zfill(\"{:0b}\".format(self.rule_id), self.protocol.RULE_SIZE)", "title": "" }, { "docid": "c4a207f63bbf2a40892b2ce961ae97ba", "score": "0.61674714", "text": "def permission(self) -> str:\n return pulumi.get(self, 
\"permission\")", "title": "" }, { "docid": "8f233dc5e733edeec9880e335d537e90", "score": "0.6155625", "text": "def __oct__(self):\n return oct(self.__index__())", "title": "" }, { "docid": "08f727c2105c4ee77f8f409c2bb85b21", "score": "0.60298556", "text": "def _generate_flags(self) -> int:", "title": "" }, { "docid": "9e5ee1c2ecbdfee1a71b5dd4d71c5223", "score": "0.5998145", "text": "def permissions(self) -> str:\n return pulumi.get(self, \"permissions\")", "title": "" }, { "docid": "cd285216cf00e45dc419d30aaf86cd7f", "score": "0.5986254", "text": "def _int_mode(mode):\n return int(oct(mode)[-3:])", "title": "" }, { "docid": "d471df7566926d01b852df3d4a930bad", "score": "0.59619", "text": "def _bitmask(self):\n return self.raw_network.netmask().int()", "title": "" }, { "docid": "0882da1fcbdaf449039af10780ef60dc", "score": "0.5914277", "text": "def filemode(mode):\r\n perm = []\r\n for table in filemode_table:\r\n for bit, char in table:\r\n if mode & bit == bit:\r\n perm.append(char)\r\n break\r\n else:\r\n perm.append(\"-\")\r\n return \"\".join(perm)", "title": "" }, { "docid": "14720338f4feec0e3ecf3506cbac3129", "score": "0.5877849", "text": "def permissions(self):\n return FilePermissions(self.path)", "title": "" }, { "docid": "14720338f4feec0e3ecf3506cbac3129", "score": "0.5877849", "text": "def permissions(self):\n return FilePermissions(self.path)", "title": "" }, { "docid": "9209d5e229ac219f5fa0fcfd605d475e", "score": "0.5870107", "text": "def get_mode(path):\n if IS_WINDOWS:\n return win_get_permissions(path)\n return os.stat(path).st_mode & (stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)", "title": "" }, { "docid": "fa8fc989cff0028e4872e28cd993a67c", "score": "0.58682346", "text": "def umask(self):\n return self.plist[\"Umask\"]", "title": "" }, { "docid": "76e013881a5116d2a555faa4fb1bfe87", "score": "0.5840386", "text": "def print_win_permissions(win_perm, flags, object_type):\n print(\" -Permissions Mask:\", hex(win_perm), \"(\" + str(win_perm) + \")\")\n\n # files and directories do permissions differently\n if object_type == FILE:\n permissions = WIN_FILE_PERMISSIONS\n else:\n permissions = WIN_DIR_PERMISSIONS\n # directories have ACE that is inherited by children within them\n if flags & ntsecuritycon.OBJECT_INHERIT_ACE == \\\n ntsecuritycon.OBJECT_INHERIT_ACE and flags & \\\n ntsecuritycon.INHERIT_ONLY_ACE == \\\n ntsecuritycon.INHERIT_ONLY_ACE:\n permissions = WIN_DIR_INHERIT_PERMISSIONS\n\n calc_mask = 0 # see if we are printing all of the permissions\n for i in permissions:\n if getattr(ntsecuritycon, i) & win_perm == getattr(\n ntsecuritycon, i):\n calc_mask = calc_mask | getattr(ntsecuritycon, i)\n print(\" \", i)\n print(\" -Mask calculated from printed permissions:\", hex(calc_mask))", "title": "" }, { "docid": "add3c320a48cd61cbbf0912184508505", "score": "0.5839826", "text": "def permission_level(self):\n\t\traise NotImplementedError(self.not_implemented_msg(\"permission_level\"))", "title": "" }, { "docid": "f9cfda5b604b84227d943c40deff82b1", "score": "0.5801818", "text": "def flags(self) -> bytes:\n return self._flags", "title": "" }, { "docid": "f9cfda5b604b84227d943c40deff82b1", "score": "0.5801818", "text": "def flags(self) -> bytes:\n return self._flags", "title": "" }, { "docid": "a0b41375360e0a84b0895a0aa3221700", "score": "0.5797463", "text": "def filemode(mode):\n\t\tperm = []\n\t\tfor table in _filemode_table:\n\t\t\tfor bit, char in table:\n\t\t\t\tif mode & bit == 
bit:\n\t\t\t\t\tperm.append(char)\n\t\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tperm.append(\"-\")\n\t\treturn \"\".join(perm)", "title": "" }, { "docid": "0cdf9cc8d7bb02a6c3d647d632ecc98b", "score": "0.57868356", "text": "def GetFlags(self):", "title": "" }, { "docid": "f8880eaa5cc396a7f1d61a38b18a6f85", "score": "0.5755079", "text": "def ubx_flags(self, flags):\n \n if not len(flags) in [8,16,32]:\n print('Length of supplied bitfield string is incorrect.')\n return\n if type(flags) != str:\n print('Supplied data type for bitfield string is incorrect')\n return\n return int(flags, 2)", "title": "" }, { "docid": "0bf400ac1973145f33644028d0c230ea", "score": "0.5705454", "text": "def get_flags(self) -> int:\n return int.from_bytes(self.header.flags, byteorder=\"big\")", "title": "" }, { "docid": "8bb832554dcf2efe5ed9e6c9399c6593", "score": "0.56951815", "text": "def permission_id(self) -> int:\n return self._permission_id", "title": "" }, { "docid": "fcace942d6a299ceba5ba73c905998dc", "score": "0.56802267", "text": "def _get_perm_as_list(self):\n return self.int_to_perms(self.permission)", "title": "" }, { "docid": "92b4b58f35f1341dcb98db6d4bc512ad", "score": "0.5651501", "text": "def getOctects(objPath):\r\n\treturn(oct(os.stat(objPath).st_mode)[-3])", "title": "" }, { "docid": "693749ba0089309165aab8f525332173", "score": "0.5649951", "text": "def perm_num(self):\n return self._perm_num", "title": "" }, { "docid": "541129065aa91ebc148399278befbef6", "score": "0.5604436", "text": "def get_flags(self):\r\n if self._info.startswith('2,'):\r\n return self._info[2:]\r\n else:\r\n return ''", "title": "" }, { "docid": "995cde4dc8417e79b3d67ee2193bd0fc", "score": "0.5584657", "text": "def bit_abs(self):\n mem_address = self.get_PC() + 1\n self.write_PC(mem_address+1)\n\n low, high, address = self.make_address(mem_address)\n\n mem_value = self.read_memory(address, address + 1).hex()\n mem_value = int(mem_value, 16)\n\n ac = self.get_AC()\n value = ac & mem_value\n\n if mem_value & (1 << 6):\n self.set_overflow()\n else:\n self.unset_overflow()\n\n if mem_value & (1 << 7):\n self.set_negative()\n else:\n self.unset_negative()\n self.check_zero(value)\n\n return \"BIT\", \" abs\", low, high", "title": "" }, { "docid": "abbd01894a7c7754c0af876011a18210", "score": "0.5583148", "text": "def get_bit(self):\r\n return self.val", "title": "" }, { "docid": "13a798c6b8ce62a2c5c7c94ad0319424", "score": "0.5572662", "text": "def set_mode_to(file, permissions):\r\n global changes\r\n f = file\r\n mode = get_oct_mode(f)\r\n if mode != oct(permissions):\r\n try:\r\n if DRY:\r\n print \"# chmod {0} {1}\".format(oct(permissions), f)\r\n else:\r\n os.chmod(f, permissions)\r\n changes += 1\r\n except OSError:\r\n print >>sys.stderr, \"# cannot chmod the file {0}\".format(f)", "title": "" },
{ "docid": "a76295b20046642417ab616232b43f6d", "score": "0.5552545", "text": "def bitfield(self, n):\n num = bin(n)[2:] # Converts n into e.g. the string 0b101; [2:] strips the '0b'\n num = num.rjust(32, \"0\") # Left-pad the string to 32 places with zeros and return it\n return num[-1::-1] # Reverse the string so the number is easier to use in for loops", "title": "" }, { "docid": "43eda48063fbea93e054633ddfb4e7d3", "score": "0.55062205", "text": "def as_int_list(self, perm):\n if not isinstance(perm, int):\n raise UnknownPermission(\"'perm' must be an integer\")\n result_list = []\n for key, val in self.items():\n if key & perm == key:\n result_list.append(key)\n return result_list", "title": "" }, { "docid": "a8181d208c6f6595c1be97758f6da164", "score": "0.5499761", "text": "def mode_string(self):\n # The first character indicates the file type, e.g. 'd' = directory\n mode_chars = [FileTypeCode.to_char[self.Typecode]] \n # Add protection string characters\n mask = 0o400\n chars = 'rwxrwxrwx'\n i = 0\n while mask:\n bit_is_set = self.Mode & mask\n next_char = chars[i] if bit_is_set else '-'\n mode_chars.append(next_char)\n i = i + 1\n mask = mask >> 1\n return ''.join(mode_chars)", "title": "" }, { "docid": "954bdbd4d0a945c8ed9de971d8bc94d7", "score": "0.5463962", "text": "def bits(self, a):\n result = []\n oct_bits={'0':[0,0,0],'1':[0,0,1],'2':[0,1,0],'3':[0,1,1],\n '4':[1,0,0],'5':[1,0,1],'6':[1,1,0],'7':[1,1,1]}\n for c in filter(lambda char: char != 'L', oct(a))[1:]:\n result += oct_bits[c]\n return result", "title": "" }, { "docid": "701431fa7cf8cea5a7008b86ac03a64d", "score": "0.5462894", "text": "def __str__(self):\r\n bit = ''\r\n for i in range(7, -1, -1):\r\n x = (self.bytebuffer[0] & 1 << i) >> i\r\n bit += str(x)\r\n return bit", "title": "" }, { "docid": "5c814c8cc832a79d78d531a81ca64a43", "score": "0.5461651", "text": "def bits(self, bits):\n bits = int(bits)\n\n if bits <= 0:\n raise AttributeError(\"bits must be >= 0 (%r)\" % bits)\n\n return bits", "title": "" }, { "docid": "aa5b5f5eb72a2a3315f7cebe3c1df57f", "score": "0.54546505", "text": "def Octet(name):\n return BitField(name, 8)", "title": "" }, { "docid": "71a1b62f4e5e71c2b951e71f1e739512", "score": "0.54531765", "text": "def get_mode(self):\n\n\t\treturn struct.unpack('<L', self.value[4 : 8])[0]", "title": "" }, { "docid": "2b5e3cd2ad80be85bbeb5ad087f4f7cb", "score": "0.54466647", "text": "def _perm_to_flag(perm):\n flag = [i for i in perm]\n for i, item in enumerate(perm):\n flag[item] = i\n\n return flag", "title": "" }, { "docid": "1c272340971adac09027e1f6baa0a154", "score": "0.54278356", "text": "def get_binary(self):\n return self.binary[2:].zfill(18)", "title": "" }, { "docid": "2a77d56d65352e8bf549c18616a38782", "score": "0.5421953", "text": "def perm(self):\n return self._perm", "title": "" }, { "docid": "dc2b7744fbfecb344d3224a7c30377ef", "score": "0.5414223", "text": "def _bits(self):\n return self.raw_network.prefixlen()", "title": "" }, { "docid": "fdc97ca915a517a89c80da2a3f04331d", "score": "0.54116976", "text": "def permission_level(self):\n username = self.get_current_user()\n user = User.get_user(username)\n \n if user is not None:\n return user.permission_level\n return PERMISSION_LEVEL_NONE", "title": "" }, { "docid": "33ca29765c796bf3239e14141fc7191f", "score": "0.5395037", "text": "def level(args,user=\"\",hostmask=\"\",extra={}):\n return \"Your perms are now 0 :D\"", "title": "" }, { "docid": "fcca55d199a1f0501ce453e61177b7a0", "score": "0.539427", "text": "def permissions(self):\n return [perm for perm in dir(self) if perm.startswith(\"can_\")]", "title": "" }, { "docid": "f9e9381a55f6fbd6f5d16843d59d3a25", "score": 
"0.5385754", "text": "def bit_of(c):\n return 1 << Alphabet.order.index(c)", "title": "" }, { "docid": "ed769908645629d2877421bd36df6898", "score": "0.5373343", "text": "def level(self):\n return bytes([ord(self.mask) & SLC_LEVELBITS])", "title": "" }, { "docid": "a84d0943be782a063cdea11835a95deb", "score": "0.53629595", "text": "def read_perm_spec(spec):\n total_mask = 0\n # suid/sgid modes give this mask in user, group, other mode:\n set_bits = (04000, 02000, 0)\n pieces = (spec[0:3], spec[3:6], spec[6:9])\n for i, (mode, set_bit) in enumerate(zip(pieces, set_bits)):\n mask = 0\n read, write, exe = list(mode)\n if read == 'r':\n mask = mask | 4\n elif read != '-':\n raise ValueError, (\n \"Character %r unexpected (should be '-' or 'r')\"\n % read)\n if write == 'w':\n mask = mask | 2\n elif write != '-':\n raise ValueError, (\n \"Character %r unexpected (should be '-' or 'w')\"\n % write)\n if exe == 'x':\n mask = mask | 1\n elif exe not in ('s', '-'):\n raise ValueError, (\n \"Character %r unexpected (should be '-', 'x', or 's')\"\n % exe)\n if exe == 's' and i == 2:\n raise ValueError, (\n \"The 'other' executable setting cannot be suid/sgid ('s')\")\n mask = mask << ((2-i)*3)\n if exe == 's':\n mask = mask | set_bit\n total_mask = total_mask | mask\n return total_mask", "title": "" }, { "docid": "ca5a1dccf5192455b4b2ca78e454acda", "score": "0.53494614", "text": "def flags(self) -> Optional[int]:\r\n return self._get(self.Types.flags)", "title": "" }, { "docid": "617209462f88a550c6834933cff95177", "score": "0.533105", "text": "def _permission_level(user, room):\n if not user.is_authenticated():\n return Perms.READ\n else:\n return Perms.WRITE", "title": "" }, { "docid": "68ac5d9bb239d89a512fc80b83d28460", "score": "0.5330062", "text": "def permissions(self):\n return self.getattr('permissions')", "title": "" }, { "docid": "09b3a47f82c1089f2eeba76611a459ad", "score": "0.53271484", "text": "def bitmask(self):\n return self.FEATURE_BITMASK", "title": "" }, { "docid": "caf4bf9ef9570f3b904de8194d04cc48", "score": "0.5319045", "text": "def r_flags(self):\r\n return self._flags", "title": "" }, { "docid": "d588c52901d59b5e2180e973476b3812", "score": "0.53073657", "text": "def flags(self):\n return self.__flags", "title": "" }, { "docid": "8567add6f392f7a795a05c092a6ac61c", "score": "0.5293697", "text": "def get_bits(self):\n return self.__bits", "title": "" }, { "docid": "078b0ca333729c3f46d6121efaf2f6dd", "score": "0.5291664", "text": "def __str__(self):\n\t\treturn self.permission", "title": "" }, { "docid": "2cd0e86c5846865f8ab1c9c408caf0c9", "score": "0.5290015", "text": "def GetFlags(self):\n return _MaxPlus.Bitmap_GetFlags(self)", "title": "" }, { "docid": "81638e9cd9140d3de1b40c1a5ccb5cea", "score": "0.5284313", "text": "def umask(mask):\n\tpass", "title": "" }, { "docid": "1ede6a3b93a420296602d37482a4ba5f", "score": "0.52813727", "text": "def permissions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"permissions\")", "title": "" }, { "docid": "27daa8e9fcf43e402fa3e6393568776c", "score": "0.5275204", "text": "def execbits2str(filename, check_other=False):\n\n\t# WARNING: no os.stat(); see elsewhere in this file for comment.\n\tfileperms = os.lstat(filename).st_mode & 07777\n\texecperms = []\n\n\t# exec bit for owner ?\n\tif fileperms & S_IXUSR:\n\t\texecperms.append('x')\n\telse:\n\t\texecperms.append('-')\n\n\t# exec bit for group ?\n\tif fileperms & S_IXGRP:\n\t\texecperms.append('x')\n\telse:\n\t\texecperms.append('-')\n\n\tif 
check_other:\n\t\t# exec bit for other ?\n\t\tif fileperms & S_IXOTH:\n\t\t\texecperms.append('x')\n\t\telse:\n\t\t\texecperms.append('-')\n\n\treturn execperms", "title": "" }, { "docid": "a854ce0ce6739023ba9b89431d50975c", "score": "0.5271982", "text": "def bit_length(self):\n return self._modulo.bit_length()", "title": "" }, { "docid": "8024780e0786e0bd822eff03c83648b2", "score": "0.52677035", "text": "def showPermissions(self):\n uri = f'{self.apibase}permissions'\n p = ParameterBuilder({}, {}, uri)\n return self.APIConnect('GET', p)", "title": "" }, { "docid": "6b4a0dc04e0f4034a7578e04dcedc480", "score": "0.5265167", "text": "def oct_(self):\n self.display_mode = 'oct'", "title": "" }, { "docid": "145e8cbc45bc677cb455e5ab4b5327a5", "score": "0.5262259", "text": "def flags(self):\n return self._flags", "title": "" }, { "docid": "477e20c4785d27bb259b2f17799d2e96", "score": "0.5259816", "text": "def s_or_flags(self, bits):\r\n self._flags |= bits\r\n return self._flags", "title": "" }, { "docid": "cbed3202eb531934a5ed96248e9cd970", "score": "0.52561474", "text": "def _flag_to_bytes(self, code):\n return int(code).to_bytes(1, byteorder=\"little\")", "title": "" }, { "docid": "6904a05038fb55690654a7ea923dadce", "score": "0.5234968", "text": "def test_flags_constructor():\n from Python.Test import FlagsConstructorTest\n from System.IO import FileAccess\n\n flags = FileAccess.Read | FileAccess.Write\n ob = FlagsConstructorTest(flags)\n assert ob.value == flags", "title": "" }, { "docid": "8870026d288ff6d5181d0b7695453e6b", "score": "0.5226247", "text": "def permissions_list(self):\n return [unicode(permission) for permission in self.permissions]", "title": "" }, { "docid": "528ddb469df3ebd7959d0b4c034a7e4f", "score": "0.5224042", "text": "def BitMask(num_bits):\n return (1<<num_bits) - 1", "title": "" }, { "docid": "a655b92e56e0d52aae4c24f9ac6ce445", "score": "0.5219834", "text": "def GetPermissions(self):\n pass", "title": "" }, { "docid": "1d64bab23ba2d615a7552bc6db05763e", "score": "0.519934", "text": "def __twos_compliment(self, val):\n bits = 16\n return int(bin(((1 << bits) - 1) & val)[2:], 2)", "title": "" }, { "docid": "b400da55ba1592057489732ca5e6ad13", "score": "0.5194823", "text": "def GetFlags(self):\n return _MaxPlus.BitmapInfo_GetFlags(self)", "title": "" }, { "docid": "9967a4238cfc208e910842590796d3cc", "score": "0.51919216", "text": "def permission_cfg(self):\n return self._permission_cfg", "title": "" }, { "docid": "922774794fd4df20aa4994c5ad5b0b62", "score": "0.5176379", "text": "def type_to_flag(type):\n\tprint(type)\n\tif type == TYPE_OUT_CMD:\n\t\treturn 0x02\n\tif type == TYPE_IN_ACL:\n\t\treturn 0x01\n\tif type == TYPE_OUT_ACL:\n\t\treturn 0x00\n\tif type == TYPE_IN_EVT:\n\t\treturn 0x03\n\tif type == TYPE_IN_SCO:\n\t\treturn 0x01\n\tif type == TYPE_OUT_SCO:\n\t\treturn 0x00", "title": "" }, { "docid": "3ffd9141b430d196fcae8a996c40fb41", "score": "0.5172777", "text": "def get_flags(self):\r\n return self.get('Status', '') + self.get('X-Status', '')", "title": "" }, { "docid": "4ed7c9b9fcae98f3037bb73c8fcd8398", "score": "0.5168232", "text": "def bits(self):\n return ''.join(['1' if l in vowels else '0' for l in self.word])", "title": "" }, { "docid": "9a41f5a5a9d6e75d1eee4c770513c6e2", "score": "0.51652443", "text": "def bit_length(self): # real signature unknown; restored from __doc__\n pass", "title": "" } ]
b931b556e57f809db9247e801470b1bd
Return n chooses r, or n!/r!/(n-r)!
[ { "docid": "10d66a7086fddab0599bce41d2771243", "score": "0.78110343", "text": "def chooses(n, r):\n return factorial(n)/factorial(r)/factorial(n-r)", "title": "" } ]
[ { "docid": "04c81a6690a644bf76f5e49c85d2a527", "score": "0.75664914", "text": "def choose_r(n):\r\n r = 1\r\n while n % 2 == 0:\r\n r += 1\r\n n = n // 2\r\n return r - 1", "title": "" }, { "docid": "a324d78962fec4addf95ecccb5bb5426", "score": "0.72688043", "text": "def n_choose_r(n, r):\n\n return int(factorial(n) / (factorial(r) * factorial(n - r)))", "title": "" }, { "docid": "41968d9f2c0398345c70ffa5fe778765", "score": "0.6999358", "text": "def ncr(n, r):\n if not isinstance(n, int) or n < 0:\n raise Exception(\"n must be a non-negative int\")\n if not isinstance(r, int) or r < 0:\n raise Exception(\"r must be a non-negative int\")\n if n < r:\n return 0\n f = math.factorial\n return f(n) // f(r) // f(n-r)", "title": "" }, { "docid": "f48ab286a8c7b9cac5a0f3776f518401", "score": "0.6702724", "text": "def p(n: int, r: int) -> float:\n # don't reject anyone if r = 0, pick first\n return 1/n if r == 0 else r/n*(H(n - 1) - H(r - 1))", "title": "" }, { "docid": "f53b7b82d5a37bddd5953ab7d830a8df", "score": "0.6700528", "text": "def nCr(n, r):\n return int(factorial(n) / factorial(r) / factorial(n-r))", "title": "" }, { "docid": "1792ccbc48d4d0f06c8501f9ea45fb57", "score": "0.6540554", "text": "def get_r(n):\n r = 2\n while True:\n if gcd(r, n) != 1:\n r += 1\n elif ord(n, r) > pow(log2(n), 2):\n break\n else:\n r += 1\n \n return r", "title": "" }, { "docid": "4351665adfd819c9a3c9830550aeb220", "score": "0.6498722", "text": "def npr(n, r):\r\n assert(0 <= r <= n)\r\n return product(range(n - r + 1, n + 1))", "title": "" }, { "docid": "188cf764a4141180a948fd0636fcaa69", "score": "0.64199007", "text": "def cnr(n, r):\n \n coef = fac(n) / (fac(r) * fac(n-r))\n \n return int(coef)", "title": "" }, { "docid": "902297aac4abc208299bf469754215d2", "score": "0.64098465", "text": "def ncr(n, r):\n r = min(r, n - r)\n if r == 0:\n return 1\n numer = reduce(op.mul, xrange(n, n - r, -1))\n denom = reduce(op.mul, xrange(1, r + 1))\n return numer // denom", "title": "" }, { "docid": "6bc88968dd9133151bbf59475af14ac5", "score": "0.6302684", "text": "def factR(n):\n if n==1:\n return 1\n else:\n return n * factR(n-1)", "title": "" }, { "docid": "aeaaea3c9d8ac47d93609cedcca58b20", "score": "0.62534606", "text": "def _factor(ps, n):\n for p in ps:\n if p > isqrt(n):\n break\n if (n // p) * p == n:\n return p\n return None", "title": "" }, { "docid": "274691baf3881478c6d816e45f8d50ea", "score": "0.6240244", "text": "def turan(n, r):\n mod = n % r\n upper = math.ceil(n / r) # ⌈n/r⌉\n lower = math.floor(n / r) # ⌊n/r⌋\n return (n * n - mod * upper * upper - (r - mod) * lower * lower) // 2", "title": "" }, { "docid": "2d5a97434a94a345d5c491d2e09404fe", "score": "0.62281054", "text": "def nCr(n, r):\n r = min(r, n-r)\n numer = reduce(op.mul, xrange(n, n-r, -1), 1)\n denom = reduce(op.mul, xrange(1, r+1), 1)\n return numer//denom", "title": "" }, { "docid": "8468d1e1dd16f37150def7e3cb80cd21", "score": "0.6159383", "text": "def pollard_rho(n):\n\tx1 = 2\n\tx2 = 2\n\tfactor = 1\n\tg = lambda x: ((x ** 2) + 1) % n\n\tstep = 0\n\twhile factor == 1:\n\t\tstep += 1\n\t\tx1 = g(x1)\n\t\tx2 = g(g(x2))\n\t\tfactor = find_gcf(abs(x1 - x2), n)\n\tif factor == n:\n\t\treturn None, step\n\treturn (factor, n // factor), step", "title": "" }, { "docid": "19128a73b0887945c7aded653db4487b", "score": "0.61101896", "text": "def compute_combinatoric(n, r):\n\t# r must be <= n\n\tif r > n:\n\t\treturn 0\n\n\t# nCr = n! 
/ (r!(n-r)!)\n\treturn permute(n) / (permute(r) * permute(n-r))", "title": "" }, { "docid": "99821bd9cc8d682bb5289824461c7c21", "score": "0.6090385", "text": "def rational(n, d):\r\n g = gcd(n, d)\r\n n, d = n//g, d//g\r\n def select(name):\r\n if name == 'n':\r\n return n\r\n elif name == 'd':\r\n return d\r\n return select", "title": "" }, { "docid": "83e2ad7681ff73b54edda767dc0aceac", "score": "0.60754114", "text": "def _pollard_rho(n, rand=True):\n\n f = lambda x, c: x*x + c\n if not rand:\n x, c = 1, 1\n else:\n x, c = random.randrange(2, 1e6), random.randrange(2, 1e6)\n\n y, d = x, 1\n while d == 1 and d != n:\n x = f(x, c) % n\n y = f(y, c) % n\n y = f(y, c) % n\n d = gcd(y-x, n)\n return int(d)", "title": "" }, { "docid": "f076e4763201000a53f677403ccaa622", "score": "0.6051556", "text": "def Rnm(n,m,r):\n R = 0\n for s in range((n-m)/2+1):\n R += (((-1)**s*np.math.factorial(n-s))/(np.math.factorial(s)*np.math.factorial((n+m)/2-s)*np.math.factorial((n-m)/2-s)))*r**(n-2*s)\n return R", "title": "" }, { "docid": "b002774f6aa17ec8fcd54188b2306a8d", "score": "0.5978148", "text": "def rice(n, **args):\n if 'k' in args: k = args['k']\n else: k = 0\n \n q = int(n/2**k)\n r = n % 2**k\n \n return '1'*(q+1) + bin(r)[2:]", "title": "" }, { "docid": "5a06e308c2b218c622c3eca11fe86bf5", "score": "0.59628856", "text": "def exo13(n):\n q = -1\n res = ''\n while q != 0:\n q = n // 2\n r = n % 2\n res = `r` + res\n n = q\n return res", "title": "" }, { "docid": "eba2e2f932c46a7a1d744f0ac8491e1d", "score": "0.5936461", "text": "def combination(n,r):\n r=factorial(n) /( factorial(n-r) * factorial(r))\n return r", "title": "" }, { "docid": "a05c3761804d85a03a9659d9ca99e35b", "score": "0.5917747", "text": "def rs():\n return int(choice([-1, 1]))", "title": "" }, { "docid": "a1ec52b660e38f618e37e2860f82e92a", "score": "0.58853847", "text": "def perm(n, r):\n b = (n - r)\n a = fact(n)\n b = fact(b)\n c = a // b\n return c", "title": "" }, { "docid": "48e102a3429a41307f30ea6b72139a52", "score": "0.5857289", "text": "def nrcifre(n):\n c = int(n)\n\n t = 0\n while c > 0:\n t = t + 1\n c = c // 10\n return t", "title": "" }, { "docid": "16a222e00145258484c3a7f47d9b962f", "score": "0.5806393", "text": "def trial_division(n):\n for p in primes(isqrt(n)):\n if n % p == 0:\n return p\n return n", "title": "" }, { "docid": "e2dc0d5a373cb8182fd06fe1a1e0eede", "score": "0.5792834", "text": "def esParell(n):\n\n\treturn n%2==0", "title": "" }, { "docid": "d2723b2f243b0a32a05aa87f2b2ce42f", "score": "0.5785152", "text": "def return_n(n):\n return n", "title": "" }, { "docid": "5f5dadd5b58649912e9f09ce1f0d3530", "score": "0.57767385", "text": "def get_ratio(n):\n if (n < 1):\n raise ValueError(\"n must be positive integer\")\n else:\n log = get_log_primes_product(n)\n return log/n", "title": "" }, { "docid": "320e4d4a621684e4520aea61065ad154", "score": "0.5744738", "text": "def hardlims(n):\n if n<0:\n return -1\n else:\n return 1", "title": "" }, { "docid": "ff38bf427056e7340b3d5b82a13d6491", "score": "0.57345086", "text": "def RandDiv(n):\r\n\treturn __createRandDiv__(n)", "title": "" }, { "docid": "7c3be19864949c90891363c99f449c38", "score": "0.57166094", "text": "def R_nl(n, l, nu, r):\n n, l, nu, r = map(S, [n, l, nu, r])\n\n # formula uses n >= 1 (instead of nodal n >= 0)\n n = n + 1\n C = sqrt(\n ((2*nu)**(l + Rational(3, 2))*2**(n + l + 1)*factorial(n - 1))/\n (sqrt(pi)*(factorial2(2*n + 2*l - 1)))\n )\n return C*r**(l)*exp(-nu*r**2)*assoc_laguerre(n - 1, l + S.Half, 2*nu*r**2)", "title": "" }, { "docid": 
"9649be3698a902793a1c302eb82efc30", "score": "0.57129157", "text": "def calculate_number_of_combinations(n: int, r:int = 2) -> int:\n\tn_factorial = math.factorial(n)\n\tr_factorial = math.factorial(r)\n\tn_minus_r_factorial = math.factorial(n-r)\n\t#return int(n * (n - r)/r)\n\n\tresult = n_factorial / (n_minus_r_factorial * r_factorial)\n\treturn int(result)", "title": "" }, { "docid": "ac4cca70bb32ad8bf06a3bb8efdd9955", "score": "0.57090044", "text": "def r_in_n(manager, r, n, cur=1):\n alpha = sdd.sdd_manager_true(manager)\n if cur == n+1:\n pass\n\n elif r == 0:\n for i in range(cur, n+1):\n lit = sdd.sdd_manager_literal(-i, manager)\n alpha = sdd.sdd_conjoin(alpha, lit, manager)\n\n elif r == n - cur + 1:\n for i in range(cur, n+1):\n lit = sdd.sdd_manager_literal(i, manager)\n alpha = sdd.sdd_conjoin(alpha, lit, manager)\n\n else:\n beta = sdd.sdd_manager_literal(cur, manager)\n remainder_true = r_in_n(manager, r-1, n, cur+1)\n beta = sdd.sdd_conjoin(beta, remainder_true, manager)\n gamma = sdd.sdd_manager_literal(-cur, manager)\n remainder_false = r_in_n(manager, r, n, cur+1)\n gamma = sdd.sdd_conjoin(gamma, remainder_false, manager)\n alpha = sdd.sdd_disjoin(beta, gamma, manager)\n\n return alpha", "title": "" }, { "docid": "9238ccd700cc0ab07a9078b78b290442", "score": "0.5703459", "text": "def PLURAL_POLISH(n):\n if n == 1: return 0\n if n % 10 >= 2 and n % 10 <= 4 and (n % 100 < 10 or n % 100 >= 20): return 1\n return 2", "title": "" }, { "docid": "bcf2d20286891cbffbeb7de51fe89824", "score": "0.56968176", "text": "def n_to_rots(sampling_n):\n return 10*(sampling_n+5*sampling_n**3)", "title": "" }, { "docid": "39a4bc2a700976890429e7c9d1f9a52c", "score": "0.5687576", "text": "def radical(n, *args, **kwds):\n try:\n return n.radical(*args, **kwds)\n except AttributeError:\n return n.factor(*args, **kwds).radical_value()", "title": "" }, { "docid": "93efbe1542a98a1ed0d843eb219fcf04", "score": "0.5672335", "text": "def divide_and_round(n):\n print(\"4\")\n if n % 2 == 0:\n n = n / 2\n return int(n)\n else:\n n = (n + 1) / 2\n return int(n)", "title": "" }, { "docid": "c985d41e7dc210c1c190cb4252c1cd2c", "score": "0.5669196", "text": "def find_least_div(n:int) -> int:\n div = 1\n while div < n:\n div += 1\n if n % div == 0:\n return(div)", "title": "" }, { "docid": "3f29891ec21e887f7bf6c8185bd7b90c", "score": "0.5655062", "text": "def pollard_rho(number):\n\n if number % 2 == 0:\n return 2\n\n rand_y = nutil.getRandomRange(1, number - 1, urandom)\n rand_c = nutil.getRandomRange(1, number - 1, urandom)\n copy_y = rand_y\n\n index_g = 1\n\n while index_g == 1:\n rand_y = ((rand_y * rand_y) % number + rand_c) % number\n copy_y = ((copy_y * copy_y) % number + rand_c) % number\n copy_y = ((copy_y * copy_y) % number + rand_c) % number\n index_g = gcd(abs(rand_y - copy_y), number)\n\n return index_g", "title": "" }, { "docid": "bda44750b267fb1e33ce14ce8af83b71", "score": "0.5653909", "text": "def factoriel(n):\n #print(\"- input: \", n)\n if n <= 1:\n return 1\n else:\n return n * factoriel(n-1)", "title": "" }, { "docid": "10e683dcde391c798781d60c92d45f1a", "score": "0.5634964", "text": "def divisorsi(n):\n return (a for a in range(1, n) if n%a == 0)", "title": "" }, { "docid": "f2ec878f1c966b60a67eb21986fb742b", "score": "0.5625635", "text": "def divides(n):\r\n def div(k):\r\n return n%k==0\r\n return div", "title": "" }, { "docid": "15de93881c04bf54414233cf2e10972b", "score": "0.5623695", "text": "def double(n) :\r\n\t#PREMISES FOR NEXT LINE: \r\n\t# (n >= 0)\r\n\tif n == 0 
:\r\n\t\t#PREMISES FOR THEN-ARM: \r\n\t\t# (n == 0)\r\n\t\t# (n >= 0)\r\n\t\tans = 0\r\n\t\t#PREMISES FOR ATTACHED PROOF, IF ANY: \r\n\t\t# (ans == 0)\r\n\t\t# (n == 0)\r\n\t\t# (n >= 0)\r\n\t\t\"\"\"{1.OK n == 0\tpremise\r\n\t\t\t2.OK ans == 0\tpremise\r\n\t\t\t3.OK ans == 2*n\talgebra 1 2\r\n\t\t}\"\"\"\r\n\t\t#PREMISES FOR NEXT LINE: \r\n\t\t# (ans == (2 * n))\r\n\telse :\r\n\t\t#PREMISES FOR ELSE-ARM: \r\n\t\t# not (n == 0)\r\n\t\t# (n >= 0)\r\n\t\tsubans = double(n - 1)\r\n\t\t#PREMISES FOR ATTACHED PROOF, IF ANY: \r\n\t\t# (subans == (2 * (n - 1)))\r\n\t\t# not (n == 0)\r\n\t\t# (n >= 0)\r\n\t\t#PREMISES FOR NEXT LINE: \r\n\t\t# (subans == (2 * (n - 1)))\r\n\t\t# not (n == 0)\r\n\t\t# (n >= 0)\r\n\t\tans = subans + 2\r\n\t\t#PREMISES FOR ATTACHED PROOF, IF ANY: \r\n\t\t# (ans == (subans + 2))\r\n\t\t# (subans == (2 * (n - 1)))\r\n\t\t# not (n == 0)\r\n\t\t# (n >= 0)\r\n\t\t\"\"\"{1.OK not(n == 0)\tpremise\r\n\t\t\t2.OK n >=0\tpremise\r\n\t\t\t3.OK n > 0\talgebra 1 2\r\n\t\t\t4.OK subans == 2 *(n-1)\tpremise\r\n\t\t\t5.OK ans == subans + 2\tpremise\r\n\t\t\t6.OK subans == ans-2\talgebra 5\r\n\t\t\t7.OK ans-2 == 2*(n-1)\tsubst 6 4\r\n\t\t\t8.OK ans == 2*n\talgebra 7\r\n\t\t}\"\"\"\r\n\t\t#PREMISES FOR NEXT LINE: \r\n\t\t# (ans == (2 * n))\r\n\t#PREMISES FOR NEXT LINE: \r\n\t# (ans == (2 * n))\r\n\t# (n >= 0)\r\n\treturn ans\t#PREMISES FOR NEXT LINE: \r\n\t# (ans == (2 * n))\r\n\t# (n >= 0)\r", "title": "" }, { "docid": "0b6d2fa96edca0a9b29a07eab47249e3", "score": "0.5607221", "text": "def rhos_composite (n, r):\n\tif not n % 2: return \n\tif int(math.sqrt(n)) == math.sqrt(n): return \n\n\tx = 2\n\tfor cycle in count(1):\n\t\ty = x\n\t\ti = 1\n\t\tfor j in range(2**cycle):\n\t\t\ti += 1\n\t\t\t# Pollard's modified fermat polynomial\n\t\t\tx = (x**18 + 2) % n \n\t\t\tfactor = math.gcd(x - y, n)\n\n\t\t\tif factor > r: return factor\n\n\t\t\tprint('Cycle %d' % i)", "title": "" }, { "docid": "0c28b8e2bd4b220168fe0b90351ae986", "score": "0.55710155", "text": "def recur_luc(n):\n if n == 1:\n return n\n if n == 0:\n return 2\n return recur_luc(n - 1) + recur_luc(n - 2)", "title": "" }, { "docid": "c5b6baf0796f6c9b2ee83d5e1a80b5c4", "score": "0.5554302", "text": "def divides(n):\r\n def div(k):\r\n return n % k == 0\r\n\r\n return div", "title": "" }, { "docid": "2954b98a914f6215ac1711a5af9dc2ab", "score": "0.5546035", "text": "def ngira(s, n):\r\n if n >= len(s):\r\n # assert n == len(s)\r\n return gira(s), 1\r\n # if n > len(s): impossible if len != 0, solved if len == 0, return 0 moves\r\n n = min(n, len(s) - n)\r\n return gira(s[:n]) + s[n : len(s)-n] + gira(s[len(s)-n : ]), 2", "title": "" }, { "docid": "8c0d8268de3370a3082ac3237c9be45a", "score": "0.55458486", "text": "def trial_division(n, bound=None):\n if bound is None:\n return ZZ(n).trial_division()\n else:\n return ZZ(n).trial_division(bound)", "title": "" }, { "docid": "8efb0d46a3a4d991bbbcd27327159009", "score": "0.55410457", "text": "def numer(rat):\n return rat[0]\n # Alternate implementation:\n # return rat['n']", "title": "" }, { "docid": "51ea8ca569ad00ee431eac93524cb2f9", "score": "0.5535783", "text": "def _annuity_pv_factor(r,n):\n return (1 - (1/(1+r)**n)) / r", "title": "" }, { "docid": "7e238ddbc5dd7bde0fe3c5b824e0f370", "score": "0.5529339", "text": "def combination(n,r):\n return fact(n)/(fact(r)*fact(n-r))", "title": "" }, { "docid": "bd8ad0a27206f3df39279974114ed9e1", "score": "0.5522404", "text": "def magic(n):\n sumdivisors = 0\n for i in range(1,n):\n if n%i == 0:\n sumdivisors = sumdivisors + i\n return 
(sumdivisors == n)", "title": "" }, { "docid": "b5010d33c709af9b6e95b453c40ce930", "score": "0.55201846", "text": "def rots_to_n(rots):\n sampling_n = 1\n while True:\n this_rots = n_to_rots(sampling_n)\n if this_rots == rots:\n return sampling_n\n if this_rots > rots:\n raise ValueError(f\"{rots} rotations does not correspond to any n\")\n sampling_n += 1", "title": "" }, { "docid": "0574f52be285ad5c6ee06b1474766e6b", "score": "0.55148375", "text": "def tower(n):\n if n == 0:\n return 1\n else:\n return 2**(tower(n - 1))", "title": "" }, { "docid": "2d320230345b8994f87df65d2cde5e0a", "score": "0.54875624", "text": "def nodoAleatorio(n):\n return random.randint(0, n-1)", "title": "" }, { "docid": "182bdc11fe1489c0bd6c3fe2800f2a08", "score": "0.546306", "text": "def divide_exact(n, d=10):\n\treturn floordiv(n,d), mod(n,d)", "title": "" }, { "docid": "209fd368e0665d79af05ae8a102fa7d2", "score": "0.5461922", "text": "def brent_rho(number):\n\n if number % 2 == 0:\n return 2\n\n rand_y = nutil.getRandomRange(1, number - 1, urandom)\n rand_c = nutil.getRandomRange(1, number - 1, urandom)\n rand_m = nutil.getRandomRange(1, number - 1, urandom)\n\n index_g, index_r, index_q = 1, 1, 1\n\n while index_g == 1:\n copy_y = rand_y\n index_k = 0\n\n index = 0\n while index < index_r:\n rand_y = ((rand_y * rand_y) % number + rand_c) % number\n index += 1\n\n while (index_k < index_r and index_g == 1):\n y_copy2 = rand_y\n index = 0\n limit = min(rand_m, index_r - index_k)\n\n while index < limit:\n rand_y = ((rand_y * rand_y) % number + rand_c) % number\n index_q = index_q * (abs(copy_y - rand_y)) % number\n index += 1\n\n index_g = gcd(index_q, number)\n index_k = index_k + rand_m\n\n index_r = index_r * 2\n\n if index_g == number:\n while True:\n y_copy2 = ((y_copy2 * y_copy2) % number + rand_c) % number\n index_g = gcd(abs(copy_y - y_copy2), number)\n if index_g > 1:\n break\n\n return index_g", "title": "" }, { "docid": "c51f2f6350d9f26bd4d27c3ed41236e7", "score": "0.5454989", "text": "def divide_exact(n,d): \n return floordiv(n,d), mod(n,d)", "title": "" }, { "docid": "f04d59f0943ba07bbcdf9c30056e969c", "score": "0.54456586", "text": "def answer(n):\n base = 2\n while 1:\n if is_palindrome(rep_in_base(n, base)):\n break\n else:\n base += 1\n return base", "title": "" }, { "docid": "e45f60632aab9ead25d6b1c94400c8dd", "score": "0.54436153", "text": "def harmonic_number(n, k=1, r=1):\n if k < 0:\n raise ValueError(\"m must be positive\")\n elif k == 0:\n ret = 1/(n**r)\n elif k == 1:\n # if QQ isn't used, then 1/n == 0 (integer division)\n ls = [QQ(1)/(j**r) for j in range(1, n + 1)]\n ret = sum(ls)\n else:\n ls = [harmonic_number(j, k - 1, r) for j in range(1, n + 1)]\n ret = sum(ls)\n return ret", "title": "" }, { "docid": "b51b383650f520971c633cb772174bc1", "score": "0.54333293", "text": "def nth_remainder(a, b, n):\n return (a if n == 0 else 10 * nth_remainder(a, b, n - 1)) % b", "title": "" }, { "docid": "61562e0d5bb8e49d88326a8ca479c26f", "score": "0.54241616", "text": "def hacked_farey(n):\n\n a, b, c, d = 428569, 999994, 3, 7 # initial fractions\n\n count = 10\n while c <= n and count:\n count -= 1\n\n print(\"{}/{}\".format(a,b))\n\n # calc k\n k = (n + b)//d\n\n # calc next fraction\n\n # next numerator: cannot be done independent!\n a, c, = c, k*c - a\n # next denumarator\n b, d = d, k*d - b\n\n # detect end\n #if(b == 9998 and a == 3333 or b == 999997 and a == 428570 or b == 3 and a == 1): \n \n\n \n # detect end\n #if(a==1 and b == 2): break", "title": "" }, { "docid": 
"4b29d6761b78004b3a2fc3969f37831a", "score": "0.5421521", "text": "def fibo_r(n: int) -> int:\n if n < 2:\n return 1\n else:\n return fibo_r(n - 1) + fibo_r(n - 2)", "title": "" }, { "docid": "156ccb41f5b33c4f62ed7b253caf0107", "score": "0.5420947", "text": "def lower_right(n):\n return n ** 2", "title": "" }, { "docid": "22dff38eaada0c76b0e6de57d288e348", "score": "0.542083", "text": "def numer(x):\r\n return x('n')", "title": "" }, { "docid": "8974d1ce63a43e3ab7396d2b0d4d24f8", "score": "0.54143625", "text": "def hardlim(n):\n if n<0:\n return 0\n else:\n return 1", "title": "" }, { "docid": "dbaf1a5fca90969ffe5c2f4367c44b45", "score": "0.5406334", "text": "def choose(n,x):\r\n #(n x) = n!/(x!(n-x)!)\r\n f = factorial\r\n result = f(n)/(f(x)*f(n-x))\r\n return result", "title": "" }, { "docid": "071e6a269e259242ec4c078ef519cabc", "score": "0.5404378", "text": "def reciprocal_cycle_length(n: int) -> int:\n \n # perform long division for 1/n\n remainders: Dict[int, int] = {}\n dividend = 1\n digit_count = 0\n while dividend != 0:\n # compute next remainder\n dividend = (dividend * 10) % n\n \n if dividend in remainders:\n # remainder has been seen before; found cycle\n return digit_count - remainders[dividend]\n else:\n # store remainder for later cycle detection\n remainders[dividend] = digit_count\n \n digit_count += 1\n \n # division occurred without remainder; no digit cycle\n return 0", "title": "" }, { "docid": "6ea8123123c073417e1ca539ece4280b", "score": "0.5403335", "text": "def ci(P, r, n, t):\n return P * (1 + (r / n)) ** (n * t)", "title": "" }, { "docid": "cd56dd280684583259eb36e84d785c35", "score": "0.5399756", "text": "def factor ( self, n ):\n\n #-- 1 --\n if n < 4:\n return None\n\n #-- 2 --\n #-[ limit := the largest integer <= floor(sqrt(n))\n #-]\n limit = int ( sqrt ( float ( n ) ) )\n\n #-- 3 --\n #-[ self._p := self._p with all necessary values\n # added so that it contains all primes <= limit\n #-] self._pMax := max ( self._pMax, limit )\n #-]\n self._fill ( limit )\n\n #-- 4 --\n #-[ if there is an element E of self._p such that\n # (E<=limit) and (E divides n) ->\n # return the smallest such element\n # else -> I\n #-]\n for f in self._p:\n #-- 4 body --\n #-[ if f > limit ->\n # break\n # else if f divides n ->\n # return f\n # else -> I\n #-]\n if ( n % f ) == 0:\n return f\n elif f >= limit:\n break\n\n #-- 5 --\n return None", "title": "" }, { "docid": "beaf9d81901581cacdfb301dc9abca74", "score": "0.539756", "text": "def abundancy(n):\n return sumOfProperDivisors(n) - n", "title": "" }, { "docid": "85ae083513782232b76537cadb244302", "score": "0.5397531", "text": "def calc_dn(self,n):\n liste_image=self.dir.list_of_image()\n n_image=len(liste_image)\n if n>n_image:\n return(int(n/n_image))\n else:\n print(\"Number of display images greater then the number of initial images:%s\",self.n_image)", "title": "" }, { "docid": "854d97647f2847eb2794a914507e4fe5", "score": "0.5397127", "text": "def solution(n = 1001):\n\n return 1 / 6 * (4 * n**3 + 3 * n**2 + 8 * n -9)", "title": "" }, { "docid": "25fd74ffb71aea112494d3710afd73fb", "score": "0.5395729", "text": "def Expansion(n):\n \n digits = []\n r = 1\n remainders = []\n remainders2 = []\n \n while True:\n dig = int((r*10)/n)\n r = (r*10)%n\n digits.append(dig)\n if r in remainders2:\n break\n elif r in remainders:\n remainders2.append(r)\n else:\n remainders.append(r)\n\n \n return len(remainders)", "title": "" }, { "docid": "5fee3ca939d85ce9fb6c3470d2abc2a3", "score": "0.53952265", "text": "def square_root(n):\"\n 
L,R=0,n\n while R-L>1:\n mid=(L+R)/2\n if mid**2<n:\n L=mid\n elif mid**2>n:\n R=mid-1\n else:\n return mid\n \n if R==L:\n return R\n elif R**2<=n:\n return R\n else:\n return L", "title": "" }, { "docid": "f645c4c277f95e2ec4ffefa7f692e678", "score": "0.53906876", "text": "def trial_division(number):\n last_factor = 0 \n factor = 2\n while number > 1: \n if number % factor == 0: \n last_factor = factor\n number /= factor\n else: \n factor += 1\n return last_factor", "title": "" }, { "docid": "ba092e046c2c42adef30fa906a018713", "score": "0.5389058", "text": "def g(n):\n if n == 0:\n return 1\n else:\n return n if n <= 3 else g(n - 1) + 2 * g(n - 2) + 3 * g(n - 3)", "title": "" }, { "docid": "3612259eb72c5a7d92ab883570bfd4af", "score": "0.5387897", "text": "def divide_and_round(n):\n if n % 2 == 0:\n n = n / 2\n else:\n n = (n + 1) / 2\n n = int(n)\n # division always returns a float, so truncating to get a float,\n # we can alternately achieve same functionality with int division op // \n return n\n # we need the function to turn back the value calculated to the calee.", "title": "" }, { "docid": "1f0d3640dc3a7545308f0cd758130d4c", "score": "0.53717995", "text": "def _default_rarity() -> int:\n if random.random() < 0.66:\n return 0\n elif random.random() < 0.5:\n return 1\n elif random.random() < 0.5:\n return 2\n elif random.random() < 0.5:\n return 3\n else:\n return 4", "title": "" }, { "docid": "60b9fbba2af121badff7394c0e1e0cf2", "score": "0.5371215", "text": "def winnerrank(n):\n rank=['Highcard','One pair','Two pair','Three of kind','Straight','Flush','Fullhouse','Four of kind','Straight Flush']\n return rank[n]", "title": "" }, { "docid": "340147320d895058278f323703478384", "score": "0.535203", "text": "def formula(n):\n sqrt_5 = 5**0.5\n return round(((1 + sqrt_5) / 2)**n / sqrt_5)", "title": "" }, { "docid": "4e2ea39eec43c85aac8effaca163a354", "score": "0.5351698", "text": "def choose(m,n):\n return factorial(m) / (factorial(n)*factorial(m-n))", "title": "" }, { "docid": "9b11e577b9d644eec7ca8fd7c34dacfd", "score": "0.5350847", "text": "def nh_const(dist, r):\n return 1 if dist <= r else 0", "title": "" }, { "docid": "9e5db0a2881275fe214f5037b9675ada", "score": "0.5347606", "text": "def rnd(n: float, n_places: int):\n mult = math.pow(10, n_places or 3)\n return math.floor(n * mult + 0.5) / mult", "title": "" }, { "docid": "23ee8bdb89a6ce26f434373d59ee7978", "score": "0.53392804", "text": "def testPrime(n, r):\r\n if n == 2:\r\n return True\r\n if (n <= 1) or (n % 2 == 0):\r\n return False\r\n \r\n # Get n as 2^r * d + 1\r\n r = 0\r\n d = n-1\r\n while (d % 2 == 0):\r\n r += 1\r\n d //= 2\r\n \r\n # Witness Loop\r\n for _ in range(r):\r\n a = random.randint(2,n-2)\r\n x = pow(a,d,n)\r\n if (x == 1) or (x == n-1):\r\n continue\r\n for _ in range (r-1):\r\n x = pow(x,2,n)\r\n if (x == n-1):\r\n break # Go to next round\r\n else:\r\n return False\r\n return True", "title": "" }, { "docid": "a78114f9dd3893629374242b16d8277c", "score": "0.53357685", "text": "def number(n):\n return n**x", "title": "" }, { "docid": "4dec5d86e9e946784e7c6f28c8768aae", "score": "0.53357667", "text": "def g(n):\n \"*** YOUR CODE HERE ***\"\n if n <= 3:\n return n\n else:\n return g(n - 1) + 2*g(n - 2) + 3*g(n - 3)", "title": "" }, { "docid": "35abef22122a413103268c39b9a89362", "score": "0.5334579", "text": "def super_digit(n: int) -> int:\n ds = n % 9\n if ds:\n return ds\n return 9", "title": "" }, { "docid": "59e44ee0fcebbd38dad401d00db372a3", "score": "0.53336114", "text": "def hailstone(n):\n if n == 
1:\n return 1\n elif n % 2 == 0:\n return hailstone(n // 2) + 1\n else:\n return hailstone(n * 3 + 1) + 1", "title": "" }, { "docid": "b907dc69318748b5533d8a912e8e3a34", "score": "0.53310454", "text": "def expected_rs(n):\n # front = (n - 0.5) / n\n i = np.arange(1, n)\n back = np.sum(np.sqrt((n - i) / i))\n if n <= 340:\n middle = scipy.special.gamma((n - 1) * 0.5) / np.sqrt(np.pi) / scipy.special.gamma(n * 0.5)\n else:\n middle = 1.0 / np.sqrt(n * np.pi * 0.5)\n\n return middle * back", "title": "" }, { "docid": "51670bd7a905d24d56acb65ef05e32a1", "score": "0.5320555", "text": "def proper_divisors(n):\n facts = factors(n)\n facts.discard(n)\n return facts", "title": "" }, { "docid": "049dd02b911c06aa67a7bf8213255a6d", "score": "0.53190184", "text": "def sumProperDivisors(self, n):\n return self.sumDivisors(n)-n", "title": "" }, { "docid": "56d5030e78ca1f053d905c16f1dabbd7", "score": "0.5313297", "text": "def path(n):\n\t\tsequence.append(n)\n\t\tif n == 1:\n\t\t\treturn n\n\t\telif n%2 == 0:\n\t\t\tn = n/2\n\t\t\treturn path(n)\n\t\telse:\n\t\t\tn = 3*n + 1\n\t\t\treturn path(n)", "title": "" }, { "docid": "28b22ebc35757be7342cfda05802e101", "score": "0.5309955", "text": "def kind(n, ranks):\n \n for r in ranks:\n if ranks.count(r) == n:\n return r\n return 0", "title": "" }, { "docid": "61510ff9f738a14fee91e18378a40b0b", "score": "0.5309601", "text": "def nthUglyNumber(self, n: int) -> int:\n if n <= 1:\n return n\n \n ugly = [1]\n p2 = p3 = p5 = 0\n for i in range(1, n):\n ugly.append(min(ugly[p2] * 2, ugly[p3] * 3, ugly[p5] * 5))\n if ugly[i] == ugly[p2] * 2: p2 += 1\n if ugly[i] == ugly[p3] * 3: p3 += 1\n if ugly[i] == ugly[p5] * 5: p5 += 1\n return ugly[-1]", "title": "" }, { "docid": "b8cecef5e7ae7eb24651b076226a1cfc", "score": "0.5299203", "text": "def primitive_root(n: int) -> int:\n n_1 = n - 1\n order = prime_factors(n_1) # set of possible orders\n\n for r in range(2, n):\n flag = False\n for it in order:\n if fast_powering(r, n_1 // it, n) == 1:\n flag = True\n break\n if flag is False:\n return r\n return -1", "title": "" }, { "docid": "a417be3711ee097937246472b0ed2d23", "score": "0.5298516", "text": "def memoized_cut_rod(p, n):\n r = [float(\"-inf\")] * (n+1)\n\n return memoized_cut_rod_aux(p, n, r)", "title": "" }, { "docid": "4bd6cb73b9cef1803572e822f7292a81", "score": "0.52952284", "text": "def haker_rank_wraper(n, k):\n\n def sum_digits(number):\n if number < 10:\n return number\n else:\n return number % 10 + sum_digits(number // 10)\n\n def super_digit(number):\n if number < 10:\n return number\n else:\n return super_digit(sum_digits(number))\n\n p = int(str(n) * k)\n return super_digit(p)", "title": "" }, { "docid": "15363b560f9612243f4cc64f4374b900", "score": "0.5294265", "text": "def solve(n=10**9):\n return count_nondivisible(baseDigits(n, 7))", "title": "" }, { "docid": "8489226f320b71b90112f0fa3fbd2a8c", "score": "0.52939355", "text": "def number2(n):\n return 0 # Stub return. Replace this.", "title": "" } ]
44cba881612adb75761dbba32be55117
Create necessary user accounts and login as an admin user.
[ { "docid": "cecd23b2f17c718e8d4802d2e20bc74c", "score": "0.62966883", "text": "def test_0000_initiate_users(self):\n self.login(email=common.test_user_1_email, username=common.test_user_1_name)\n test_user_1 = self.test_db_util.get_user(common.test_user_1_email)\n assert test_user_1 is not None, 'Problem retrieving user with email %s from the database' % common.test_user_1_email\n self.test_db_util.get_private_role(test_user_1)\n self.login(email=common.admin_email, username=common.admin_username)\n admin_user = self.test_db_util.get_user(common.admin_email)\n assert admin_user is not None, 'Problem retrieving user with email %s from the database' % common.admin_email\n self.test_db_util.get_private_role(admin_user)", "title": "" } ]
[ { "docid": "00713b0f96bed33b60fc121b60a6e795", "score": "0.7546469", "text": "def p_makeAdminUser(self):\n\n # If already in database, return\n if self.dbManager.userExists(C_ADMINISTRATOR_USERNAME):\n return\n # Store admin in database\n self.dbManager.createUser(C_ADMINISTRATOR_USERNAME, C_ADMINISTRATOR_PASSWORD, UserRole.ADMIN, defaultPacemakerParameterData)", "title": "" }, { "docid": "c96ef611b441344a07f4dacbbb76ef41", "score": "0.7424698", "text": "def create_admin():\n from pyceo import prompt\n from .manage import create_user\n\n u = User.by_login(u'admin')\n if not u:\n print 'Creating the `admin` user…'\n email = prompt('>>> `admin` email?\\n')\n create_user(u'admin', 'admin', fullname=u'Admin', email=email)\n u = User.by_login(u'admin')\n\n u.add_role(u'admin')\n db.commit()\n return u", "title": "" }, { "docid": "d47754b71e3e1b2caf253b1327363b4f", "score": "0.7391592", "text": "def create_and_login(self):\n with self.context():\n user = self.factory(meido.factories.UserFactory)\n self.client.post('/management/login', data={\n 'username': 'admin', 'password': 'pretender'\n })", "title": "" }, { "docid": "a5bf03d58efd40256468bb09c2f5889c", "score": "0.73905456", "text": "def create_admin():\n admin = models.User(username= 'gallery_admin', email='[email protected]', address='#0000' , password =bcrypt.generate_password_hash('toledano',\n current_app.config.get('BCRYPT_LOG_ROUNDS')).decode('utf-8'), admin=True)\n admin.save()", "title": "" }, { "docid": "4c60fafd7d7d7db590a5e0b1b4d36c81", "score": "0.7325498", "text": "def create_admin():\n db.session.add(User(\n email=str(hashlib.sha256(\"[email protected]\".encode()).hexdigest()),\n password=\"admin\",\n admin=True,\n confirmed=True,\n confirmed_on=datetime.datetime.now())\n )\n db.session.commit()", "title": "" }, { "docid": "237ebb31ee3c464e169cb9eac1b0e8f5", "score": "0.72866404", "text": "def create_admin_user(self):\n\n sys.stdout.write('creating admin user...'.ljust(LJ_SIZE))\n\n User.objects.create_superuser(username=ADMIN_ACCOUNT_NAME, password=ADMIN_ACCOUNT_PASSWORD, email='')\n self.print_ok()\n\n return self.admin_user_exists()", "title": "" }, { "docid": "c31b5f4aaf8f01d6b68ec8cb7ed7a6b5", "score": "0.72391707", "text": "def on_start(self):\n admin_user = os.environ['ADMIN_USER']\n admin_password = os.environ['ADMIN_PASSWORD']\n admin_domain_name = os.environ['ADMIN_DOMAIN_NAME']\n admin_project_id = os.environ['ADMIN_PROJECT_ID']\n HEADERS['X-Auth-Token'] = self._get_token(admin_user,\n admin_password,\n admin_domain_name,\n project_id=admin_project_id)\n # Create test user\n self.username = 'test_user'\n self.password = 'Password1'\n self.user_domain_id = 'default'\n self.user_domain_name = 'Default'\n self.project_id = self._create_project()['project']['id']\n self._create_user(self.username, self.password, self.user_domain_id,\n self.project_id)", "title": "" }, { "docid": "a22f1e20ff871bd71e6c9c3df13e13b4", "score": "0.7238884", "text": "def create_admin():\n db.session.add(User(\n email=\"[email protected]\",\n password=\"admin\",\n admin=True,\n confirmed=True)\n )\n db.session.commit()", "title": "" }, { "docid": "f921ae37445f73d0c5731c11fe342950", "score": "0.7204094", "text": "def create_admin():\n db.session.add(User(email='[email protected]', password='admin', admin=True))\n db.session.commit()", "title": "" }, { "docid": "f921ae37445f73d0c5731c11fe342950", "score": "0.7204094", "text": "def create_admin():\n db.session.add(User(email='[email protected]', password='admin', admin=True))\n 
db.session.commit()", "title": "" }, { "docid": "62b0b162ef1764dbef4434775ef9c396", "score": "0.7170599", "text": "def _ensure_initial_admin(config):\n if get_api_version() > 2:\n manager = get_manager()\n default_domain_id = create_or_show_domain(DEFAULT_DOMAIN)\n leader_set({'default_domain_id': default_domain_id})\n admin_domain_id = create_or_show_domain(ADMIN_DOMAIN)\n leader_set({'admin_domain_id': admin_domain_id})\n create_or_show_domain(SERVICE_DOMAIN)\n create_tenant(\"admin\", ADMIN_DOMAIN)\n create_tenant(config(\"service-tenant\"), SERVICE_DOMAIN)\n leader_set({'service_tenant_id': manager.resolve_tenant_id(\n config(\"service-tenant\"),\n domain=SERVICE_DOMAIN)})\n create_role('service')\n create_tenant(\"admin\", DEFAULT_DOMAIN)\n create_tenant(config(\"service-tenant\"), DEFAULT_DOMAIN)\n # User is managed by ldap backend when using ldap identity\n if not (config('identity-backend') ==\n 'ldap' and config('ldap-readonly')):\n\n admin_username = config('admin-user')\n if get_api_version() > 2:\n passwd = create_user_credentials(admin_username,\n get_admin_passwd,\n set_admin_passwd,\n domain=ADMIN_DOMAIN)\n if passwd:\n create_role('Member')\n # Grant 'Member' role to user ADMIN_DOMAIN/admin-user in\n # project ADMIN_DOMAIN/'admin'\n # ADMIN_DOMAIN\n grant_role(admin_username, 'Member', tenant='admin',\n user_domain=ADMIN_DOMAIN,\n project_domain=ADMIN_DOMAIN)\n create_role(config('admin-role'))\n # Grant admin-role to user ADMIN_DOMAIN/admin-user in\n # project ADMIN_DOMAIN/admin\n grant_role(admin_username, config('admin-role'),\n tenant='admin', user_domain=ADMIN_DOMAIN,\n project_domain=ADMIN_DOMAIN)\n # Grant domain level admin-role to ADMIN_DOMAIN/admin-user\n grant_role(admin_username, config('admin-role'),\n domain=ADMIN_DOMAIN, user_domain=ADMIN_DOMAIN)\n else:\n create_user_credentials(admin_username, get_admin_passwd,\n set_admin_passwd, tenant='admin',\n new_roles=[config('admin-role')])\n\n create_service_entry(\"keystone\", \"identity\",\n \"Keystone Identity Service\")\n\n for region in config('region').split():\n create_keystone_endpoint(public_ip=resolve_address(PUBLIC),\n service_port=config(\"service-port\"),\n internal_ip=resolve_address(INTERNAL),\n admin_ip=resolve_address(ADMIN),\n auth_port=config(\"admin-port\"),\n region=region)", "title": "" }, { "docid": "52098a6319c31992a1838cefcbc4e5fc", "score": "0.7109386", "text": "def create_admin():\n logger.info('Creating admin user')\n admin_email = CONFIG_BROKER['admin_email']\n admin_pass = CONFIG_BROKER['admin_password']\n with create_app().app_context():\n sess = GlobalDB.db().session\n user = sess.query(User).filter(User.email == admin_email).one_or_none()\n if not user:\n # once the rest of the setup scripts are updated to use\n # GlobalDB instead of databaseSession, move the app_context\n # creation up to initialize()\n user = create_user_with_password(admin_email, admin_pass, Bcrypt(), website_admin=True)\n return user", "title": "" }, { "docid": "25b4185933d35e70785cfcd51f812b1a", "score": "0.7086815", "text": "def setup_user(self):\r\n self.email = '[email protected]'\r\n self.password = 'bar'\r\n self.username = 'test'\r\n self.create_account(self.username,\r\n self.email, self.password)\r\n self.activate_user(self.email)\r\n self.login(self.email, self.password)", "title": "" }, { "docid": "67cd2e8ce0f1622973ed9f4d108c835c", "score": "0.70381325", "text": "def create_admin():\n click.echo(db)\n # db.session.add(User(\n # email=\"[email protected]\",\n # password=\"admin\",\n # 
username=\"admin\"\n # )\n # db.session.commit()", "title": "" }, { "docid": "c66d76732c1d1111325ce48264480922", "score": "0.70319176", "text": "def do_admin_login():\n user_requested = request.form['email'].lower()\n password_requested = request.form['password']\n\n target_user = User.query.filter_by(mail=user_requested).first()\n if target_user is None:\n return Response(render_template('admin/login.html',\n message=\"Unknown Credentials\"))\n\n if not target_user.check_password(password_requested):\n return Response(render_template('admin/login.html',\n message=\"Unknown Credentials\"))\n\n if not target_user.state == StateType.ACTIVE:\n return Response(render_template('admin/login.html',\n message=\"User account deactivated. Cannot login.\"))\n\n resp = Response(render_template('admin/admin.html', user=target_user.name,\n message=\"Login succeeded\"))\n set_access_cookies(resp, create_access_token(identity=target_user.id))\n return resp", "title": "" }, { "docid": "a8216f3418fe853d840cc2aa00c7856b", "score": "0.70099276", "text": "def default_admin_setup(self, *args):\n name = args[0]\n email_address = args[1]\n password = args[2]\n account_type = args[3]\n created_on = args[4]\n last_modified = args[5]\n select_users = \"SELECT * FROM users;\"\n self.cursor.execute(select_users)\n an_admin = self.cursor.fetchall()\n if not an_admin:\n insert_user = \"INSERT INTO users(name, email_address, password, account_type, created_on, last_modified) \" \\\n \"VALUES('{}', '{}', '{}', '{}', '{}', '{}');\"\\\n .format(name, email_address, password, account_type, created_on, last_modified)\n self.cursor.execute(insert_user, (name, email_address, password, account_type, created_on, last_modified))\n self.connection.commit()", "title": "" }, { "docid": "fd114c9ab96c8f3a09666e5689aa50c1", "score": "0.69622064", "text": "def createAdmin():\n select_user_by_email = \"\"\"\n SELECT id, username, password, email FROM users\n WHERE users.email = '{}'\"\"\".format(\"[email protected]\")\n\n isUserPresent = select_data_from_db(select_user_by_email)\n if not isUserPresent:\n conn, cursor = connect_to_db()\n password = generate_password_hash('BootcampWeek1')\n create_admin_if_not_present = \"\"\"\n INSERT INTO users(username, firstname, lastname, othername ,\n phone, email, password, passportUrl , isPolitician ,isAdmin)\n VALUES(\n '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}'\n )\"\"\".format('OriginalAdmin',\n 'FirstAdminName', 'LastAdminName',\n 'OtherAdminName', '0742546892',\n '[email protected]', password, \"\",\n False, True)\n cursor.execute(create_admin_if_not_present)\n conn.commit()\n conn.close()", "title": "" }, { "docid": "ca0ab5982a027c41256c5c0fed7f35c8", "score": "0.6933704", "text": "def make_admin(self):\n user_datastore = SQLAlchemyUserDatastore(db, User, Role)\n user_datastore.add_role_to_user(self, 'admin')\n db.session.commit()", "title": "" }, { "docid": "80df98f8dbad1405fe04df70ab03ae8f", "score": "0.68916017", "text": "def add_admin():\n email = Config.SITE_ADMIN\n password = input('Enter Admin Password >>> ')\n name = input('Enter Display Name >>> ')\n\n user = User(email, password, name)\n user.confirmed = True\n db.session.add(user)\n db.session.commit()\n print(\"%s has been added to the system as Admin\" % user.username)", "title": "" }, { "docid": "7556be390453ddb4b30c2171a25e7eff", "score": "0.6824586", "text": "def admin_login():\n account = request.json['account']\n password = request.json['password']\n u = user.User.query.filter(user.User.account == 
account).first()\n if not u:\n abort(404)\n if u.password == password and u.role == 'admin':\n if u.token is None:\n u.generate_token()\n db.session.merge(u)\n db.session.commit()\n return jsonify(u.to_dict())\n else:\n abort(500)", "title": "" }, { "docid": "3aa9f743ba93f04948f1781543aba5ea", "score": "0.6804341", "text": "def add_admin():\n admin_role = Role.query.filter_by(permissions=0xFF).first()\n admin = User.query.filter_by(email=current_app.config['PILI_ADMIN']).first()\n if not admin:\n admin_user = User(\n email=current_app.config['PILI_ADMIN'],\n username=current_app.config['PILI_ADMIN_NAME'],\n password=generate_password(10),\n role=admin_role,\n confirmed=True,\n )\n db.session.add(admin_user)\n db.session.commit()", "title": "" }, { "docid": "c477a53429edbcbe11a3b850e84a3f25", "score": "0.67809033", "text": "def admin_actions():\n\n create_default_admin()\n return response('Admin account has been created', 201)", "title": "" }, { "docid": "a737597cb4fada1168529b018bdbd145", "score": "0.674952", "text": "def make_user_admin(connection,user):\r\n with connection:\r\n connection.execute(MAKE_USER_ADMIN,(user,))", "title": "" }, { "docid": "1af781336dfe921f92190691c30c37c0", "score": "0.67482233", "text": "def create_admin_user(password, force):\n admin_user = User.query.filter_by(name='admin').first()\n admin_role = Role.query.filter_by(name='admin').first()\n if admin_user and not force:\n print_notice('Admin user already found, exiting. '\n 'Use \"--force\" to force recreation.')\n sys.exit(0)\n if admin_role and not force:\n print_notice('Admin role already found, exiting. '\n 'Use \"--force\" to force recreation.')\n sys.exit(0)\n if admin_user:\n print_notice('Admin user already found, deleting.')\n admin_user.delete()\n if admin_role:\n print_notice('Admin role already found, deleting.')\n admin_role.delete()\n admin_user = User.create(\n 'admin', password=password, full_name='Default admin user')\n admin_role = Role.create('admin', description='Default admin role')\n for permission in get_valid_permissions():\n admin_role.add_permission(permission)\n admin_role.add_user(admin_user)\n admin_role.save()\n admin_user = User.query.filter_by(name='admin').first()\n admin_role = Role.query.filter_by(name='admin').first()\n print_notice('Admin user created with the specified password:')\n print(UserSchema().dumps(admin_user, indent=4))\n print_notice('Admin role created:')\n print(RoleSchema().dumps(admin_role, indent=4))", "title": "" }, { "docid": "ede7a92247a1e6372c884fc61abfd68e", "score": "0.6707552", "text": "def add_users():\n try:\n User.objects.get(username='admin').delete()\n except User.DoesNotExist:\n pass\n User.objects.create_superuser(username='admin', password='admin', email='')\n print('> Superuser was created')\n\n try:\n User.objects.get(username='user1').delete()\n except User.DoesNotExist:\n pass\n User.objects.create_user(username='user1', password='user1', email='')\n print('> User (user1) was created')", "title": "" }, { "docid": "1a61e931a0f82eb828df2727054d81d6", "score": "0.66937685", "text": "def handle(self, *args, **options):\r\n username = 'populate_creators_command'\r\n email = '[email protected]'\r\n try:\r\n admin = User.objects.create_user(username, email, 'foo')\r\n admin.is_staff = True\r\n admin.save()\r\n except IntegrityError:\r\n # If the script did not complete the last time it was run,\r\n # the admin user will already exist.\r\n admin = User.objects.get(username=username, email=email)\r\n\r\n for user in 
get_users_with_role(CourseInstructorRole.ROLE):\r\n add_user_with_status_granted(admin, user)\r\n\r\n # Some users will be both staff and instructors. Those folks have been\r\n # added with status granted above, and add_user_with_status_unrequested\r\n # will not try to add them again if they already exist in the course creator database.\r\n for user in get_users_with_role(CourseStaffRole.ROLE):\r\n add_user_with_status_unrequested(user)\r\n\r\n # There could be users who are not in either staff or instructor (they've\r\n # never actually done anything in Studio). I plan to add those as unrequested\r\n # when they first go to their dashboard.\r\n\r\n admin.delete()", "title": "" }, { "docid": "bc740954118ef57a09c6f670d89da4c9", "score": "0.6689055", "text": "def process_admin_login():\n\n entered_email = request.form.get(\"email\")\n entered_password = request.form.get(\"password\")\n admin = c.get_admin(entered_email, entered_password)\n\n if admin is False:\n flash('Invalid credentials. Please click on sign up to create an account!')\n return redirect('/')\n session['current_admin'] = entered_email\n ad_id = admin.admin_id\n flash('Logged in as %s' % entered_email)\n if admin.rescue_id is None:\n return redirect('/admin' + '/' + str(ad_id) + '/rescue-info')\n else:\n return redirect('/admin' + '/' + str(ad_id))", "title": "" }, { "docid": "9d7bb7337fd6c9e6e5a0728163f682c7", "score": "0.6682012", "text": "def setUp(self):\n self.login(self.create_superuser())", "title": "" }, { "docid": "82e9d56534d905a386c5dd38c70a9315", "score": "0.6678624", "text": "def init(username, password):\r\n from flaskblog.models import User, Tag, Category, Article\r\n click.echo('Initializing the database...')\r\n db.create_all()\r\n\r\n user = User.query.first()\r\n if user is not None:\r\n click.echo('The administrator already exists, updating...')\r\n user.username = username\r\n user.password(password)\r\n else:\r\n click.echo('Creating the temporary administrator account...')\r\n # user = User(username=username,email\r\n #\r\n # )\r\n # admin.set_password(password)\r\n # db.session.add(admin)\r", "title": "" }, { "docid": "848f3080cfdc070d0ed3e0f7d60220ed", "score": "0.66704994", "text": "def login_as_admin(self, username='admin', password='admin'):\n return self.login(**{'username': username, 'password': password})", "title": "" }, { "docid": "c449138dcc949ccc28d2ca6dfcb9bcba", "score": "0.6648066", "text": "def create_admin(\n name: str,\n email: str,\n password: str\n) -> None:\n db: Session = SessionLocal()\n\n hashed_password = get_password_hash(password)\n db_admin = models.Admin(\n name=name,\n email=email,\n password=hashed_password\n )\n db.add(db_admin)\n db.commit()\n db.close()\n print('Admin criado com sucesso!')", "title": "" }, { "docid": "f770b543f5db6c85efb1905383ae89ed", "score": "0.6634512", "text": "def setup_general():\n Role.insert_roles()\n admin_query = Role.query.filter_by(name='Administrator')\n if admin_query.first() is not None:\n if Employee.query.filter_by(email=Config.ADMIN_EMAIL).first() is None:\n user = Employee(first_name='Admin',\n last_name='Account',\n password=Config.ADMIN_PASSWORD,\n email=Config.ADMIN_EMAIL)\n db.session.add(user)\n db.session.commit()\n print('Added administrator {}'.format(user.full_name()))", "title": "" }, { "docid": "8c246669710bad7975d482dbc0e4c816", "score": "0.66311353", "text": "def setUp(self):\n self.superuser = User.objects.create_superuser(\n 'admin',\n '[email protected]',\n 'StrongPassword123'\n )\n self.client.login(\n 
username='admin',\n password='StrongPassword123'\n )", "title": "" }, { "docid": "8c246669710bad7975d482dbc0e4c816", "score": "0.66311353", "text": "def setUp(self):\n self.superuser = User.objects.create_superuser(\n 'admin',\n '[email protected]',\n 'StrongPassword123'\n )\n self.client.login(\n username='admin',\n password='StrongPassword123'\n )", "title": "" }, { "docid": "e8b9f965cbb0cff11bddecfbb730fb26", "score": "0.66243565", "text": "def setUp(self):\n account_models.User.objects.create_user(email='[email protected]', password='WhoAmI', username='aov1')", "title": "" }, { "docid": "0acf8cf9949f96855fd7954390b198ae", "score": "0.6606304", "text": "def create_admin_user(username, password, email):\n admin_group = Group.query.filter_by(admin=True).first()\n user = User()\n\n user.username = username\n user.password = password\n user.email = email\n user.primary_group_id = admin_group.id\n user.activated = True\n\n user.save()\n return user", "title": "" }, { "docid": "113f21867991a9dc4da6836b28dbb151", "score": "0.6603835", "text": "def create_user_as_admin(self, *args, **kwargs):\n profile = self.create_user(*args, **kwargs)\n profile.make_administrator()\n return profile", "title": "" }, { "docid": "c489b833c6cfb32b31d46503ffda4439", "score": "0.65868986", "text": "def test_0000_initiate_users(self):\n self.login(email=common.test_user_1_email, username=common.test_user_1_name)\n self.login(email=common.admin_email, username=common.admin_username)\n self.galaxy_login(email=common.admin_email, username=common.admin_username)", "title": "" }, { "docid": "a8f9b54320826b2c8c64345563bd0a72", "score": "0.65673983", "text": "def create_user(self):\n User.objects.create_user('test', '[email protected]', 'testing')", "title": "" }, { "docid": "29ef2e0c1b9610d2ae523d01e8ba89d1", "score": "0.6545341", "text": "def do_admin_login():\n if request.form['password'] == 'admin' and request.form['username'] == 'admin':\n teams = get_team()\n if teams:\n return render_template('team-players.html', teams=teams)\n else:\n return render_template('team-players.html')\n else:\n flash('Invalid username or password. 
Please try again!')\n return render_template('login.html')", "title": "" }, { "docid": "477c2220745938437e4ccb2c88587152", "score": "0.6528603", "text": "def setup_user(self, admin=False, enroll=False, login=False):\n self.user = AdminFactory() if admin else UserFactory()\n\n if enroll:\n CourseEnrollmentFactory(user=self.user, course_id=self.course.id)\n\n if login:\n self.login()", "title": "" }, { "docid": "52bb844dfd0180b34799ae47dd5d9b7c", "score": "0.65275466", "text": "def setUp(self):\n a, b, c = (\n User.objects.create_user(guy, email=\"%[email protected]\" % guy, password=guy)\n for guy in \"abc\"\n )\n a.is_superuser = True\n a.save()", "title": "" }, { "docid": "76aa5210ebb8164f6e08b5baa3f14f47", "score": "0.65099925", "text": "async def test_auth_admin_non_admin(app):\n name = 'kiwi'\n user = add_user(app.db, app, name=name, admin=False)\n assert user.admin is False\n cookies = await app.login_user(name)\n assert user.admin is False", "title": "" }, { "docid": "126f7051e8753f3c6da3f30794b33c65", "score": "0.65058434", "text": "def _create_superuser():\n try:\n User.objects.get(is_superuser=True)\n except User.DoesNotExist:\n login = os.getenv('SUPERUSER_NAME', 'admin')\n password = os.getenv('SUPERUSER_PASSWORD', 'passw0rd')\n User.objects.create_superuser(username=login, password=password, email='')", "title": "" }, { "docid": "387f64048bfed429381bd5ae76102730", "score": "0.65045154", "text": "def step_impl(context):\n\n from django.contrib.auth.models import User\n u = User(username='test_user', email='[email protected]')\n u.set_password('admin')", "title": "" }, { "docid": "5b2d314fe785d8f1c1d8d1fe7fb746fa", "score": "0.6488751", "text": "def add_admin_user(firstname, lastname, email, password, pseudo):\n\n async def create_user(firstname: str, lastname: str, email: str, password: str, pseudo: str) -> None:\n await Tortoise.init(config=TORTOISE_ORM)\n user = User(firstname=firstname, lastname=lastname, email=email, pseudo=pseudo, is_admin=True)\n user.set_password(password)\n try:\n await user.save()\n except IntegrityError as e:\n click.secho(f'Unable to create user, reason: {e}', fg='red')\n raise click.Abort()\n await Tortoise.close_connections()\n\n anyio.run(create_user, firstname, lastname, email, password, pseudo)\n click.secho(f'admin user {pseudo} created!', fg='green')", "title": "" }, { "docid": "17d307e2bcbedcb34709252a36e407d2", "score": "0.64754814", "text": "async def test_auth_admin_is_admin(app):\n # Admin user defined in MockPAMAuthenticator.\n name = 'admin'\n user = add_user(app.db, app, name=name, admin=False)\n assert user.admin is False\n cookies = await app.login_user(name)\n assert user.admin is True", "title": "" }, { "docid": "5f3075f59c4e4478052fd021615c6e13", "score": "0.6462568", "text": "def test_admin_user_login(self):\n self.login(\"admin\", \"admin\")\n self.should_see(\"This is your profile, admin.\")", "title": "" }, { "docid": "189b6593909e1533d60c5abaec468dee", "score": "0.64383906", "text": "def setUp(self):\n\n # Create client\n self.client = Client()\n\n # Create the admin user\n self.admin_user = get_user_model().objects.create_superuser(\n email='[email protected]',\n password='adminTesting123'\n )\n\n # Login the admin user\n self.client.force_login(self.admin_user)\n\n # Create the reqular user\n self.user = get_user_model().objects.create_user(\n email='[email protected]',\n password='userTesting123',\n name='Test user full name'\n )", "title": "" }, { "docid": "76c8da07cd7844eb2175862c45cd4408", "score": "0.6428133", "text": 
"def setUp(self):\n self.superuser = User.objects.create_superuser(\n 'admin',\n '[email protected]',\n 'StrongPassword123'\n )", "title": "" }, { "docid": "694584d2d254759e7231a7ae5f1658b5", "score": "0.64207995", "text": "def check_login(self):\n admin_exists = self.set_auth_header()\n\n if self.default_login_works:\n self.log.info(\"default login worked, removing it\")\n if admin_exists:\n self.log.info(\"admin user exists, only deleting default user\")\n else:\n # Since the admin user doesn't exist but we were able to authenticate\n # using the default login request an authentication token and\n # explicitly set the object's auth_header to it.\n self.auth_header = self.default_login_auth_header\n\n self.log.info(\"admin user doesn't exist, creating it before deleting default user\")\n self.create_user(self.login, self.password, self.description)\n self.add_user_to_group(self.login, 'superusers')\n\n self.delete_user(self.default_login['login'])\n else:\n if not admin_exists:\n self.log.error(\"default user doesn't exist but admin user doesn't work either - manual intervention required\")\n else:\n self.log.info(\"default user doesn't exist and admin user works - everything looking good\")", "title": "" }, { "docid": "6c562a9587d17dc470d370029e65acd3", "score": "0.63889766", "text": "def test_admin_create_user(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n user = dict(\n name='Summer Love',\n username='love',\n password='Andela8',\n role='attendant'\n )\n\n resp = self.client.post(\n '/api/v1/users',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'Summer Love has been registered')\n self.assertEqual(resp.status_code, 201)", "title": "" }, { "docid": "2177817c9a4d367952458bc2ce8c696d", "score": "0.6385541", "text": "def create_superuser(self, email, role, teams, password):\n user = self.create_user(\n email,\n password=password,\n )\n\n for team in teams:\n al = AccessEntry(email=email, team=team)\n al.save()\n\n\n user.is_admin = True\n user.save(using=self._db)\n return user", "title": "" }, { "docid": "1cf59b4b30e96b57efb7c3df8f90f442", "score": "0.63726145", "text": "def login_as_admin():\n users.loginAsUser(\n config.VDC_ADMIN_USER, config.VDC_ADMIN_DOMAIN,\n config.VDC_PASSWORD, filter=False\n )\n return True", "title": "" }, { "docid": "3041332f62cf0ed35fd8d142ade065fd", "score": "0.63636875", "text": "def createsuperuser(request):\r\n\r\n user = models.User()\r\n user.username = 'admin' # change later\r\n user.email = '[email protected]'\r\n user.set_password(\"qazwsxed\")\r\n user.is_staff = True\r\n user.is_superuser = True\r\n\r\n if models.User.objects.filter(username=user.username).exists():\r\n return redirect('/')\r\n else:\r\n user.save()\r\n return redirect('/console')", "title": "" }, { "docid": "1688b6df491232a9aee1e52447aa4084", "score": "0.6357277", "text": "def tenant_user_admin(db) -> TenantUser:\n with schema_context('public'):\n return TenantUser.objects.create_superuser(\n _USER_PASS,\n email='[email protected]',\n )", "title": "" }, { "docid": "384bd658db2ea4a7aa25172ceffcda02", "score": "0.6355942", "text": "def setUpTestUsers(self) -> None:\n self.password = \"thisisasecret\"\n self.other = get_user_model().objects.create_user(\"other\", password=self.password)\n self.user = get_user_model().objects.create_user(\"user\", password=self.password)\n self.admin = 
get_user_model().objects.create_superuser(\"admin\", password=self.password)\n self.anonymous = AnonymousUser()", "title": "" }, { "docid": "460c2c6a46ae8196f9c0a6e7444ab907", "score": "0.63428617", "text": "def test_0000_initiate_users( self ):\n self.login( email=common.test_user_1_email, username=common.test_user_1_name )\n test_user_1 = self.test_db_util.get_user( common.test_user_1_email )\n assert test_user_1 is not None, 'Problem retrieving user with email %s from the database' % common.test_user_1_email\n self.test_db_util.get_private_role( test_user_1 )\n self.login( email=common.admin_email, username=common.admin_username )\n admin_user = self.test_db_util.get_user( common.admin_email )\n assert admin_user is not None, 'Problem retrieving user with email %s from the database' % common.admin_email\n self.test_db_util.get_private_role( admin_user )", "title": "" }, { "docid": "9323a52ec0a9b3a0af9610508a149fa3", "score": "0.6334718", "text": "def get(self):\n create_all()\n \n admin = create_user( username = 'admin', \n email = '[email protected]',\n password = 'adminpass'\n )\n guest = create_user( username = 'guest', \n email = '[email protected]',\n password = 'guestpass'\n )\n commit(self.db, [admin, guest])\n self.render_template(\"dbadmin.html\", args={'users':[], 'operation':\"Create Database\"})", "title": "" }, { "docid": "c044531c0eb6d721c20cd1ef19a1aad2", "score": "0.63241065", "text": "def admin_required(handler):\n def admin_login(self, *args, **kwargs):\n auth = self.auth\n if not auth.get_user_by_session():\n self.redirect('/auth/login', abort=True)\n \n user = auth.get_user_by_session()\n queried_entity = User.get_by_id(user['user_id'])\n \n if queried_entity and queried_entity.phb_user_admin_status == 'admin-1':\n return handler(self, *args, **kwargs)\n else:\n self.redirect('/', abort = True)\n \n return admin_login", "title": "" }, { "docid": "5b4d4065b3713944579638b8576c85d9", "score": "0.63127476", "text": "def createsuperuser():\n\n email = prompt('User E-Mail')\n email_confirm = prompt('Confirm E-Mail')\n\n if not email == email_confirm:\n sys.exit('\\nCould not create user: E-Mail did not match')\n\n if not EMAIL_REGEX.match(email):\n sys.exit('\\nCould not create user: Invalid E-Mail addresss')\n\n password = prompt_pass('User password')\n password_confirm = prompt_pass('Confirmed password')\n\n if not password == password_confirm:\n sys.exit('\\nCould not create user: Passwords did not match')\n\n datastore = SQLAlchemyUserDatastore(db, User, Role)\n datastore.create_user(\n email=email,\n password=encrypt_password(password),\n active=True,\n super_user=True)\n\n db.session.commit()", "title": "" }, { "docid": "dbba70d0158a784396d36d2a15d5e52e", "score": "0.6297302", "text": "def create_user(ctx, db_username, db_password, project_name):\n project = ctx.obj.groups.byName[project_name].get().data\n user = cmd.ensure_admin_user(\n client=ctx.obj, project_id=project.id, username=db_username,\n password=db_password)\n pprint(user)", "title": "" }, { "docid": "9be7e60420f8d05a673b80717e5abb73", "score": "0.6297232", "text": "def admin():\n aaa.require(role='admin', fail_redirect='/sorry_page')\n return dict(\n current_user=aaa.current_user,\n users=aaa.list_users(),\n roles=aaa.list_roles()\n )", "title": "" }, { "docid": "60e998e212bd202101d1109446012025", "score": "0.62733936", "text": "def test_create_user(self):\n #open the django admin page.\n self.selenium.get(\n '%s%s' % (self.live_server_url, \"/admin\")\n )\n\n #fill in login information of admin\n 
username = self.selenium.find_element_by_id(\"id_username\")\n username.send_keys(\"admin\")\n password = self.selenium.find_element_by_id(\"id_password\")\n password.send_keys(\"admin\")\n\n #locate login button and click it.\n self.selenium.find_element_by_xpath('//input[@value=\"Inloggen\"]').click()\n self.selenium.get(\n '%s%s' % (self.live_server_url, \"/admin/auth/user/add/\")\n )\n\n # Fill the create user form with username and password\n self.selenium.find_element_by_id(\"id_username\").send_keys(\"test\")\n self.selenium.find_element_by_id(\"id_password1\").send_keys(\"test1234\")\n self.selenium.find_element_by_id(\"id_password2\").send_keys(\"test1234\")\n\n # Forms can be submitted directly by calling its method submit\n self.selenium.find_element_by_id(\"user_form\").submit()\n self.assertIn(\"Change user\", self.selenium.title)", "title": "" }, { "docid": "615d6f8963c61d5d00fa90970e0872c6", "score": "0.6271346", "text": "def administrator():\n\n administrator = Administrator.objects.create(name='Michał', surname='Paluch',\n login='Udfsr43', password='Password_3',\n password_repeat='Password_3')\n return administrator", "title": "" }, { "docid": "0780cc7f20fe0128027a7fe1de0a3d8f", "score": "0.6255125", "text": "def post(self):\n DA = DataAccessor()\n\n session = getSessionByRequest(self)\n\n uid = self.request.get('uid')\n pw = self.request.get('pw')\n pw2 = self.request.get('pw2')\n\n if pw != pw2:\n setSessionMessage(session, \"Your new passwords did not match. Please try again.\", True)\n self.redirect('/admin')\n return\n\n try:\n DA.addAdmin(uid, pw)\n except Usage:\n setSessionMessage(session, \"A user with that uid exists already\", True)\n self.redirect('/admin')\n return\n \n setSessionMessage(session, \"Admin: \" + uid + \" successfully added.\", False)\n self.redirect('/admin')", "title": "" }, { "docid": "f0e30cb48ee84ed67cc515e8dc0946c3", "score": "0.62493145", "text": "def populate_user_data():\n try:\n db = mongo_client.MongoClient(config.MONGO_URI).twitter\n db.user.insert_one(\n {\n 'username': 'admin',\n 'password': 'admin',\n }\n )\n print(\"Created an admin account\")\n except Exception as e:\n print(e)", "title": "" }, { "docid": "c811e6c4eae1198a8ea53d0192e02766", "score": "0.6237831", "text": "def admin_con():\n user = users.get_current_user()\n if user:\n if users.is_current_user_admin() or is_local_admin():\n admins_query = Admins.query(ancestor = admin_base).order(-Admins.date)\n admins = admins_query.fetch()\n output = template('admin', name=g_name, log_in_out = users.create_logout_url('/'), opt = 'Выход', user = user.nickname(), admins = admins)\n return output\n else:\n redirect('/')\n else:\n redirect('/')", "title": "" }, { "docid": "318b48de3ebf45149e81a166af81a172", "score": "0.6237285", "text": "def bootstrap():\n db.create_all()\n os.environ['ADMIN_EMAIL'] = '[email protected]'\n os.environ['ADMIN_PASSWORD'] = '1111'\n admin = User(email='[email protected]', username='Maxim', password='1111')\n admin.gravatar()\n db.session.commit()\n db.session.add(admin)\n User._bootstrap()\n Interest._bootstrap()", "title": "" }, { "docid": "54bd3e05ca468c2eff4f722040a51d60", "score": "0.62359023", "text": "def create_admin_tenant(tenant, user_id, password, url):\n user = get_user_model().objects.get(pk=user_id)\n tenant = Tenant(schema_name=tenant)\n\n # Send email of welcome\n send_mailgun(\"Bienvenido a SCR\", user.email, url)\n\n with tenant_context(tenant):\n get_user_model().objects.create_superuser(email=user.email, password=password, 
first_name=user.first_name, last_name=user.last_name)", "title": "" }, { "docid": "b1b0c35c06585d2458e76a0325f9d2fe", "score": "0.6232685", "text": "def initDb():\n createDb()\n admin = User(\n name=\"faby\",\n lastname=\"star\",\n username=\"faby\",\n email=\"[email protected]\",\n isAdmin=True,\n cellphone=\"0983856136\",\n )\n admin.onSetPassord(\"faby123\")\n db.session.add(admin)\n db.session.commit()", "title": "" }, { "docid": "57420e275b04372027fb1df6d66eabc6", "score": "0.62187886", "text": "def test_create_admin():\n os.environ[\"ADMIN_EMAIL\"] = \"[email protected]\"\n os.environ[\"ADMIN_PASSWORD\"] = \"password\"\n\n output = io.StringIO()\n call_command(\"createadmin\", stdout=output)\n\n assert get_user_model().objects.count() == 1\n\n admin = get_user_model().objects.get()\n\n assert admin.check_password(os.environ[\"ADMIN_PASSWORD\"])\n assert admin.first_name == \"Admin\"\n assert admin.last_name == \"User\"\n assert admin.is_staff\n assert admin.is_superuser\n assert admin.email_addresses.count() == 1\n\n email = admin.email_addresses.get()\n\n assert email.email == os.environ[\"ADMIN_EMAIL\"]\n assert email.is_primary\n assert email.is_verified", "title": "" }, { "docid": "ae97cd903765e50d87be4ea6851cd84b", "score": "0.6218735", "text": "def after_db_init():\n with app_instance.app_context():\n # Creates any models that have been imported\n db.create_all()\n\n # Init security for the application\n from .security import user_datastore\n\n # Create the Admin user\n if not UserModel.find(1):\n user_datastore.create_role(name='_permissions | admin')\n user_datastore.create_role(name='_permissions | manager')\n user_datastore.create_role(name='_permissions | agent')\n user_datastore.create_user(\n username='admin',\n email='[email protected]',\n password='password',\n first_name='Super',\n last_name='Admin',\n roles=['_permissions | admin']\n )\n db.session.commit()\n\n # Register the admin views to the extension\n admin.add_view(\n UsersView(\n UserModel, db.session, name='Manage Users', category='User Admin'\n )\n )\n admin.add_view(RolesView(RolesModel, db.session, name='Manage Privileges', category='User Admin'))", "title": "" }, { "docid": "a3e320e888afa4ec137eb0bf4dd62ff3", "score": "0.6212713", "text": "def configure_admin_user(session, account_id, admin_role, in_use):\n sys.stderr.write(\"Creating IAM client...\" + \"\\n\")\n iam = session.client(\"iam\")\n sys.stderr.write(\n \"Creating managed policy for protecting organization assets...\" + \"\\n\")\n iam.create_policy(\n PolicyName=AWS_IAM_PROTECTION_POLICY_NAME,\n Description=(\n \"Provides default-deny control over the Organization roles and resources that \"\n \"cannot be controlled through organization SCPs.\"),\n PolicyDocument=\"\"\"{\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Sid\": \"Stmt1500485872000\",\n \"Effect\": \"Deny\",\n \"Action\": [\n \"iam:*\"\n ],\n \"Resource\": [\n \"arn:aws:iam::%s:role/%s\",\n \"arn:aws:iam::%s:role/%s\"\n ]\n }\n ]\n }\n \"\"\" % (account_id, admin_role, account_id, AWS_CLOUDTRAIL_ROLE_NAME))\n\n sys.stderr.write(\"Creating user...\" + \"\\n\")\n iam.create_user(UserName=AWS_IAM_USER_NAME)\n sys.stderr.write(\"Attached AWS managed AdministratorAccess policy...\" +\n \"\\n\")\n iam.attach_user_policy(\n UserName=AWS_IAM_USER_NAME,\n PolicyArn=\"arn:aws:iam::aws:policy/AdministratorAccess\")\n iam.attach_user_policy(\n UserName=AWS_IAM_USER_NAME,\n PolicyArn=\"arn:aws:iam::%s:policy/%s\" %\n (account_id, AWS_IAM_PROTECTION_POLICY_NAME))\n 
sys.stderr.write(\"IAM user created and policies attached.\" + \"\\n\")\n\n password = base64.b64encode(os.urandom(32))\n iam.create_login_profile(\n UserName=AWS_IAM_USER_NAME,\n Password=password,\n PasswordResetRequired=True)\n sys.stderr.write(\"IAM user (%s) password changed to: %s\" % (\n AWS_IAM_USER_NAME, password) + \"\\n\")\n return password", "title": "" }, { "docid": "9db447c18c2be916faa38bfd1ecde2cb", "score": "0.62043613", "text": "def login_permitted_user(self):\n self.grant_permission()\n self.client.login(username=\"john\", password=\"pass\")", "title": "" }, { "docid": "a5d6eff113b6f6bf25ab8f658276a751", "score": "0.6197827", "text": "def authAdmin(self, email='[email protected]'):\n admin = self._createUser(email=email, role=UserType.ADMIN)\n return admin, self._authenticate(admin)", "title": "" }, { "docid": "986c9448da14a89a0acfcc52ab7d5136", "score": "0.61894035", "text": "def create_user(user_name, password, tenant_name, auth_admin_url, admin_token):\n keystone = get_client(auth_admin_url, admin_token)\n tenants = keystone.tenants.list()\n my_tenant = [x for x in tenants if x.name==tenant_name][0]\n my_user = keystone.users.create(name=user_name, password=password, tenant_id=my_tenant.id)\n print my_user\n return my_user.to_dict()", "title": "" }, { "docid": "2018e33d48aa379c04a16e6be3e04536", "score": "0.61777014", "text": "def add_admin(self, username, password):\n password_hash = generate_password_hash(password) # Generates a SHA256 hash.\n try:\n self.cur.execute(\"INSERT INTO admins VALUES(\\\"{}\\\", \\\"{}\\\")\".format(username, password_hash))\n self.db.commit()\n except:\n self.db.rollback()", "title": "" }, { "docid": "89d31d780602987b2974ed3be86fb23c", "score": "0.61725676", "text": "def makeNewUser(self, p_loginData, p_adminPassword):\n if not self.validNewUserInput(p_loginData.username, p_loginData.password):\n return FailureCodes.INVALID_USER_INPUT\n\n p_username = p_loginData.username\n p_password = hash_password(p_loginData.password)\n p_adminPassword = hash_password(p_adminPassword)\n if self.validUser():\n return FailureCodes.MISSING_PERMISSIONS\n if not self.validNumUsers():\n return FailureCodes.TOO_MANY_USERS\n\n\n # This will enforce only Admin can create users, Currently anyone can create user\n # # If current user's role isn't admin, return \n # if self.user.getRole() != UserRole.ADMIN:\n # return FailureCodes.MISSING_PERMISSIONS\n\n # # If current user isn't admin, return \n # if self.user.getUsername() != C_ADMINISTRATOR_USERNAME:\n # return FailureCodes.MISSING_PERMISSIONS\n \n # # Verify administrator password\n # if not verify_password(self.user.getPassword(), p_adminPassword):\n # return FailureCodes.INVALID_CREDENTIALS\n\n # If user already exists, return False\n if self.dbManager.userExists(p_username):\n return FailureCodes.EXISTING_USER\n\n # Store user in database\n self.dbManager.createUser(p_username, p_password, UserRole.USER, defaultPacemakerParameterData)\n return FailureCodes.VALID", "title": "" }, { "docid": "481ca4e75c8be68d3d8036e323e1eb49", "score": "0.61712366", "text": "def test_login(self):\n self.user_api()\n self.base.metadata.create_all(self.engine)\n people = self.provision_users()\n p = {'__action': 'login', 'id': people[0].id, 'password': \"testing\"}\n self.post('user', 200, params=p)", "title": "" }, { "docid": "920270f55e272031acff9d2f2d36b3ea", "score": "0.616812", "text": "def _setup_user(self, is_staff=False, password=None):\r\n email = 'foo_{0}@test.com'.format(uuid4().hex[:8])\r\n password = password if 
password else 'foo'\r\n username = 'test_{0}'.format(uuid4().hex[:8])\r\n self.create_account(username, email, password)\r\n self.activate_user(email)\r\n\r\n # manually twiddle the is_staff bit, if needed\r\n if is_staff:\r\n user = User.objects.get(email=email)\r\n user.is_staff = True\r\n user.save()\r\n\r\n return email, password", "title": "" }, { "docid": "7040f5e999e7daa7a1f505c23e84d0b5", "score": "0.6162969", "text": "def test_get_create_post_as_admin_user(self):\n login = self.client.login(username='testuser_admin', password='password12345')\n\n if login:\n url = reverse('blogs:create')\n response = self.client.get(url)\n\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'create.html')\n self.client.logout()\n else:\n # TODO Make this dynamic rather than hard coded text string\n self.fail('Login Failed for testuser_staff')", "title": "" }, { "docid": "793eca70ff07094d7d48d7b87bbd0ba0", "score": "0.61608326", "text": "def connectToAdmin():\n connect(url='t3://' + admin_host + ':' + admin_port,\n adminServerName='AdminServer',\n username=admin_username,\n password=admin_password)", "title": "" }, { "docid": "0d8dcb58efc6346425d95732c6b29afa", "score": "0.6153886", "text": "def addAdmin(username, sshId, user, identity):\n if identity:\n env.key_filename = identity\n if user:\n env.user = user\n sudo('adduser --disabled-password --gecos \",,,\" %s' % username)\n sudo('usermod -p \"\" %s' % username)\n sudo('chage -d 0 %s' % username)\n sudo('gpasswd --add %s admin' % username)\n authorizeSshKey(username, sshId)", "title": "" }, { "docid": "baff21c672d2b50f6d441856d91719d5", "score": "0.61467886", "text": "def post(self):\n if not util.DEVT:\n abort(404) # very important - dont give users the opportunity to destroy our entire user base\n \n def delete_all():\n ndb.delete_multi(u.User .query().fetch(keys_only=True))\n ndb.delete_multi(u.AuthId.query().fetch(keys_only=True))\n \n def create_admin():\n u.User.create ( username ='admin'\n , email_ ='[email protected]' \n , pwdhash__ =pwd.encrypt('123456')\n , isAdmin_ =True\n , isVerified_=True\n , isActive_ =True\n , authIds =u.randomAuthIds()\n )\n #User.put(admin)\n\n def create_user(n):\n name = 'tutshka%d' % n \n u.User.create ( username =name\n , email_ =name+'@xyz.com'\n , pwdhash__ =pwd.encrypt('123456')\n , isAdmin_ =False\n , isVerified_=random.choice((True, False))\n , isActive_ =random.choice((True, False))\n , bio =random.choice(('All component', 'things are', 'impermanent: work', 'out your', 'own salvation', 'with diligence.'))\n , authIds =u.randomAuthIds()\n )\n #u.addRandomAuthIds()\n #User.put(usr)\n \n delete_all()\n NumUsers = 15\n for n in xrange(NumUsers):\n create_user(n)\n create_admin()\n return ok()", "title": "" }, { "docid": "13afa1f775b7cf3680d5e20e616a4b43", "score": "0.6136692", "text": "def login_user(self):\r\n self.client.login(username=self.user.username, password=\"password\")", "title": "" }, { "docid": "b7604952891f7a77cd5b363d8121944f", "score": "0.6134593", "text": "def install():\n # check if admin exists\n from enferno.user.models import Role\n a = Role.query.filter(Role.name == 'admin').first()\n\n if a is None:\n r = Role(name='admin')\n try:\n db.session.add(r)\n db.session.commit()\n u = click.prompt('Admin Email?', default='[email protected]')\n p = click.prompt('Admin Password (min 6 characters)?', default='enferno')\n user = User(email=u, password=hash_password(p), active=1)\n user.roles.append(r)\n user.save()\n except Exception as e:\n 
db.session.rollback()\n else:\n print('Seems like an Admin is already installed')", "title": "" }, { "docid": "5f77d161e72598672fbf83a98c8ce48e", "score": "0.6122506", "text": "def setUp(self):\n\n self.superuser = User.objects.create_superuser(\n name='Victor Arnaud',\n email='[email protected]',\n password='victorhad123456'\n )\n self.user = User.objects.create_user(\n name='Pedro Calile',\n email='[email protected]',\n password='pedro123456'\n )\n self.client.force_authenticate(self.user)\n self.url = reverse('user-change-password')", "title": "" }, { "docid": "d04b819e9d9b4da2de82e98bb20af1f5", "score": "0.61224097", "text": "def create_superuser(self, email, name, password):\n\n user = self.create_user(email, name, password)\n\n # Make this user an admin.\n user.is_superuser = True\n user.is_staff = True\n user.save(using=self._db)\n\n return user", "title": "" }, { "docid": "ad45290646b55d2c77500edd12f01b9e", "score": "0.6116917", "text": "def create_superuser(self, username, email, password):\n user = self.create_user(\n username,\n email,\n password=password,\n )\n user.admin = True\n user.staff = True\n user.save(using=self._db)\n return user", "title": "" }, { "docid": "885a5df5e36d028f521a44bafb568e4a", "score": "0.61167014", "text": "def init():\n create_user(app)\n get_all_user()", "title": "" }, { "docid": "9f4cc8edb0a7770a3ecad70b6bf889f4", "score": "0.61162215", "text": "def create_users(self):\n from django.contrib.auth.models import User\n user = User.objects.create_user('red', '', 'red')\n user = User.objects.create_user('green', '', 'green')\n user = User.objects.create_user('blue', '', 'blue')", "title": "" }, { "docid": "234ce6242ad924727db72dc956a54a5b", "score": "0.611089", "text": "def setup_test_user(self):\n self.setup_test_tenant()\n self.test_user = rand_name('test_user_')\n self.test_password = rand_name('pass_')\n self.test_email = self.test_user + '@testmail.tm'\n resp, self.user = self.client.create_user(self.test_user,\n self.test_password,\n self.tenant['id'],\n self.test_email)\n self.users.append(self.user)", "title": "" }, { "docid": "2f3a616d040564931cbb52164950d0c8", "score": "0.6097407", "text": "def create_account():\n if request.method == 'POST':\n username = request.form['username']\n password = request.form['password']\n\n user = create_user(username, password)\n\n if not user:\n return redirect(url_for('login'))\n\n session['username'] = user.username\n session['user_id'] = user.id\n session['logged_in'] = True\n session['is_admin'] = user.is_admin\n\n return redirect(url_for('index'))\n\n return render_template('createaccount.html')", "title": "" }, { "docid": "809f1440f62facbb09564ef2dd07aa59", "score": "0.60915995", "text": "def _setstaff_login(self):\r\n GlobalStaff().add_users(self.user)\r\n self.client.login(username=self.user.username, password='foo')", "title": "" }, { "docid": "5458bb43041c0915fad2e2711d510145", "score": "0.609117", "text": "def create_superuser(self, login_name, email, password):\n user = self.create_user(\n login_name,\n email,\n password=password,\n )\n user.is_admin = True\n user.save(using=self._db)\n return user", "title": "" }, { "docid": "8b21535ab52b3242fa00d34a39f09825", "score": "0.60717237", "text": "def test_09_admin_users_as_admin(self):\r\n self.register()\r\n res = self.app.get('/admin/users', follow_redirects=True)\r\n assert \"Manage Admin Users\" in res.data, res.data", "title": "" }, { "docid": "896384986e7b7dd4b87de2363d6f3485", "score": "0.6063123", "text": "def setUp(self):\n self.credentials = 
{\"username\": \"BobRobert\", \"password\": \"fglZfYmr%?,\"}\n User.objects.create_user(**self.credentials)", "title": "" }, { "docid": "8b99285dab6218685f617ba1f5ce4d45", "score": "0.606047", "text": "def init(username, password):\r\n click.echo('Initializing the database...')\r\n db.create_all()\r\n\r\n admin = Admin.query.first()\r\n if admin:\r\n click.echo('The adminstrator already exists, updating...')\r\n admin.username = username\r\n admin.set_password(password)\r\n else:\r\n click.echo('Creating the temporary administrator account..')\r\n admin = Admin(\r\n username=username,\r\n blog_title='Bluelog',\r\n blog_sub_title=\"No, I'm the real thing.\",\r\n name='Admin',\r\n about='Anything about you'\r\n )\r\n admin.set_password(password)\r\n db.session.add(admin)\r\n\r\n category = Category.query.first()\r\n if category is None:\r\n click.echo('Creating the default category...')\r\n category = Category(name='默认')\r\n db.session.add(category)\r\n\r\n db.session.commit()\r\n click.echo('Done.')", "title": "" } ]
903d5f542a2e807e35b3198bef1ffaa9
Handle logout of user.
[ { "docid": "f97b4f46bf2fa048a49a8aba7416e172", "score": "0.0", "text": "def logout():\n do_logout()\n flash('Success logging out', 'success')\n return redirect('/')", "title": "" } ]
[ { "docid": "5415155b12d3cc37bb140d0d9eaf3af6", "score": "0.7879216", "text": "def _user_logout(self):\n \n self._user = None", "title": "" }, { "docid": "f8fffaa154dcf2eccd4acc3d996b70a3", "score": "0.7867465", "text": "def handlelogout(request):\n logout(request)\n messages.success(request, \"Your have successfully logged out\")\n return redirect(\"idu:home\")", "title": "" }, { "docid": "15d9a3ccd8c0001815fc59fb8148d3ee", "score": "0.7859247", "text": "def logout():\n logout_user()\n return", "title": "" }, { "docid": "c72b506f99d69d409902f3ae62aae415", "score": "0.7834874", "text": "def handle_logout(username):\n if username in users:\n logout_user(users[username])", "title": "" }, { "docid": "b82bbc7faf91e9d25d00797e2e81693c", "score": "0.781424", "text": "def logout_user(): # noqa: E501\n return 'do some magic!'", "title": "" }, { "docid": "beac873aae1b20df9e843f46024ee9ce", "score": "0.7794043", "text": "def logout_user(request):\n logout(request)\n return login_user(request)", "title": "" }, { "docid": "2af447998f5e108667db26a10ead4263", "score": "0.77687645", "text": "def process_logout():\n\n session.pop('user_id')\n flash('Logout Successful')\n\n return redirect('/')", "title": "" }, { "docid": "3f7cf453b88c659db8c90e532df3aa6c", "score": "0.77673435", "text": "def logout_handler():\n session.pop('username', None) # Remove username variable from session, indicating no logged in account\n flash('You were logged out')\n return redirect(url_for('homepage'))", "title": "" }, { "docid": "9bee3eb073044ac5c9e40246e5c0bb9d", "score": "0.7759413", "text": "def logout_user(request):\n logout(request)\n return HttpResponse()", "title": "" }, { "docid": "91067c8801dc9105e5bac5b71898cf51", "score": "0.77476716", "text": "def logout():\n context = Context()\n context.set_value(\"Customer: \" + current_user.email + \" has logged out.\")\n logging_dispatcher.callback(context)\n logout_user()\n return redirect(url_for('login'))", "title": "" }, { "docid": "27a10c7cb2b4909908ae783fbb720a8d", "score": "0.77369565", "text": "def handle_logout(self, handle_logout):\n\n self._handle_logout = handle_logout", "title": "" }, { "docid": "0582c68f70ba0c818537a41415ac7ba0", "score": "0.7707407", "text": "def logout_user(request):\n logout(request)\n return redirect('login')", "title": "" }, { "docid": "0582c68f70ba0c818537a41415ac7ba0", "score": "0.7707407", "text": "def logout_user(request):\n logout(request)\n return redirect('login')", "title": "" }, { "docid": "94ade436512aeaeeed85faa8defde9c8", "score": "0.7695144", "text": "def user_logout(request):\n\t# Since we know the user is logged in, we can now just log them out.\n\tlogout(request)\n\n\t# Take the user back to the homepage.\n\treturn HttpResponseRedirect('/weave/teacher_interface')", "title": "" }, { "docid": "6c720f9b67f29691d95c94cf035fe55a", "score": "0.76807904", "text": "def logout_user(request):\n\n logout(request)\n return redirect(reverse('login-url'))", "title": "" }, { "docid": "69ef5d2d6f004b94869d85d18c3d7cfc", "score": "0.7675707", "text": "def post_logout(self, came_from=lurl('/')):", "title": "" }, { "docid": "5b9c3f87e09ce05e9a9f267339eae6d6", "score": "0.7675107", "text": "def logout_user(userid):\n pass", "title": "" }, { "docid": "21eef5440e483b689689852f338e0628", "score": "0.76503503", "text": "def logUserOut(self):\n self.redirect(self.getLogoutUrl())", "title": "" }, { "docid": "40c7c1d2b2d4f01d2090db1baeb6cfc2", "score": "0.7641568", "text": "def logout_user(self, request):\n logger.debug(\"Logout request received for 
%s\" % request.user)\n if request.user.is_authenticated():\n ServiceTicket.objects.consume_tickets(request.user)\n ProxyTicket.objects.consume_tickets(request.user)\n ProxyGrantingTicket.objects.consume_tickets(request.user)\n\n if getattr(settings, 'MAMA_CAS_ENABLE_SINGLE_SIGN_OUT', False):\n ServiceTicket.objects.request_sign_out(request.user)\n\n logger.info(\"Single sign-on session ended for %s\" % request.user)\n logout(request)\n msg = _(\"You have been successfully logged out\")\n messages.success(request, msg)", "title": "" }, { "docid": "8f8ac7875939b05d7645507667b21c16", "score": "0.7630399", "text": "def logout_user(self):\n url = reverse('rest_framework:logout')\n self.client.get(url)", "title": "" }, { "docid": "40a6d2c515b2b2be46055b98a228e193", "score": "0.76287204", "text": "def logout_user(request, *args, **kwargs):\n logout(request)\n return HttpResponseRedirect(reverse('login'))", "title": "" }, { "docid": "47209b6c9f36cb0e64910fa0b700e2d9", "score": "0.7623376", "text": "def logout(request):\n\t# log user out, end session\n\t# display success message ?\n # Dispatch the signal before the user is logged out so the receivers have a\n # chance to find out *who* logged out.\n\tuser = getattr(request, 'user', None)\n\tif hasattr(user, 'is_authenticated') and not user.is_authenticated:\n\t\tuser = None\n\t# user_logged_out.send(sender=user.__class__, request=request, user=user)\n\n # remember language choice saved to session\n\t# language = request.session.get(LANGUAGE_SESSION_KEY)\n\n\trequest.session.flush()\n\n\t# if language is not None:\n\t# \trequest.session[LANGUAGE_SESSION_KEY] = language\n\n\tif hasattr(request, 'user'):\n\t\tfrom django.contrib.auth.models import AnonymousUser\n\t\trequest.user = AnonymousUser()\n\n\t# try:\n\t# \tdel request.session['member_id']\n\t# except KeyError:\n\t# \tpass\n\t# return HttpResponse(\"You're logged out.\")\n\treturn redirect(reverse(\"login\"))", "title": "" }, { "docid": "ac47d35bb29142c559212ba2cb0b3dff", "score": "0.75882417", "text": "def logout():\n # Remove the user information from the session\n userlog = CmsUserLog.objects.filter(id=current_user.get_userlog_id()).first()\n userlog.logout_time = datetime.now()\n userlog.save()\n logout_user()\n # Remove session keys set by Flask-Principal\n for key in ('identity.name', 'identity.auth_type'):\n session.pop(key, None)\n # Tell Flask-Principal the user is anonymous\n identity_changed.send(current_app._get_current_object(), identity=AnonymousIdentity())\n return redirect(app.config['BASE_URL'])", "title": "" }, { "docid": "3d500e951b3c75d625a48fc51030ccae", "score": "0.7575961", "text": "def logout():\n user_session.end()\n flash_success(gettext('Successfully logged out.'))", "title": "" }, { "docid": "1ebb833d5df49a0b07e1949818ba998e", "score": "0.75585794", "text": "def logout_user(request):\n\n logout(request)\n return redirect('home page')", "title": "" }, { "docid": "98d95179feb98af4aa2862641a73fe6e", "score": "0.7556592", "text": "def logout_user(request):\n logout(request)\n return redirect('base_app:mainpage')", "title": "" }, { "docid": "6c53cbf1e7ebd1dc6330b059f203c932", "score": "0.7538258", "text": "def user_logout(request):\n logout(request)\n\n return HttpResponseRedirect('/matchApp/')", "title": "" }, { "docid": "a0e53b5cfcf70cf991edb41546f6c33e", "score": "0.7514304", "text": "def logout():", "title": "" }, { "docid": "da024f960eedd7c57cc43cf0170e8ed5", "score": "0.75107855", "text": "def logout_user():\n\n del session[\"user_id\"]\n flash(\"You are logged 
out.\")\n return redirect(\"/\")", "title": "" }, { "docid": "15b9d09633395d2cdf8fcb7ace896c19", "score": "0.7492386", "text": "def logout_user():\n if 'credentials' in flask.session:\n credentials = client.OAuth2Credentials.from_json(\n flask.session['credentials'])\n user = controller.get_session_username(credentials)\n flask.session.pop('credentials', None)\n beacons.app.logger.warning('USER:' + user +\n '\\nis successfully Logged out.')\n\n return flask.redirect(flask.url_for('portal.oauth2callback'))", "title": "" }, { "docid": "4232cc6e138891b4f9748f6ae6885aee", "score": "0.7483679", "text": "def logout():\n logout_user()\n return redirect('/')", "title": "" }, { "docid": "4c2ee86c740be82e52c221d2327dd696", "score": "0.74632317", "text": "def logout():\n\tlogout_user()\n\treturn redirect(url_for('home'))", "title": "" }, { "docid": "dded465ecef589f98b400d3325cd69d5", "score": "0.7452407", "text": "def logout():\r\n\tlogout_user()\r\n\treturn redirect(url_for('home'))", "title": "" }, { "docid": "8791ccd32036a09d6a0cfbdc5c6bba9a", "score": "0.7447691", "text": "def logout():\n logout_user()\n return redirect(url_for('login'))", "title": "" }, { "docid": "8791ccd32036a09d6a0cfbdc5c6bba9a", "score": "0.7447691", "text": "def logout():\n logout_user()\n return redirect(url_for('login'))", "title": "" }, { "docid": "0cd77beddc35f05b7bab00f075d05c38", "score": "0.74411404", "text": "def logout():\n user_manager = current_app.user_manager\n # Send user_logged_out signal\n signals.user_logged_out.send(current_app._get_current_object(), user=current_user)\n # Use Flask-Login to sign out user\n logout_user()\n # Redirect to logout_next endpoint or '/'\n next = request.args.get('next', url_for(user_manager.after_logout_endpoint)) # Get 'next' query param\n return redirect(next)", "title": "" }, { "docid": "19c4bdba39f9c727db9fd01d73da234b", "score": "0.74391496", "text": "def user_logout(request):\n logout(request)\n return HttpResponseRedirect(reverse('basic_app:index'))", "title": "" }, { "docid": "ab6f02cc43221f90bd0683dc0874c9b5", "score": "0.74305123", "text": "def logout():\n return on_logout()", "title": "" }, { "docid": "265dcd44de4e3cc6c89a437b2048c1ce", "score": "0.743015", "text": "def logout():\n logout_user()\n return redirect(\"/\")", "title": "" }, { "docid": "655ebd61c953b2e8cd228589a9532a73", "score": "0.74172264", "text": "def logout_user(request):\n if request.user.is_authenticated:\n logout(request)\n return redirect('/')", "title": "" }, { "docid": "655ebd61c953b2e8cd228589a9532a73", "score": "0.74172264", "text": "def logout_user(request):\n if request.user.is_authenticated:\n logout(request)\n return redirect('/')", "title": "" }, { "docid": "65f4181f13eb17b04806f9d10903bafe", "score": "0.741659", "text": "def logout(self, request):\n\n fiuzu_logout(request)", "title": "" }, { "docid": "4979bfd5882ea380ba82de789b194a9d", "score": "0.7415805", "text": "def logout():\n flash(\"You have been successfully logged out!\")\n session.pop(\"user\")\n return redirect(url_for(\"home\"))", "title": "" }, { "docid": "020a87030d905d1047f5a3f9261f3ef3", "score": "0.740723", "text": "def logout(self, request):\n logout(request)\n return response.Ok({\"success\": \"Successfully logged out.\"})", "title": "" }, { "docid": "7af3b8e6bc2ad909a6161b37c75553b6", "score": "0.7394802", "text": "def logout():\n\n user = session.get('logged_in_user', None)\n\n if not user:\n return redirect('/login')\n\n logout_user()\n data.user = None\n data.board = None\n session.pop('logged_in_user')\n\n return 
redirect('/')", "title": "" }, { "docid": "5b40775fe1ee15da28af5d93e3af3a90", "score": "0.73922753", "text": "def logout():\n flash(\"You have been logged out\")\n session.pop(\"user\")\n return redirect(url_for(\"login\"))", "title": "" }, { "docid": "5b40775fe1ee15da28af5d93e3af3a90", "score": "0.73922753", "text": "def logout():\n flash(\"You have been logged out\")\n session.pop(\"user\")\n return redirect(url_for(\"login\"))", "title": "" }, { "docid": "627bd2900c111161fbf25380b1e7640d", "score": "0.7384441", "text": "def logout():\n\n session.pop(\"user_id\")\n\n return redirect(\"/\")", "title": "" }, { "docid": "9758bb7fd63ed05074a6e32c67c82487", "score": "0.738304", "text": "def logout():\n global actualUserInfo\n\n #Eliminamos al usuario\n actualUserInfo.pop(current_user.id, None)\n logout_user()\n return redirect((url_for('index')))", "title": "" }, { "docid": "3fbae94f64d5a7e09122259f98f36df4", "score": "0.7382162", "text": "def logout():\n flash(\"You have been logged out!\")\n session.pop(\"user\")\n return redirect(url_for(\"login\"))", "title": "" }, { "docid": "8391f400f15270eb65d24293fe6f2aef", "score": "0.7380695", "text": "def logout_user():\n session.pop('user_id')\n flash(\"Goodbye!\", \"info\")\n return redirect('/')", "title": "" }, { "docid": "74b3c286b2694a103d75c0f6f08590cb", "score": "0.73735994", "text": "def user_logout():\n session.clear()\n return redirect(url_for(\"home\"))", "title": "" }, { "docid": "0147b7c1ec288e1432efd98b50782060", "score": "0.73711234", "text": "def logout():\n logout_user()\n return redirect(url_for('home'))", "title": "" }, { "docid": "0147b7c1ec288e1432efd98b50782060", "score": "0.73711234", "text": "def logout():\n logout_user()\n return redirect(url_for('home'))", "title": "" }, { "docid": "0147b7c1ec288e1432efd98b50782060", "score": "0.73711234", "text": "def logout():\n logout_user()\n return redirect(url_for('home'))", "title": "" }, { "docid": "853fba8bf96373f6856bd1413319e942", "score": "0.7367605", "text": "def logout():\n if user not in session:\n flash('Please log in to access this page!')\n return redirect(url_for('login'))\n session.pop(user, None)\n setUser(None)\n setSynced(False)\n return redirect(url_for('home'))", "title": "" }, { "docid": "9712cab5942ccf19d7eef7a0898bdd72", "score": "0.73552674", "text": "def logout_user(self):\r\n self.session.clear()", "title": "" }, { "docid": "a3613f807158a51158c565a1287d59eb", "score": "0.735142", "text": "def logout():\n\n response = user_management_api('logout/')\n session.clear()\n if response:\n flash(u'You have been logged out successfully.', 'success')\n return redirect(request.args.get('next', None) or\n url_for('index'))\n return redirect(url_for('index'))", "title": "" }, { "docid": "d116bc302fb3463438edea438a8b8e52", "score": "0.7339286", "text": "def logout():\n session.pop(\"user\", None)\n return redirect(url_for(\"login\"))", "title": "" }, { "docid": "5a34dc5a147a7ea1c79a170fb62ad9ed", "score": "0.73301435", "text": "def logout():\n user_logout()\n return redirect(url_for('home'))", "title": "" }, { "docid": "760e27e9e898f0339418e9e7a42693b3", "score": "0.73201615", "text": "def logout(request):\n\n request.session['user'] = None\n return redirect(BASE_HTTP_ADDRESS)", "title": "" }, { "docid": "cdb69a599159261786abb261d0561d92", "score": "0.7317187", "text": "def post_logout(self, came_from=lurl('/')):\n redirect('/')", "title": "" }, { "docid": "b9525e20528efdcd90c9f6b14f2401b9", "score": "0.73148036", "text": "def logout():\n session.pop('user', None)\n 
return redirect('/')", "title": "" }, { "docid": "2cc7e192691f599883771f626a6458f7", "score": "0.73119676", "text": "def logout(self):\n self.server.LOGOUT()", "title": "" }, { "docid": "6f4e2a75353d0bb9bf5ae6ed94578757", "score": "0.73002356", "text": "def user_logged_out_callback(sender, request, user, **kwargs):\n logger.info(f'User {user.username} logged out from {get_client_ip(request)}')", "title": "" }, { "docid": "51f01fef2da7ebade6c58083abf56afd", "score": "0.7297945", "text": "def logout_user():\n session.pop(\"username\")\n return redirect(\"/\")", "title": "" }, { "docid": "8af44e0ecbc48440fc34b07752b7f9e7", "score": "0.7296517", "text": "def logout_view(request):\n logout(request)\n return redirect(to=reverse('webapp:user-login'))", "title": "" }, { "docid": "c402ee7add823d5a7762c6ace7ca8ca6", "score": "0.72933424", "text": "def logout(request, user):\n if 'x-access-token' in request.headers:\n token = request.headers['x-access-token']\n # Mark the token as blacklisted\n blacklist_token = BlacklistToken(token=token)\n try:\n db.session.add(blacklist_token)\n db.session.commit()\n response_object = {\n 'status': 'success',\n 'message': 'Successfully logged out.'\n }\n return make_response(jsonify(response_object)), 200\n except Exception as e:\n logging.info(str(e))\n response_object = {\n 'status': 'fail',\n 'message': str(e)\n }\n return make_response(jsonify(response_object)), 200\n else:\n response_object = {\n 'status': 'fail',\n 'message': \"Invalid Token\"\n }\n return make_response(jsonify(response_object)), 400", "title": "" }, { "docid": "3ed5a9b9576f5dc50a37744251ff540e", "score": "0.72913265", "text": "def logout():\n \n logout_user()\n flash('You have been successfully logged out.')\n \n return redirect(url_for('auth.login'))", "title": "" }, { "docid": "a5339136d12099105708496c80a1b42a", "score": "0.72905684", "text": "def logout():\n session.pop('user_id', None)\n flash(\"You were logged out\")\n return redirect(url_for('list_entries'))", "title": "" }, { "docid": "b93a0c278fe1cd8c94fda15056f74bf1", "score": "0.7290459", "text": "def logout():\n try:\n current_user.get_current_user().set_logout()\n current_user.set_current_user(None)\n except:\n return jsonify({\"status\": \"error\"})\n return jsonify({\"status\": \"successfully logged out\"})", "title": "" }, { "docid": "9768254827302338ec18fff8091860a0", "score": "0.728957", "text": "def logout():\n print('logout()')\n\n if session.get(\"user_username\"):\n session.pop(\"user_username\")\n\n return redirect('/')", "title": "" }, { "docid": "d84eda9857896e913aeeae89768396f4", "score": "0.7286712", "text": "def logout():\n logout_user()\n\n return redirect(url_for('main.public'))", "title": "" }, { "docid": "8175655f1d6583e7efe112febc0bf8c6", "score": "0.7280347", "text": "def _logout(self):\n self.client.get(\"/logout\")", "title": "" }, { "docid": "7310e8723ba785e6a7c1095f90965c6a", "score": "0.7279957", "text": "def logout(request):\n\n del(request.session['user'])\n messages.add_message(request, messages.INFO, u'Session destroyed - Logout')\n return redirect('home')", "title": "" }, { "docid": "3e396fc13b6094212906fc45d4e483a3", "score": "0.7278661", "text": "def logout():\n # Remove session data, this will log the user out\n session.pop('loggedin', None)\n session.pop('userid', None)\n session.pop('username', None)\n # Redirect to login page\n return redirect(url_for('site.login'))", "title": "" }, { "docid": "5ec5ec0d487a5a24878dac364ef2a850", "score": "0.7278164", "text": "def do_logout():\n if 
CURR_USER_KEY in session:\n del session[CURR_USER_KEY]\n flash(\"Have a nice day!\", \"secondary\")\n return redirect('/')", "title": "" }, { "docid": "a7655fe84353c8928b11f81fe84bad99", "score": "0.7276208", "text": "def logout():\n required_data_keys = ['email']\n status, error = verify_request(request.json, keys=required_data_keys, all_should_exist=True)\n if not status: return error_response(error)\n\n # Check if user exists and password is correct\n user = self.database.get_user(request.json['email'])\n if not user: return error_response('User does not exist')\n\n # Check if user is already logged out\n if not user['logged_in']: return error_response('User not logged in')\n\n # Update auth status for user\n if not self.database.update_user(request.json['email'], logged_in=False):\n return error_response('Failure logging out user')\n\n return success_response(request.path)", "title": "" }, { "docid": "209526b986b0287fa09d5ee2a361c2bb", "score": "0.7273281", "text": "def logout():\n\n # Forget any user_id\n session.clear()\n\n flash(\"Logged out\")\n return redirect(\"/\")", "title": "" }, { "docid": "9ac670e37233baf65ca9516154c79983", "score": "0.726506", "text": "def logout():\n flash(\"Logged out. Happy Walking!\")\n session.pop(\"user\")\n return redirect(url_for(\"index\"))", "title": "" }, { "docid": "e034908c1a84865389d6ad5ac4f700be", "score": "0.72607553", "text": "def view_logout(self, request):\n\n # forget the current loged-in user\n headers = forget(request)\n # redirect to the login page\n return HTTPFound(location=request.route_url(\"index\"), headers=headers)", "title": "" }, { "docid": "6ad7d45e1903cf6394335b2b9a347fb5", "score": "0.7253628", "text": "def logout():\n\n flash(\"Goodbye! You have been logged out\")\n session.pop(\"user\")\n return redirect(url_for(\"login\"))", "title": "" }, { "docid": "f25512c9a86697821e7b7155caf3027b", "score": "0.72533303", "text": "def logout():\n if not flask_login.current_user.is_authenticated():\n return redirect(request.args.get(\"next\", url_for(\"auth.login\")))\n user = flask_login.current_user\n flask_login.logout_user()\n return redirect(request.args.get(\"next\", url_for(\"auth.login\")))", "title": "" }, { "docid": "3c9066b19c1fe5347e268b61313cb369", "score": "0.72508705", "text": "def logout():\n app = current_app._get_current_object()\n app.logger.info('%s logged out', current_user.email)\n logout_user()\n flash('You have been logged out.')\n return redirect(url_for('auth.login'))", "title": "" }, { "docid": "992724d8fbfadbe2b2aa2da026eede67", "score": "0.7242596", "text": "def logout():\n logout_user()\n return redirect(url_for(\"home.index\"))", "title": "" }, { "docid": "a73708a677976d5038c816bde8618905", "score": "0.72375613", "text": "def logout(self, user):\n self.bridge.logout(user)", "title": "" }, { "docid": "689f345569ea01ddb007b4c889acebda", "score": "0.7237287", "text": "def user_logout(request):\n logout(request)\n return render(request, 'home.html', {'just_logged_out': True})", "title": "" }, { "docid": "5822654f63d816cae74b5a3840ea27a2", "score": "0.72333056", "text": "def logout():\n #logout_user is a method to remove a user's session and sign them out.\n logout_user()\n flash(\"You've been logged out! 
Come back soon!\", \"success\")\n return redirect(url_for('index'))", "title": "" }, { "docid": "3bd4104a61b95e9768d017fabfe3ecca", "score": "0.7229821", "text": "def logout():\n\n # Forget any user_id\n session.clear()\n\n # Redirect to login page\n return redirect(\"/\")", "title": "" }, { "docid": "e85bd2e6644547e2223a9a0c0116ab18", "score": "0.72286", "text": "def logout():\r\n \r\n print('==[ LOGOUT ]==')\r\n print('ID : ',session['user_id'])\r\n print('Name : ',session['username'])\r\n print('Date : ',datetime.datetime.now().strftime('%x'))\r\n print('Time : ',datetime.datetime.now().strftime('%X'))\r\n print('==================')\r\n session.clear()\r\n return redirect(url_for(\"login\"))", "title": "" }, { "docid": "cd30c3d6d23b79bea0c8f18e7c6a4b54", "score": "0.72284573", "text": "def logout():\n\n session.clear()\n return 'User log out successfully.'", "title": "" }, { "docid": "fee927bf30955d8adc4c44b21489c47b", "score": "0.7223703", "text": "def logout():\n logout_user()\n return redirect(url_for('index'))", "title": "" }, { "docid": "fee927bf30955d8adc4c44b21489c47b", "score": "0.7223703", "text": "def logout():\n logout_user()\n return redirect(url_for('index'))", "title": "" }, { "docid": "fee927bf30955d8adc4c44b21489c47b", "score": "0.7223703", "text": "def logout():\n logout_user()\n return redirect(url_for('index'))", "title": "" }, { "docid": "fee927bf30955d8adc4c44b21489c47b", "score": "0.7223703", "text": "def logout():\n logout_user()\n return redirect(url_for('index'))", "title": "" }, { "docid": "fee927bf30955d8adc4c44b21489c47b", "score": "0.7223703", "text": "def logout():\n logout_user()\n return redirect(url_for('index'))", "title": "" }, { "docid": "fee927bf30955d8adc4c44b21489c47b", "score": "0.7223703", "text": "def logout():\n logout_user()\n return redirect(url_for('index'))", "title": "" }, { "docid": "790bdb1b00bf0ed86c159d2414332d03", "score": "0.72084796", "text": "def logout(request):\n\tif request.user.is_anonymous():\n\t\treturn JsonError(\"Not logged in\")\n\tCustomUser.logout(request)\n\treturn JsonResponse({'status': 200})", "title": "" }, { "docid": "d0040712714da22ca393361588e882a4", "score": "0.7207497", "text": "def logout():\n\n if 'username' in login_session:\n gdisconnect()\n del login_session['provider']\n del login_session['access_token']\n del login_session['username']\n del login_session['email']\n del login_session['picture']\n del login_session['user_id']\n flash(\"You have been successfully logged out!\")\n return redirect(url_for('homePage'))\n else:\n flash(\"You were not logged in!\")\n return redirect(url_for('homePage'))", "title": "" }, { "docid": "706aa11737c1f3e8a0a7d32ba5dc0d2e", "score": "0.7202425", "text": "def logout(request):\n auth.logout(request)\n return CMDAccount.ack_logout(Status.SUCCESS, \"logout successfully\")", "title": "" }, { "docid": "b35a051609e1cfeb2ee743d684e72d8b", "score": "0.720046", "text": "def auth_logout(request):\n logout(request)\n return redirect('/')", "title": "" } ]
ad05ad1b6c8774666498d281ac8eefac
Returns the values of plane0 and plane1 at the x, y coordinates
[ { "docid": "2476103f45b1418c9f62913c4116eeba", "score": "0.613089", "text": "def __getitem__(self, item):\n x, y = item\n offset = (x + self.width * y) << 1\n p0 = struct.unpack('<H', self.plane0[offset: offset + 2])[0]\n p1 = struct.unpack('<H', self.plane1[offset: offset + 2])[0]\n return p0, p1", "title": "" } ]
[ { "docid": "feda3b68fc0925e9df0003336c79c729", "score": "0.67105764", "text": "def plane(X1,Y1,X2,Y2,n):\n\n x = linspace(X1,X2,n)\n y = linspace(Y1,Y2,n)\n # Create the mesh grid, of a XY plane sitting on the orgin\n X,Y = meshgrid(x,y)\n Z = zeros([10,10])\n print(X, X.shape)\n print(Y, Y.shape)\n print(Z, Z.shape)\n return X,Y,Z", "title": "" }, { "docid": "ef50ae1bcd323b5d14486c6758469408", "score": "0.614817", "text": "def project():\r\n\t\t# note that we're not just dropping the z values, we're getting true perspective\r\n\t\tbx = plane[z]/d[z,:]*d[x,:]+plane[x]\r\n\t\tby = plane[z]/d[z,:]*d[y,:]+plane[y]\r\n\t\r\n\t\treturn bx,by", "title": "" }, { "docid": "986fc2e90b087b24c9c660335334b886", "score": "0.60593593", "text": "def _compute_plane_c(c1, c2, c3):\n ij = c1 - c2\n kj = c3 - c2\n p_vector = np.cross(ij, kj)\n c = np.dot(c1, p_vector)\n plane = list(p_vector) + [c]\n return plane", "title": "" }, { "docid": "c015b566444045ef22d49a6c011af5b0", "score": "0.6022685", "text": "def get_xz(self, plane_k, plane_b, event):\n\n Wz1 = event.Wz1.values\n Wx1 = event.Wx1.values\n Wx2 = event.Wx2.values\n Wy1 = event.Wy1.values\n Wy2 = event.Wy2.values\n\n y = plane_k * Wz1 + plane_b\n x = (Wx2 - Wx1) / (Wy2 - Wy1) * (y - Wy1) + Wx1\n\n return Wz1, x", "title": "" }, { "docid": "366ca9393a477fa188efc98b3e766248", "score": "0.5863449", "text": "def over_plane(p0, p1, p2):\n v1 = np.array(p1) - np.array(p0)\n v2 = np.array(p2) - np.array(p0)\n\n n = np.cross(v1, v2)\n D = -(n[0] * p0[0] + n[1] * p0[1] + n[2] * p0[2])\n return lambda x: n[0] * x[0] + n[1] * x[1] + D > -n[2] * x[2]", "title": "" }, { "docid": "9711027eb017596801c16d8a05bb9c9f", "score": "0.5805826", "text": "def _compute_plane(self, i, j, k):\n c1 = self[i].coords\n c2 = self[j].coords\n c3 = self[k].coords\n return self._compute_plane_c(c1, c2, c3)", "title": "" }, { "docid": "290a866136d93aa2d05ada2dad595123", "score": "0.5670252", "text": "def calcular_producto_vectorial(x1,y1,z1,x2,y2,z2):\n\n\tresultado_x = y1*z2 - z1*y2#Calcula la componente X del vector resultado. 
\n\tresultado_y = z1*x2 - x1*z2#Calcula la componente Y del vector resultado.\n\tresultado_z = x1*y2 - y1*x2 #Calcula la componente Z del vector resultado.\n\n\treturn resultado_x,resultado_y,resultado_z", "title": "" }, { "docid": "e1666bca5934c14c3c92b1cc80f90ce3", "score": "0.5616512", "text": "def getPlaneNormal(v0, v1, v2):\n\n vector0 = v1 - v0\n vector1 = v2 - v0\n vector0.normalize()\n vector1.normalize()\n\n normal = vector1 ^ vector0\n normal.normalize()\n\n return normal", "title": "" }, { "docid": "7fb8417dfe9630421a8452e0360fed91", "score": "0.55119133", "text": "def findVector(x1, y1, x2, y2):\n vector_x = x2 - x1\n vector_y = y2 - y1\n return [vector_x, vector_y]", "title": "" }, { "docid": "713dc146f93214c533b76ddba5713b29", "score": "0.54968095", "text": "def get_plane_of_points(\n self,\n x1_resolution=200,\n x2_resolution=200,\n normal_vector=\"z\",\n x3_value=100,\n x1_bounds=None,\n x2_bounds=None,\n ):\n # Get a copy for the flow field so don't change underlying grid points\n flow_field = copy.deepcopy(self.floris.farm.flow_field)\n\n if self.floris.farm.flow_field.wake.velocity_model.requires_resolution:\n\n # If this is a gridded model, must extract from full flow field\n self.logger.info(\n \"Model identified as %s requires use of underlying grid print\"\n % self.floris.farm.flow_field.wake.velocity_model.model_string\n )\n\n # Get the flow data and extract the plane using it\n flow_data = self.get_flow_data()\n return get_plane_from_flow_data(\n flow_data, normal_vector=normal_vector, x3_value=x3_value\n )\n\n # If x1 and x2 bounds are not provided, use rules of thumb\n if normal_vector == \"z\": # Rules of thumb for horizontal plane\n if x1_bounds is None:\n coords = self.floris.farm.flow_field.turbine_map.coords\n max_diameter = self.floris.farm.flow_field.max_diameter\n x = [coord.x1 for coord in coords]\n x1_bounds = (min(x) - 2 * max_diameter, max(x) + 10 * max_diameter)\n if x2_bounds is None:\n coords = self.floris.farm.flow_field.turbine_map.coords\n max_diameter = self.floris.farm.flow_field.max_diameter\n y = [coord.x2 for coord in coords]\n x2_bounds = (min(y) - 2 * max_diameter, max(y) + 2 * max_diameter)\n if normal_vector == \"x\": # Rules of thumb for cut plane plane\n if x1_bounds is None:\n coords = self.floris.farm.flow_field.turbine_map.coords\n max_diameter = self.floris.farm.flow_field.max_diameter\n y = [coord.x2 for coord in coords]\n x1_bounds = (min(y) - 2 * max_diameter, max(y) + 2 * max_diameter)\n if x2_bounds is None:\n hub_height = self.floris.farm.flow_field.turbine_map.turbines[\n 0\n ].hub_height\n x2_bounds = (10, hub_height * 2)\n if normal_vector == \"y\": # Rules of thumb for cut plane plane\n if x1_bounds is None:\n coords = self.floris.farm.flow_field.turbine_map.coords\n max_diameter = self.floris.farm.flow_field.max_diameter\n x = [coord.x1 for coord in coords]\n x1_bounds = (min(x) - 2 * max_diameter, max(x) + 10 * max_diameter)\n if x2_bounds is None:\n hub_height = self.floris.farm.flow_field.turbine_map.turbines[\n 0\n ].hub_height\n x2_bounds = (10, hub_height * 2)\n\n # Set up the points to test\n x1_array = np.linspace(x1_bounds[0], x1_bounds[1], num=x1_resolution)\n x2_array = np.linspace(x2_bounds[0], x2_bounds[1], num=x2_resolution)\n\n # Grid the points and flatten\n x1_array, x2_array = np.meshgrid(x1_array, x2_array)\n x1_array = x1_array.flatten()\n x2_array = x2_array.flatten()\n x3_array = np.ones_like(x1_array) * x3_value\n\n # Create the points matrix\n if normal_vector == \"z\":\n points = 
np.row_stack((x1_array, x2_array, x3_array))\n if normal_vector == \"x\":\n points = np.row_stack((x3_array, x1_array, x2_array))\n if normal_vector == \"y\":\n points = np.row_stack((x1_array, x3_array, x2_array))\n\n # Recalculate wake with these points\n flow_field.calculate_wake(points=points)\n\n # Get results vectors\n x_flat = flow_field.x.flatten()\n y_flat = flow_field.y.flatten()\n z_flat = flow_field.z.flatten()\n u_flat = flow_field.u.flatten()\n v_flat = flow_field.v.flatten()\n w_flat = flow_field.w.flatten()\n\n # Create a df of these\n if normal_vector == \"z\":\n df = pd.DataFrame(\n {\n \"x1\": x_flat,\n \"x2\": y_flat,\n \"x3\": z_flat,\n \"u\": u_flat,\n \"v\": v_flat,\n \"w\": w_flat,\n }\n )\n if normal_vector == \"x\":\n df = pd.DataFrame(\n {\n \"x1\": y_flat,\n \"x2\": z_flat,\n \"x3\": x_flat,\n \"u\": u_flat,\n \"v\": v_flat,\n \"w\": w_flat,\n }\n )\n if normal_vector == \"y\":\n df = pd.DataFrame(\n {\n \"x1\": x_flat,\n \"x2\": z_flat,\n \"x3\": y_flat,\n \"u\": u_flat,\n \"v\": v_flat,\n \"w\": w_flat,\n }\n )\n\n # Subset to plane\n df = df[df.x3 == x3_value]\n\n # Drop duplicates\n df = df.drop_duplicates()\n\n # Limit to requested points\n df = df[df.x1.isin(x1_array)]\n df = df[df.x2.isin(x2_array)]\n\n # Sort values of df to make sure plotting is acceptable\n df = df.sort_values([\"x2\", \"x1\"]).reset_index(drop=True)\n\n # Return the dataframe\n return df", "title": "" }, { "docid": "a1c2c995cfc2d59579de1c0a09abe61e", "score": "0.54871064", "text": "def _get_xyz(self):\n return self.x, self.y, self.z", "title": "" }, { "docid": "75f3e535ec748da593d01e794224d80a", "score": "0.546944", "text": "def _project_point_onto_plane(point, plane):\n vector = plane[0:3]\n constant = plane[3]\n nom = np.inner(vector, point) - constant\n denom = np.inner(vector, vector)\n const = nom / denom\n return np.array([po - v * const for po, v in zip(point, vector)])", "title": "" }, { "docid": "e82255519e7a12e38341c88b9bbc556a", "score": "0.546684", "text": "def get_coordinates(self, one_d=False):\n\n y_act = (np.arange(self.shape[0])-self.pupil_center)*self.actuator_spacing\n x_act = (np.arange(self.shape[1])-self.pupil_center)*self.actuator_spacing\n\n if not one_d: # convert to 2D\n y_act.shape = (self.shape[0],1)\n y_act = y_act * np.ones( (1, self.shape[1]))\n\n x_act.shape = (1, self.shape[1])\n x_act = x_act * np.ones( (self.shape[0], 1))\n\n return y_act, x_act", "title": "" }, { "docid": "6d0a60942d02374dc5e449cdfde24844", "score": "0.54562473", "text": "def get_cross(plane):\n vec_a = plane[0:3]\n vec_b = plane[3:6]\n vec_c = plane[6:9]\n vec_ca = vec_c - vec_a\n vec_ba = vec_b - vec_a\n return np.cross(vec_ca, vec_ba)", "title": "" }, { "docid": "07fdfbcf80d7a8b1aa66069087ca0f24", "score": "0.542422", "text": "def get_plane(coords, direction=None):\n\n coords = np.array(coords)\n p0 = np.cross(coords[0] - coords[2], coords[0] - coords[-1]) / np.linalg.norm(np.cross(coords[0] - coords[2],\n coords[0] - coords[-1]))\n\n # Fitting function to a plane\n def fitfunc(p, coords):\n average = np.average(coords, axis=0)\n return np.array([np.dot(p, average - c) for c in coords])\n\n # Error function (including force norm(normal) = 1)\n errfunc = lambda p, x: fitfunc(p, x)**2 + (np.linalg.norm(p) - 1.0)**2\n\n p1, flag = leastsq(errfunc, p0, args=(coords,))\n\n\n # Check final result\n point = np.average(coords, axis=0).tolist()\n normal = np.array(p1)/np.linalg.norm(p1)\n\n if direction is not None:\n vector = coords[direction[1]] - coords[direction[0]]\n # proj = vector 
- np.dot(vector, normal)*normal\n        projected = np.cross(normal, np.cross(vector, normal))\n        projected /= np.linalg.norm(projected)\n\n        normal = standardize_vector(normal)\n        projected = standardize_vector(projected)\n\n        return point, normal, projected\n\n    normal = standardize_vector(normal)\n    return point, normal", "title": "" }, { "docid": "cbbbbfc336e41522a30bdb704edac2af", "score": "0.54209256", "text": "def getValues(self):\n\t\tx = float(self.x.getValue())\n\t\ty = float(self.y.getValue())\n\t\tz = float(self.z.getValue())\n\t\tyaw = float(self.yaw.getValue())\n\t\t\n\t\treturn x, y, z, yaw", "title": "" }, { "docid": "7c612fc0baf81a8d9cb45b7ad9b3777c", "score": "0.54172015", "text": "def local_n1(self, x, y):\n        # just flat:\n        a = -x / (2*self.focus) # -dz/dx\n        b = -y / (2*self.focus) # -dz/dy\n        if self.zmax is not None:\n            z = (x**2 + y**2) / (4*self.focus)\n            if isinstance(a, np.ndarray):\n                a[z > self.zmax] = 0\n            if isinstance(b, np.ndarray):\n                b[z > self.zmax] = 0\n        c = np.ones_like(x)\n        norm = (a**2 + b**2 + 1)**0.5\n        return [a/norm, b/norm, c/norm]", "title": "" }, { "docid": "adee3d74279dd5e439e9b4fc779d1bae", "score": "0.5383327", "text": "def _assign_points_to_plane(points, planes):\n    labels = []\n    for point in points:\n        distances = []\n        for plane in planes:\n            # distance from every plane the point\n            distances.append(abs(plane[:3].dot(point) + plane[3]) / np.linalg.norm(plane[:3]))\n        # label is the index of the plane with the minimal distance\n        labels.append(np.argmin(distances))\n    return np.array(labels)", "title": "" }, { "docid": "c551687273d383dd4d9cc9ffe6934b85", "score": "0.535705", "text": "def line(x0, y0, x1, y1):\n    a = (y1 - y0)/float(x1 - x0)\n    b = y0 - a*x0\n    return a, b", "title": "" }, { "docid": "49cc457de9c9e93e23a2f1e1a1b79767", "score": "0.53488183", "text": "def intersection(vertex_a, vertex_b, vertex_c, vertex_d, plane):\n\n    if plane == \"LO\":\n        vertex_offset = vertex_b - vertex_a\n        vertex_b = view_coords_i(vertex_offset.x, vertex_offset.y, vertex_offset.z)\n        vertex_offset = vertex_d - vertex_a\n        vertex_d = view_coords_i(vertex_offset.x, vertex_offset.y, vertex_offset.z)\n        vertex_offset = vertex_c - vertex_a\n        vertex_c = view_coords_i(vertex_offset.x, vertex_offset.y, vertex_offset.z)\n        vector_ref = Vector((0, 0, 0))\n        coord_a = (vertex_c.x, vertex_c.y)\n        coord_b = (vertex_d.x, vertex_d.y)\n        coord_c = (vertex_b.x, vertex_b.y)\n        coord_d = (vector_ref.x, vector_ref.y)\n    else:\n        a1, a2, a3 = set_mode(plane)\n        coord_a = (vertex_c[a1], vertex_c[a2])\n        coord_b = (vertex_d[a1], vertex_d[a2])\n        coord_c = (vertex_a[a1], vertex_a[a2])\n        coord_d = (vertex_b[a1], vertex_b[a2])\n    v_stack = np.vstack([coord_a, coord_b, coord_c, coord_d])\n    h_stack = np.hstack((v_stack, np.ones((4, 1))))\n    line_a = np.cross(h_stack[0], h_stack[1])\n    line_b = np.cross(h_stack[2], h_stack[3])\n    x_loc, y_loc, z_loc = np.cross(line_a, line_b)\n    if z_loc == 0:\n        return Vector((0, 0, 0)), False\n    new_x_loc = x_loc / z_loc\n    new_z_loc = y_loc / z_loc\n    if plane == \"LO\":\n        new_y_loc = 0\n    else:\n        new_y_loc = vertex_a[a3]\n    # Order Vector Delta\n    if plane == \"XZ\":\n        vector_delta = Vector((new_x_loc, new_y_loc, new_z_loc))\n    elif plane == \"XY\":\n        vector_delta = Vector((new_x_loc, new_z_loc, new_y_loc))\n    elif plane == \"YZ\":\n        vector_delta = Vector((new_y_loc, new_x_loc, new_z_loc))\n    else:\n        # Must be Local View 
Plane\n vector_delta = view_coords(new_x_loc, new_z_loc, new_y_loc) + vertex_a\n return vector_delta, True", "title": "" }, { "docid": "5ed75cfd0e3736997e37e2fcb2038e6e", "score": "0.53354037", "text": "def vector(self):\n return self.p2 - self.p1", "title": "" }, { "docid": "26069d106eb4db3e6eb0723314ef93b4", "score": "0.53300816", "text": "def extract_planes(planes: np.ndarray) -> np.ndarray:\n blue_or_alpha_plane = planes[..., -1]\n return blue_or_alpha_plane.flatten()", "title": "" }, { "docid": "cebe2481263aeeabd7293b3e862ac951", "score": "0.5313136", "text": "def get_plane_normal(points):\n v1 = points[1] - points[0]\n v2 = points[2] - points[0]\n return np.cross(v1, v2)", "title": "" }, { "docid": "f431c4ae5be557f720a073bc59423e8c", "score": "0.5312038", "text": "def get_projected_angles(v0, v1):\n unit_vecs = get_local_plane_coordinate_system(v0)\n local_vector = np.dot(unit_vecs, v1)\n thetax = np.arcsin(local_vector[0]/np.sqrt(local_vector[0]**2+local_vector[2]**2))\n thetay = np.arcsin(local_vector[1]/np.sqrt(local_vector[1]**2+local_vector[2]**2))\n return thetax, thetay", "title": "" }, { "docid": "22a4b5e38cff6389e5850726fc8eef25", "score": "0.5290164", "text": "def plane(self):\n return self._state.plane", "title": "" }, { "docid": "02ebb09efa1dc06d89836dc252e5791f", "score": "0.52883124", "text": "def hyperplane(x, w, b, v): # hyperplane v = x.w+b\n return (-w[0] * x - b + v) / w[1]", "title": "" }, { "docid": "ce9e7e25012052e1b87e57f0a1e0af87", "score": "0.5286241", "text": "def plane_to_plane(plane_point1, plane_normal1, plane_point2, plane_normal2,\n epsilon=1e-6):\n line_direction = np.cross(plane_normal1, plane_normal2)\n if np.linalg.norm(line_direction) > epsilon:\n line_direction, line_moment = plane_intersects_plane(\n plane_point1, plane_normal1, plane_point2, plane_normal2,\n line_direction=line_direction)\n line_point, _ = line_from_pluecker(line_direction, line_moment)\n return 0.0, line_point, line_point\n else:\n dist, closest_point_plane2 = point_to_plane(\n plane_point1, plane_point2, plane_normal2)\n return dist, plane_point1, closest_point_plane2", "title": "" }, { "docid": "75d09d83b2776ac84cc7881372c126d8", "score": "0.52755344", "text": "def trans_between_origin_and_plane(pla:Plane) -> (Matrix, Matrix):\n to_origin_matrices = []\n to_plane_matrices = []\n origin, axis_x, axis_y, axis_z = plane.decon(pla)\n # this is the last move\n to_plane_vector = vector.con_point(origin)\n to_origin_matrices.append(matrix.translation(-to_plane_vector))\n to_plane_matrices.append(matrix.translation(to_plane_vector))\n\n # need to match each vectors\n # gonna match x,y,z\n # so looking into z rotation first\n\n # look for a vector that can be rotated\n vector_on_xy = vector.project_on_xyplane(axis_x)\n if vector_on_xy.length != 0:\n angle = vector.angle_2_vectors(Vector(1,0,0), vector_on_xy)\n else:\n vector_on_xy = vector.project_on_xyplane(axis_y)\n angle = vector.angle_2_vectors(Vector(0,1,0), vector_on_xy)\n quarter = vector.quarter_on_plane(vector_on_xy,'xy')\n if quarter == 0 or quarter == 1:\n angle = -angle\n to_origin = matrix.rotation_z(angle)\n to_plane = matrix.rotation_z(-angle)\n to_origin_matrices.insert(0,to_origin)\n to_plane_matrices.append(to_plane)\n axis_x = trans.transform(axis_x, to_origin)\n axis_y = trans.transform(axis_y, to_origin)\n axis_z = trans.transform(axis_z, to_origin)\n\n # look into x rotation\n vector_on_yz = vector.project_on_yzplane(axis_y)\n if vector_on_yz.length != 0:\n angle = vector.angle_2_vectors(Vector(0,1,0), 
vector_on_yz)\n else:\n vector_on_yz = vector.project_on_yzplane(axis_z)\n angle = vector.angle_2_vectors(Vector(0,0,1), vector_on_yz)\n quarter = vector.quarter_on_plane(vector_on_yz, 'yz')\n if quarter == 0 or quarter == 1:\n angle = -angle\n to_origin = matrix.rotation_x(angle)\n to_plane = matrix.rotation_x(-angle)\n to_origin_matrices.insert(0,to_origin)\n to_plane_matrices.append(to_plane)\n axis_x = trans.transform(axis_x, to_origin)\n axis_y = trans.transform(axis_y, to_origin)\n axis_z = trans.transform(axis_z, to_origin)\n\n # look into y rotation\n vector_on_xz = vector.project_on_xzplane(axis_z)\n if vector_on_xz.length != 0:\n angle = vector.angle_2_vectors(Vector(0,0,1), vector_on_xz)\n else:\n vector_on_xz = vector.project_on_xzplane(axis_x)\n angle = vector.angle_2_vectors(Vector(1,0,0), vector_on_xz)\n quarter = vector.quarter_on_plane(vector_on_xz, 'xz')\n if quarter == 0 or quarter == 1:\n angle = -angle\n to_origin = matrix.rotation_y(angle)\n to_plane = matrix.rotation_y(-angle)\n to_origin_matrices.insert(0, to_origin)\n to_plane_matrices.append(to_plane)\n\n # all matrices collected\n to_origin_matrix = matrix.combine_matrix(*to_origin_matrices)\n to_plane_matrix = matrix.combine_matrix(*to_plane_matrices)\n\n return to_origin_matrix, to_plane_matrix", "title": "" }, { "docid": "6e4e871d6ae201fb92d08d99169f1efa", "score": "0.52535856", "text": "def pointOnPlane(p1, p2, point, normal):\n if np.sign(dToPlane(p1, point, normal))== np.sign(dToPlane(p2, point, normal)):\n print ('WARNING: POINTS NOT ON DIFFERENT SIDE OF PLANE')\n return\n linevec = p1-p2 #vector along the line\n distance =(np.dot( (point - p1),normal))/(np.dot(linevec, normal)) #see wikipedia, Line-plane_intersection\n return distance*linevec + p1", "title": "" }, { "docid": "93e198cbdd22c3d83ac769560c3e788d", "score": "0.52465975", "text": "def __init__(self, x, y):\n PlaneHypothesis.__init__(self, x, y, 0)", "title": "" }, { "docid": "f6919e956b303f689d90c529c26eec94", "score": "0.5233085", "text": "def calcular_diferencia_vectores(x1,y1,z1,x2,y2,z2):\n\n\tdiferencia_x = x1-x2\n\tdiferencia_y = y1-y2\n\tdiferencia_z = z1-z2\n\n\treturn diferencia_x,diferencia_y,diferencia_z", "title": "" }, { "docid": "6618f5ce4fbcb3d96d0b0ea384139bc3", "score": "0.52213746", "text": "def calculate(self):\n # Create lists to store coordinates\n x_coords = []\n y_coords = []\n z_coords = []\n\n # Structure is [[{'y': 0, 'x': 0, 'z': 0}, {'y': 1, 'x': 1, 'z': 0}], [{'y': 0, 'x': 0, 'z': 0}]]\n for row in self._control_points:\n x_coords_temp = []\n y_coords_temp = []\n z_coords_temp = []\n for column in row:\n x_coords_temp.append(column['x'])\n y_coords_temp.append(column['y'])\n z_coords_temp.append(column['z'])\n x_coords.append(x_coords_temp)\n y_coords.append(y_coords_temp)\n z_coords.append(z_coords_temp)\n\n # Loop through span lists to calculate surface points\n t = 0\n for sv in self._spans_v:\n k = 0\n for su in self._spans_u:\n sp_x = self._calculate_point(x_coords, self._basis_functions_u[k], self._basis_functions_v[t], su, sv)\n sp_y = self._calculate_point(y_coords, self._basis_functions_u[k], self._basis_functions_v[t], su, sv)\n sp_z = self._calculate_point(z_coords, self._basis_functions_u[k], self._basis_functions_v[t], su, sv)\n sp_coords = {'x': sp_x, 'y': sp_y, 'z': sp_z}\n self._surface_coordinates.append(sp_coords)\n k += 1\n t += 1", "title": "" }, { "docid": "89627481bb1f98d76dcb05ca259996ed", "score": "0.52122146", "text": "def point_function(x,y,z=None):\n # Presumes image position from top 
left\n # Sometimes negative Y coordinates are implicit\n y = abs(y)\n\n point_2D = camera.unproject(Metashape.Vector((x,y)))\n vect = model.pickPoint(camera.center, point_2D)\n\n #estimating ray and surface intersection\n return tuple(vect)", "title": "" }, { "docid": "720f5776c9f61a6113d583e926e8fd19", "score": "0.5203995", "text": "def directed_hausdorff_distance_2D(plane1, plane2):\n\n max_distance = 0\n currrent_distance = 0\n\n # Create point objects\n point1 = np.empty(2, np.intc)\n point2 = np.empty(2, np.intc)\n\n temp_point_min = np.empty(2, np.intc)\n\n point_min = np.zeros(2, np.intc)\n point_max = np.zeros(2, np.intc)\n\n # Iterate over all points in grid 1\n for plane1_index, plane1_value in np.ndenumerate(plane1):\n\n if (plane1_value == 0): continue\n\n point1[0] = plane1_index[0]\n point1[1] = plane1_index[1]\n\n min_distance = inf\n\n # Iterate over all points in grid 2\n for plane2_index, plane2_value in np.ndenumerate(plane2):\n\n if (plane2_value == 0): continue\n\n point2[0] = plane2_index[0]\n point2[1] = plane2_index[1]\n\n currrent_distance = euclidean_distance_2D(point1, point2)\n\n if currrent_distance <= max_distance:\n min_distance = 0\n break\n\n if (min_distance > currrent_distance):\n min_distance = currrent_distance\n\n temp_point_min[0] = point2[0]\n temp_point_min[1] = point2[1]\n\n if max_distance < min_distance:\n max_distance = min_distance\n\n point_max[0] = point1[0]\n point_max[1] = point1[1]\n\n point_min[0] = temp_point_min[0]\n point_min[1] = temp_point_min[1]\n\n return (max_distance, (point_min, plane2), (point_max, plane1))", "title": "" }, { "docid": "efb26416947aac6df8680b2e6e76e7b1", "score": "0.5197453", "text": "def points(self):\n return (self.p1, self.p2)", "title": "" }, { "docid": "e18a52837240a19bf0e20b29ae9f9b57", "score": "0.5188001", "text": "def xytors(x,y):\n\t\n\tL1 = (math.sqrt(3.0)*y+1.0)/3.0\n\tL2 = (-3.0*x - math.sqrt(3.0)*y + 2.0)/6.0\n\tL3 = ( 3.0*x - math.sqrt(3.0)*y + 2.0)/6.0\n\t\n\tr = -L2 + L3 - L1\n\ts = -L2 - L3 + L1\n\treturn(numpy.array([r,s]))", "title": "" }, { "docid": "b017313b188c8e4c8794046feb006622", "score": "0.5178918", "text": "def project_on_plane(self, plane=\"z\", point=None, direction=None):\n coords = self.points()\n\n if plane == \"x\":\n coords[:, 0] = self.GetOrigin()[0]\n intercept = self.xbounds()[0] if point is None else point\n self.x(intercept)\n elif plane == \"y\":\n coords[:, 1] = self.GetOrigin()[1]\n intercept = self.ybounds()[0] if point is None else point\n self.y(intercept)\n elif plane == \"z\":\n coords[:, 2] = self.GetOrigin()[2]\n intercept = self.zbounds()[0] if point is None else point\n self.z(intercept)\n\n elif isinstance(plane, vedo.shapes.Plane):\n normal = plane.normal / np.linalg.norm(plane.normal)\n pl = np.hstack((normal, -np.dot(plane.pos(), normal))).reshape(4, 1)\n if direction is None and point is None:\n # orthogonal projection\n pt = np.hstack((normal, [0])).reshape(4, 1)\n # proj_mat = pt.T @ pl * np.eye(4) - pt @ pl.T # python3 only\n proj_mat = np.matmul(pt.T, pl) * np.eye(4) - np.matmul(pt, pl.T)\n\n elif direction is None:\n # perspective projection\n pt = np.hstack((np.array(point), [1])).reshape(4, 1)\n # proj_mat = pt.T @ pl * np.eye(4) - pt @ pl.T\n proj_mat = np.matmul(pt.T, pl) * np.eye(4) - np.matmul(pt, pl.T)\n\n elif point is None:\n # oblique projection\n pt = np.hstack((np.array(direction), [0])).reshape(4, 1)\n # proj_mat = pt.T @ pl * np.eye(4) - pt @ pl.T\n proj_mat = np.matmul(pt.T, pl) * np.eye(4) - np.matmul(pt, pl.T)\n\n coords = 
np.concatenate([coords, np.ones((coords.shape[:-1] + (1,)))], axis=-1)\n            # coords = coords @ proj_mat.T\n            coords = np.matmul(coords, proj_mat.T)\n            coords = coords[:, :3] / coords[:, 3:]\n\n        else:\n            vedo.logger.error(f\"unknown plane {plane}\")\n            raise RuntimeError()\n\n        self.alpha(0.1)\n        self.points(coords)\n        return self", "title": "" }, { "docid": "8f4ed250cc3397f87dc7aec18c85b455", "score": "0.5178808", "text": "def local_z1(self, x, y):\n        # just flat:\n        return self.local_z(x, y)", "title": "" }, { "docid": "6c8a76623ad232690734cc7ea99fd6b2", "score": "0.5171448", "text": "def vp_from_two_lines(l1, l2):\n    vp = np.cross(l1, l2)\n    x, y, z = vp\n    if not np.isclose(z, 0.0, rtol=10.0 ** -10.0, atol=10.0 ** -10.0):\n        x = int(x / z)\n        y = int(y / z)\n        vp = (x, y, 1.0)\n    return vp", "title": "" }, { "docid": "1dcda4b3ad5f47ab887d8374120cba7f", "score": "0.5166649", "text": "def project_from_plane(pp, plane):\n    \n    # Check\n    if pp.ndim != 2:\n        raise ValueError('project_from_plane needs an Nx2 array.')\n    if pp.shape[1] != 2:\n        raise ValueError('project_from_plane needs 2D points.')\n    \n    # Prepare\n    pp2 = pp\n    a, b, c, d = plane\n    phix = np.arctan(a/c)\n    phiy = np.arctan(b/c)\n    \n    # Init 3D points\n    pp3 = PointSet(np.zeros((pp2.shape[0], 3), 'float32'))\n    \n    # Rotate the points\n    pp3[:,0] = pp2[:,0] * np.cos(phix)\n    pp3[:,1] = pp2[:,1] * np.cos(phiy)\n    \n    # Find the z value for all points\n    pp3[:,2] = -(pp3[:,0]*a + pp3[:,1]*b + d) / c\n    \n    return pp3", "title": "" }, { "docid": "e9d65e4087ce1e431b424b18df0936fe", "score": "0.5156897", "text": "def getPlane(self):\n        return self.plane;", "title": "" }, { "docid": "9d7be38b39abd6ad4bc8ae39bd5a0484", "score": "0.515652", "text": "def GetClippingPlaneInDataCoords(self, vtkMatrix4x4, p_int, p_float=..., p_float=..., p_float=..., p_float=...):\n        ...", "title": "" }, { "docid": "a5cd9119b6f145555074d756b8458f4e", "score": "0.5154822", "text": "def validPlane(c1, c2, plane):\n    return c1.color == plane.color and plane.color == c2.color", "title": "" }, { "docid": "208bacabfe0c7f00fcbd6ea224499353", "score": "0.5154494", "text": "def get_normal(x1, y1, x2, y2):\n    a = (y2 - y1) / (x2 - x1)\n    b = y1 - a * x1\n    return (a, b)", "title": "" }, { "docid": "a2ffd544b207189a7706f64cb9cd5745", "score": "0.5153128", "text": "def get_plane(self):\n        return self.plane.copy()", "title": "" }, { "docid": "23bbe2bcfab9fa327fd41f6fcba55dd1", "score": "0.5142304", "text": "def __init__(self, plane_z, plane_w):\n        self.Z, self.W = plane_z, plane_w\n        self.plane = np.empty((3, self.Z, self.W))", "title": "" }, { "docid": "3d72bbde2e462d09e290e4f215ae88f7", "score": "0.51233345", "text": "def GetParametricCoords(self):\n        ...", "title": "" }, { "docid": "f3edbd27dce0f64c1571b9bab5b0f360", "score": "0.512207", "text": "def data(self):\n        return np.array(zip(self.x, self.y)), self.z", "title": "" }, { "docid": "90984c449f51df22df210f681bfc6b39", "score": "0.5117237", "text": "def from_points(self, point1, point2):\r\n\t\tself.x = point2[0] - point1[0]\r\n\t\tself.y = point2[1] - point1[1]\r\n\t\tself.z = point2[2] - point1[2]", "title": "" }, { "docid": "5b5c24656fc69ed8b5cb5ed900aea439", "score": "0.5108196", "text": "def getVectors(self):\n        return [(x,y) for x in [-1,0,1] for y in [-1,0,1]]", "title": "" }, 
{ "docid": "138c2f77b7553b8272c66235d33fa7a4", "score": "0.51006925", "text": "def trintersection((x1,y1),(x2,y2)):\n    if x1>=x2:\n        m1=x2\n    else:\n        m1=x1\n    \n    if y1>=y2:\n        m2=y2\n    else:\n        m2=y1\n    return (m1,m2)", "title": "" }, { "docid": "d6cb821ddc180e497f22c2f192f5db3b", "score": "0.50813067", "text": "def get_result_beam(self):\n        \n        if self.dim_flag=='1D':\n            return self.U2, self.x2\n        elif self.dim_flag=='2D':\n            return self.U2,self.x2,self.y2\n        else:\n            print(\"Empty system!\")\n            sys.exit(1)", "title": "" }, { "docid": "43648cc2903826ed3b9136de35379906", "score": "0.50770575", "text": "def from_line(self, xy_set):\n        if len(xy_set.shape) > 2:\n            raise Exception(\"coords_to_plane: feed this function with just one set of coords per time\")\n\n        x_set, y_set = xy_set[:, 0], xy_set[:, 1]\n        self.plane = np.stack((\n            np.tile(x_set, (self.Z, 1)),\n            np.tile(y_set, (self.Z, 1)),\n            np.moveaxis(np.tile(np.arange(0, self.Z, dtype=np.float), (x_set.size, 1)), 0, 1)\n        ))", "title": "" }, { "docid": "c6312f4bef0a6c128bc295e8b754b9d1", "score": "0.5061626", "text": "def planeFit(points):\n    points = np.reshape(points, (np.shape(points)[0], -1)) # Collapse trailing dimensions\n    points = np.transpose(points)\n    assert points.shape[0] <= points.shape[1], \"There are only {} points in {} dimensions.\".format(points.shape[1], points.shape[0])\n    ctr = points.mean(axis=1)\n    x = points - ctr[:,np.newaxis]\n    M = np.dot(x, x.T) # Could also use np.cov(x) here.\n    return ctr, np.linalg.svd(M)[0][:,-1]", "title": "" }, { "docid": "61694d70b31a54d893272ef229634d03", "score": "0.5060976", "text": "def evaluate(self, points):\n\n        if len(points.shape) == 1:\n            points = points[np.newaxis, :]\n\n        opEval = createOperationEval(self.grid) \n        vec = DataVector(self.dim)\n        vals = np.zeros(points.shape[1])\n\n        for i_pt in xrange(points.shape[1]):\n            scaled_0_1_point = self.scale_to_0_1(points[:, i_pt])\n            for i_dim in xrange(points.shape[0]):\n                vec.set(i_dim, scaled_0_1_point[i_dim])\n            vals[i_pt] = opEval.eval(self.alpha, vec)\n\n        return vals", "title": "" }, { "docid": "e77704a7d5d9a9c85d3b5d2a377a32a9", "score": "0.50592285", "text": "def first_order_condition(self, x, y):\n        tl_m = kronecker_product(torch.diag(self.mu_z), self.m_x+self.m_z)\n        tr_m = kronecker_product(self.plan_z, self.m_z)\n        bl_m = kronecker_product(-torch.transpose(self.plan_z, 0, 1), self.m_z)\n        br_m = kronecker_product(torch.diag(self.nu_z), self.m_y+self.m_z)\n        a = torch.cat([torch.cat([tl_m, tr_m], dim=1), torch.cat([bl_m, br_m], dim=1)])\n        vec_x, vec_y = x.flatten(), y.flatten()\n\n        upper_m = kronecker_product(torch.transpose(self.plan_x, 0, 1), self.m_x).matmul(vec_x)\n        lower_m = kronecker_product(self.plan_y, self.m_y).matmul(vec_y)\n        ans = torch.matmul(a.inverse(), torch.cat([upper_m, lower_m]))\n        return ans.split([self.n_anchors_x*self.n_dim, self.n_anchors_y*self.n_dim])", "title": "" }, { "docid": "afb0be9fdfe2f1aab64c41dae56a7cf4", "score": "0.505634", "text": "def __eq__(self, plane2):\n        \n        # Check if the normal vector of the line is zero\n        if self.normal_vector.is_zero():\n            if not plane2.normal_vector.is_zero():\n                return False\n            else:\n                diff = self.constant_term - 
plane2.constant_term\n # Check if the constant terms of the two lines are the same\n return MyDecimal(diff).is_near_zero() \n \n if not self.is_parallel_with(plane2):\n return False\n basepoint1 = self.basepoint\n basepoint2 = plane2.basepoint\n basepoint_diff = basepoint1.minus(basepoint2)\n \n n = plane2.normal_vector\n \n return basepoint_diff.is_orthogonal_to(n)", "title": "" }, { "docid": "07e5878dbf4f2e7d9415d8cef53abd8c", "score": "0.5050428", "text": "def planesub(bcrdata):\n\n # Make an array of x and y values for each pixel\n xvals = np.arange(bcrdata['xpixels'])\n yvals = np.arange(bcrdata['ypixels'])\n xs, ys = np.meshgrid(xvals, yvals)\n x = xs.ravel()\n y = ys.ravel()\n\n # Now have 3 np arrays containing X, Y and Z coords - fit a plane\n\n A = np.column_stack([x, y, np.ones_like(x)])\n abc, residuals, rank, s = np.linalg.lstsq(A, bcrdata['data'])\n\n # print 'Coefficients: ' + str(abc)\n\n # The \"real\" regression coefficients can be calculated, but we need the\n # MIDAS origin and step size\n\n xstep, ystep = get_step(bcrdata)\n xorigin, yorigin = get_origin(bcrdata)\n a1 = abc[0] / xstep\n b1 = abc[1] / ystep\n c1 = abc[2] - (a1 * xorigin) - (b1 * yorigin)\n\n # print 'Real coefficients: ' + str(a1) + ', ' + str(b1) + ', ' + str(c1)\n\n # Create a grid containing this fit\n zgrid = np.array( [x[ind]*abc[0] + y[ind]*abc[1] + abc[2] for ind in np.arange(len(x))] )\n zgrid = zgrid.round().astype(long)\n\n # Subtract fit and shift Z values\n imagefit = bcrdata['data'] - zgrid + abc[2].round().astype(long)\n\n # Return a copy of the original data structure, but with the plane subtracted image data\n\n newbcr = bcrdata\n newbcr['data'] = imagefit\n return newbcr", "title": "" }, { "docid": "220401927222daa9a544746e1788761e", "score": "0.5049129", "text": "def getLine(A, B):\n x0, y0 = A[0], A[1]\n x1, y1 = B[0], B[1]\n vy = y1 - y0\n vx = x1 - x0\n return vx, vy, x0, y0", "title": "" }, { "docid": "077bb0d68dc69bd0a30a1253159b51fe", "score": "0.5049107", "text": "def fit_plane(points):\n center = sum(points) / len(points)\n x = np.array(points - center)\n M = np.dot(np.transpose(x),x)\n normal = np.linalg.svd(M)[0][:,-1]\n return center, normal", "title": "" }, { "docid": "d6b21a61161e7618a89d34bfdbde22fd", "score": "0.5041118", "text": "def local_n(self, x, y):\n a = -x / self.Rs # -dz/dx\n b = -y / self.Rm # -dz/dy\n c = 1.\n\n norm = np.sqrt(a**2 + b**2 + 1)\n a /= norm\n b /= norm\n c /= norm\n\n sinpitch = -b\n cospitch = np.sqrt(1 - b**2)\n\n sinroll = -a\n cosroll = np.sqrt(1 - a**2)\n\n aB = np.zeros_like(a)\n# bB = c\n# cB = -b\n bB = np.ones_like(a)\n cB = np.zeros_like(a)\n\n if self.alpha:\n bB, cB = raycing.rotate_x(bB, cB, self.cosalpha, -self.sinalpha)\n\n# if self.alpha: from BentLaueCylinder\n# b, c = raycing.rotate_x(b, c, -self.sinalpha, -self.cosalpha)\n# else:\n# b, c = c, -b\n aB, cB = raycing.rotate_y(aB, cB, cosroll, -sinroll)\n bB, cB = raycing.rotate_x(bB, cB, cospitch, sinpitch)\n\n normB = (bB**2 + cB**2 + aB**2)**0.5\n\n return [aB/normB, bB/normB, cB/normB, a/norm, b/norm, c/norm]", "title": "" }, { "docid": "54032433913e1804b93fa882833a3b49", "score": "0.5032894", "text": "def local_n(self, x, y):\n a = 0. 
# -dz/dx\n b = -y / self.R # -dz/dy\n c = 1.\n# norm = (a**2 + b**2 + c**2)**0.5\n# return a/norm, b/norm, c/norm\n norm = (b**2 + 1)**0.5\n return [a/norm, b/norm, c/norm]", "title": "" }, { "docid": "4789ce93b394a6af4b6ac6a2ffa1b3cb", "score": "0.5026059", "text": "def plane_hypotheses(dataset):\n\n # Complete this for extra credit\n return", "title": "" }, { "docid": "3d3ad1cd0117736a0802b08191e232c2", "score": "0.5022912", "text": "def closest_point_on_plane(point, plane):\n base, normal = plane\n x, y, z = base\n a, b, c = normalize_vector(normal)\n x1, y1, z1 = point\n d = a * x + b * y + c * z\n k = (a * x1 + b * y1 + c * z1 - d) / (a**2 + b**2 + c**2)\n return [x1 - k * a,\n y1 - k * b,\n z1 - k * c]", "title": "" }, { "docid": "53a20b7deb7b4823fab89125b90f4277", "score": "0.50199986", "text": "def planeproject(x,n):\n\treturn x-np.dot(x,n)/np.linalg.norm(n)*vecnorm(n)", "title": "" }, { "docid": "b27e8f0694833a74600b8d19c6a22e46", "score": "0.5018677", "text": "def x1y1x2y2(self) -> typing.Tuple[int, int, int, int]:\n return (self.x1, self.y1, self.x2, self.y2)", "title": "" }, { "docid": "272279fa06c088cbe1975e16de2d7585", "score": "0.50180584", "text": "def get_4points_for_2rooms(h3d, room1_id, room2_id, args):\n # two rooms\n room1, room2 = h3d.rooms[room1_id], h3d.rooms[room2_id]\n h3d.set_target_room(room1)\n h3d.set_target_room(room2)\n room1_conn_map, _, room1_inroomDist, _ = h3d.env.house.connMapDict[room1_id]\n room2_conn_map, _, room2_inroomDist, _ = h3d.env.house.connMapDict[room2_id]\n\n if args.source_mode == 'nearby':\n # source target (p \\in [target_dist, source_dist] of room1)\n source_point_cands = np.argwhere((room1_conn_map > args.min_dist_thresh) & (room1_conn_map <= args.source_dist_thresh) )\n elif args.source_mode == 'random':\n source_point_cands = np.argwhere((room1_conn_map > 0) & (room2_conn_map > 0) ) # randomly spawned\n else:\n raise NotImplementedError\n source_point_idx = np.random.choice(source_point_cands.shape[0])\n source_point = (source_point_cands[source_point_idx][0], source_point_cands[source_point_idx][1], np.random.choice(h3d.angles))\n\n # room 1 point (p \\in [0, target_dist])\n room1_point_cands = np.argwhere((room1_inroomDist >= 0) & (room1_inroomDist <= args.dist_to_room_center_thresh))\n room1_point_idx = np.random.choice(room1_point_cands.shape[0])\n room1_point = (room1_point_cands[room1_point_idx][0], room1_point_cands[room1_point_idx][1], np.random.choice(h3d.angles))\n\n # room 2 point (p \\in [0, target_dist])\n room2_point_cands = np.argwhere((room2_inroomDist >= 0) & (room2_inroomDist <= args.dist_to_room_center_thresh) )\n room2_point_idx = np.random.choice(room2_point_cands.shape[0])\n room2_point = (room2_point_cands[room2_point_idx][0], room2_point_cands[room2_point_idx][1], np.random.choice(h3d.angles))\n\n if args.source_mode == 'nearby':\n # end target (p \\in [target_dist, source_dist] for room2)\n end_point_cands = np.argwhere((room2_conn_map > args.min_dist_thresh) & (room2_conn_map <= args.end_dist_thresh) )\n elif args.source_mode == 'random':\n end_point_cands = np.argwhere((room1_conn_map > 0) & (room2_conn_map > 0) ) # randomly spawned\n else:\n raise NotImplementedError\n end_point_idx = np.random.choice(end_point_cands.shape[0])\n end_point = (end_point_cands[end_point_idx][0], end_point_cands[end_point_idx][1], np.random.choice(h3d.angles))\n\n return source_point, room1_point, room2_point, end_point", "title": "" }, { "docid": "86a991018723903ba726158dd79aaec7", "score": "0.50126046", "text": "def 
parameters(self):\n coeff = (self.normal_vector.x * self.point.x \n + self.normal_vector.y * self.point.y)\n return (self.normal_vector.x, self.normal_vector.y, coeff)", "title": "" }, { "docid": "60cff5210c534b09a60d941b8dbf3a4e", "score": "0.5012459", "text": "def scalar(x1, y1, x2, y2):\n return x1 * x2 + y1 * y2", "title": "" }, { "docid": "a06953ee054d5e283c2c7cf4bbf992ef", "score": "0.5003986", "text": "def get_local_plane_coordinate_system(vector):\n min_index = min(enumerate(vector), key=lambda x: abs(x[1]))[0]\n y_dir = unit_vecs[min_index] - vector*np.dot(unit_vecs[min_index], vector)\n y_dir = y_dir / np.linalg.norm(y_dir)\n x_dir = np.cross(y_dir, vector)\n x_dir = x_dir / np.linalg.norm(x_dir)\n local_unit_vecs = np.array([x_dir, y_dir, vector])\n return local_unit_vecs", "title": "" }, { "docid": "f740abea5ec90c7338acf6c2c4f52ebb", "score": "0.5000788", "text": "def get_xyz(self,nodes=None):\n log.debug('Return x, y and z values ...')\n if nodes is None:\n return self.xyz\n else:\n V = [self.node_dict_id[u] for u in nodes]\n return self.xyz[V]", "title": "" }, { "docid": "4ca73fbbeb81adedb9c6d73b8fac1288", "score": "0.4997019", "text": "def _get_distance_from_plane(point, plane):\n plane_xyz = plane[0:3]\n distance = np.inner(plane_xyz, point) - plane[3]\n return distance / np.linalg.norm(plane_xyz)", "title": "" }, { "docid": "dc938a1454928fb8792db535812494ea", "score": "0.49963275", "text": "def get_real_coordinates(ratio, x1, y1, x2, y2):\n real_x1 = int(round(x1 // ratio))\n real_y1 = int(round(y1 // ratio))\n real_x2 = int(round(x2 // ratio))\n real_y2 = int(round(y2 // ratio))\n return real_x1, real_y1, real_x2, real_y2", "title": "" }, { "docid": "b77ea349926c47ef7094b5b02e7f586c", "score": "0.4995006", "text": "def liner_function(x1, y1, x2, y2):\n if x1 == x2:\n a = 1\n b = 0\n c = - x1\n elif y1 == y2:\n a = 0\n b = 1\n c = - y1\n else:\n b = 1\n a = - (y1 - y2) / (x1 - x2)\n c = - a * x1 - b * y1\n return a, b, c", "title": "" }, { "docid": "92584f733247fbe15773cdb556f93199", "score": "0.4984811", "text": "def vector(point_i,point_j):\n \n return ((point_j.x - point_i.x , point_j.y - point_i.y))", "title": "" }, { "docid": "449b3388712eac577d2f3aaab41c688e", "score": "0.49831906", "text": "def z(self, x, y):\n return x+1j*y", "title": "" }, { "docid": "336eb2c9de8283ad39dcf18936b143ee", "score": "0.49818334", "text": "def rayon1(xA, x0, yA, y0, zA, z0):\n return sqrt((xA-x0)**2+(yA-y0)**2+(zA-z0)**2)", "title": "" }, { "docid": "37fdfacb94684a7edc8b54b360edf622", "score": "0.49814603", "text": "def _get_xy(self):\n return self.x, self.y", "title": "" }, { "docid": "b1c287ca85cf149dcdaca9e38262a622", "score": "0.49665695", "text": "def vprod(p1, p2):\n return Vec3(p1.y*p2.z - p1.z*p2.y,\n p1.z*p2.x - p1.x*p2.z,\n p1.x*p2.y - p1.y*p2.x)", "title": "" }, { "docid": "1466022f5040340a764d535d0561ae1e", "score": "0.49601823", "text": "def getData(self):\n return (self.X, self.Y)", "title": "" }, { "docid": "c0747ddbeabf45f90dca2aff804a7486", "score": "0.4955628", "text": "def get_real_data(self):\n assert self.X\n assert self.y\n real_flag = ~self.virtual_flag()\n return self.X()[real_flag], self.y()[real_flag]", "title": "" }, { "docid": "8de51c5b9b0f49d6dfbb63d9fdf2f974", "score": "0.49538052", "text": "def xy2radec(hdr,x,y):\n\n\twcs = wcs.WCS(hdr)\n\n\tpixcrd = np.array([[x,y]], np.float_)\n\n\tskycrd = wcs.wcs_pix2sky(pixcrd,1)\n\n\tra = skycrd[0][0]\n\n\tdec = skycrd[0][1]\n\n\treturn (ra,dec)", "title": "" }, { "docid": "90ea096a139c6bdfcf07dde0a0a487cc", "score": 
"0.49525857", "text": "def get_coordinates(self):\n return np.array([self.x,self.y,self.z,self.vx,self.vy,self.vz,\n self.phi,self.theta,self.gamma,\n self.phidot,self.thetadot,self.gammadot])", "title": "" }, { "docid": "39893c17de8f7e9153326407ea8e6a35", "score": "0.49518344", "text": "def compute_displacements(objectsA = np.array([ Bivarg() ]),\n objectsB = np.array([ Bivarg() ])):\n\n nobj = len(objectsA)\n xobs = np.array([o.mu[0] for o in objectsA])\n yobs = np.array([o.mu[1] for o in objectsA])\n vxobs = np.array([objectsB[i].mu[0] - objectsA[i].mu[0] for i in range(nobj) ])\n vyobs = np.array([objectsB[i].mu[1] - objectsA[i].mu[1] for i in range(nobj) ])\n sxobs = np.array([objectsB[i].sigma[0,0] + objectsA[i].sigma[0,0] for i in range(nobj) ])\n syobs = np.array([objectsB[i].sigma[1,1] + objectsA[i].sigma[1,1] for i in range(nobj) ])\n return xobs, yobs, vxobs, vyobs, sxobs, syobs", "title": "" }, { "docid": "8599cb3593d722a55c18c7ac485cadfe", "score": "0.49472073", "text": "def slice_faces_plane(self,points,faces,plane_normal,plane_origin,pre_bound_index=None):\n\n if len (points) == 0:\n return points, faces\n # dot product of each vertex with the plane normal indexed by face\n # so for each face the dot product of each vertex is a row\n # shape is the same as faces (n,3)\n # dots = np.einsum ('i,ij->j', plane_normal,(points - plane_origin).T)[faces]\n dots = np.einsum ('i,ij->j', plane_normal, (points[:, :3] - plane_origin).T) # (n,) 每个顶点与原向量夹角\n\n flag_points = np.zeros (points.shape[0])\n flag_points[dots < -1e-8] = 1 # outside\n flag_points[dots > 1e-8] = -1 # inside\n flag_points[np.logical_and (dots >= -1e-8, dots <= 1e-8)] = 0 # on plane\n # Find vertex orientations w.r.t. faces for all triangles:\n # -1 -> vertex \"inside\" plane (positive normal direction)\n # 0 -> vertex on plane\n # 1 -> vertex \"outside\" plane (negative normal direction)\n signs = flag_points[faces] # (faces_n,3)\n\n # Find all triangles that intersect this plane\n # onedge <- indices of all triangles intersecting the plane\n # inside <- indices of all triangles \"inside\" the plane (positive normal)\n signs_sum = signs.sum (axis=1, dtype=np.int8)\n signs_asum = np.abs (signs).sum (axis=1, dtype=np.int8)\n\n # Cases:\n # (0,0,0), (-1,0,0), (-1,-1,0), (-1,-1,-1) <- inside\n # (1,0,0), (1,1,0), (1,1,1) <- outside\n # (1,0,-1), (1,-1,-1), (1,1,-1) <- onedge\n onedge = np.logical_and (signs_asum >= 2,np.abs (signs_sum) <= 1)\n\n inside = (signs_sum == -signs_asum)\n\n bount_index = np.argwhere (flag_points == 0)[:, 0] # 在平面上的点为边界点\n bount_color_weigth=np.mean(points[faces[onedge].reshape((-1,))][:,3:],axis=0) #(27,)\n # Automatically include all faces that are \"inside\"\n new_faces = faces[inside]\n\n # Separate faces on the edge into two cases: those which will become\n # quads (two points inside plane) and those which will become triangles\n # (one vertex inside plane)\n triangles = points[faces][:,:,0:3]#(face_n,3,3)\n cut_triangles = triangles[onedge]##(onedge_n,3,3)\n cut_faces_quad = faces[np.logical_and (onedge, signs_sum < 0)] #(1,-1,-1) 得到点的index #(cut_faces_quad_n,3) 3->points_index\n cut_faces_tri = faces[np.logical_and (onedge, signs_sum >= 0)]#(1,1,-1),(1,0,-1)\n\n cut_signs_quad = signs[np.logical_and (onedge, signs_sum < 0)] #对应点在平面哪一边#(cut_faces_quad_n,3) 3-> 0;-1;1\n cut_signs_tri = signs[np.logical_and (onedge, signs_sum >= 0)]\n\n # If no faces to cut, the surface is not in contact with this plane.\n # Thus, return a mesh with only the inside faces\n if len (cut_faces_quad) + len 
(cut_faces_tri) == 0:\n\n if len (new_faces) == 0:\n # if no new faces at all return empty arrays\n empty = (np.zeros ((0, 3), dtype=np.float64),\n np.zeros ((0, 3), dtype=np.int64))\n return empty\n\n # 获取边界点,\n\n\n bount_index = np.unique (bount_index)\n\n # Automatically include all faces that are \"inside\"\n new_faces = faces[inside].reshape (-1)\n if pre_bound_index is not None:\n new_index = np.concatenate ((new_faces, bount_index, pre_bound_index), axis=0)\n else:\n new_index = np.concatenate ((new_faces, bount_index), axis=0)\n # find the unique indices in the new faces\n # using an integer-only unique function\n unique, inverse = grouping.unique_bincount (new_index,\n minlength=len (points),\n return_inverse=True)\n\n # use the unique indices for our final points and faces\n final_vert = points[unique]\n if pre_bound_index is not None:\n final_face = inverse[:new_faces.shape[0]].reshape ((-1, 3))\n new_bound_index = inverse[new_faces.shape[0]:new_faces.shape[0] + bount_index.shape[0]]\n pre_bound_index = inverse[new_faces.shape[0] + bount_index.shape[0]:]\n\n return final_vert, final_face, new_bound_index, pre_bound_index\n\n final_face = inverse[:new_faces.shape[0]].reshape ((-1, 3))\n new_bound_index = inverse[new_faces.shape[0]:]\n\n\n return final_vert, final_face,new_bound_index\n\n # Extract the intersections of each triangle's edges with the plane\n o = cut_triangles # origins #(onedge_n,3,3)\n d = np.roll (o, -1, axis=1) - o # directions\n num = (plane_origin - o).dot (plane_normal) # compute num/denom\n denom = np.dot (d, plane_normal)\n denom[denom == 0.0] = 1e-12 # prevent division by zero\n dist = np.divide (num, denom)\n # intersection points for each segment\n int_points = np.einsum ('ij,ijk->ijk', dist, d) + o\n\n # Initialize the array of new points with the current points\n new_points = points\n\n # Handle the case where a new quad is formed by the intersection\n # First, extract the intersection points belonging to a new quad\n quad_int_points = int_points[(signs_sum < 0)[onedge], :, :] #(quad_int_points_n,3)\n num_quads = len (quad_int_points)\n if num_quads > 0:\n # Extract the vertex on the outside of the plane, then get the points\n # (in CCW order of the inside points)\n quad_int_inds = np.where (cut_signs_quad == 1)[1]\n quad_int_verts = cut_faces_quad[\n np.stack ((range (num_quads), range (num_quads)), axis=1),\n np.stack (((quad_int_inds + 1) % 3, (quad_int_inds + 2) % 3), axis=1)]\n # (cut_faces_quad_n,3) 3->points_index\n # Fill out new quad faces with the intersection points as points\n new_quad_faces = np.append (\n quad_int_verts,\n np.arange (len (new_points),\n len (new_points) +\n 2 * num_quads).reshape (num_quads, 2), axis=1)\n\n # Extract correct intersection points from int_points and order them in\n # the same way as they were added to faces\n new_quad_points = quad_int_points[\n np.stack ((range (num_quads), range (num_quads)), axis=1),\n np.stack ((((quad_int_inds + 2) % 3).T, quad_int_inds.T),\n axis=1), :].reshape (2 * num_quads, 3)\n new_quad_points=np.concatenate((new_quad_points,np.tile(bount_color_weigth,[len(new_quad_points),1])),axis=1)\n # Add new points to existing points, triangulate quads, and add the\n # resulting triangles to the new faces\n bount_index=np.append(bount_index,range(len (new_points),len (new_points) +len(new_quad_points)),axis=0)\n new_points = np.append (new_points, new_quad_points, axis=0)\n new_tri_faces_from_quads = geometry.triangulate_quads (new_quad_faces)\n new_faces = np.append (new_faces, 
new_tri_faces_from_quads, axis=0)\n\n # Handle the case where a new triangle is formed by the intersection\n # First, extract the intersection points belonging to a new triangle\n tri_int_points = int_points[(signs_sum >= 0)[onedge], :, :]\n num_tris = len (tri_int_points)\n if num_tris > 0:\n # Extract the single vertex for each triangle inside the plane and get the\n # inside points (CCW order)\n tri_int_inds = np.where (cut_signs_tri == -1)[1]\n tri_int_verts = cut_faces_tri[range (\n num_tris), tri_int_inds].reshape (num_tris, 1)\n\n # Fill out new triangles with the intersection points as points\n new_tri_faces = np.append (\n tri_int_verts,\n np.arange (len (new_points),\n len (new_points) +\n 2 * num_tris).reshape (num_tris, 2),\n axis=1)\n\n # Extract correct intersection points and order them in the same way as\n # the points were added to the faces\n new_tri_points = tri_int_points[\n np.stack ((range (num_tris), range (num_tris)), axis=1),\n np.stack ((tri_int_inds.T, ((tri_int_inds + 2) % 3).T),\n axis=1),\n :].reshape (2 * num_tris, 3)\n new_tri_points = np.concatenate((new_tri_points,np.tile(bount_color_weigth,[len(new_tri_points),1])),axis=1) # (new_tri_points_n,30)\n # Append new points and new faces\n bount_index = np.append (bount_index, range (len (new_points), len (new_points) + len (new_tri_points)),\n axis=0)\n new_points = np.append (new_points, new_tri_points, axis=0)\n new_faces = np.append (new_faces, new_tri_faces, axis=0)\n\n new_faces = new_faces.reshape (-1)\n if pre_bound_index is not None:\n new_index = np.concatenate ((new_faces, bount_index, pre_bound_index), axis=0)\n else:\n new_index = np.concatenate ((new_faces, bount_index), axis=0)\n # find the unique indices in the new faces\n # using an integer-only unique function\n unique, inverse = grouping.unique_bincount (new_index,\n minlength=len (new_points),\n return_inverse=True)\n\n # use the unique indices for our final points and faces\n final_vert = new_points[unique]\n if pre_bound_index is not None:\n final_face = inverse[:new_faces.shape[0]].reshape ((-1, 3))\n new_bound_index = inverse[new_faces.shape[0]:new_faces.shape[0] + bount_index.shape[0]]\n pre_bound_index = inverse[new_faces.shape[0] + bount_index.shape[0]:]\n\n return final_vert, final_face, new_bound_index, pre_bound_index\n\n final_face = inverse[:new_faces.shape[0]].reshape ((-1, 3))\n new_bound_index = inverse[new_faces.shape[0]:]\n\n return final_vert, final_face, new_bound_index", "title": "" }, { "docid": "d22ed74d1927cc6b56b14824b444dcf4", "score": "0.49459726", "text": "def get_XY(self):\n return self.X, self.Y", "title": "" }, { "docid": "53c53ffb8d2a5eb27eb9521b2afb5fc9", "score": "0.49337316", "text": "def con_2_vectors(axis1: Vector, axis2: Vector, axis1_hint: str, axis2_hint: str, origin:Point):\n projected = vector.project_on_another(axis1, axis2)\n axis2 = vector.con_2_points(point.con_from_vector(projected), point.con_from_vector(axis2))\n axis3 = vector.cross(axis1, axis2)\n\n axis_dic = {'x':None,'y':None,'z':None}\n axis_dic[axis1_hint] = axis1\n axis_dic[axis2_hint] = axis2\n for i in axis_dic:\n if i == None:\n axis_dic[i] = axis3\n\n if any([i is None for i in axis_dic.values()]):\n raise\n return plane.con_3_vectors(*axis_dic.value(),origin)\n\n # # check perpendicularity and if not build new axis2\n # if not np.isclose(vector.dot(axis1, axis2), 0.0, atol=DEF_TOLERANCE):\n # p = point.con_from_vector(axis2)\n # p_on_v = point.perpendicular_on_vector(axis1, p)\n # axis2 = vector.con_2_points(p_on_v, p)\n # # make a 
set\n # axis = {'x':None, 'y':None, 'z':None}\n # axis[axis1_hint] = axis1\n # axis[axis2_hint] = axis2\n #\n # matrices_origin_to_plane = [matrix.translation(vector.con_from_point(origin))]\n # if axis['x'] != None:\n # # if axis is given need to match to origin's axis\n # # determine by rotating which origin axis y or z\n # # TODO what to do with tolarence\n # # if np.isclose(v.z,0.0,atol=TOLERANCE):\n # if not np.isclose(axis['x'].z,0.0,atol=DEF_TOLERANCE):\n # # there is a value to rotate around y axis\n # projected = vector.project_on_xzplane(axis['x'])\n # q = vector.quarter_on_plane(projected,'xz')\n # angle = vector.angle_2_vectors(Vector(1,0,0), projected)\n # if q == 0 or q == 1:\n # angle = -angle\n # to_origin = matrix.rotation_y(angle)\n # to_plane = matrix.rotation_y(-angle)\n # matrices_origin_to_plane.append(to_plane)\n # axis[axis1_hint] = trans.transform(axis[axis1_hint], to_origin)\n # axis[axis2_hint] = trans.transform(axis[axis2_hint], to_origin)\n # if not np.isclose(axis['x'].y,0.0,atol=DEF_TOLERANCE):\n # # there is a value to rotate around z axis\n # projected = vector.project_on_xyplane(axis['x'])\n # q = vector.quarter_on_plane(projected, 'xy')\n # angle = vector.angle_2_vectors(Vector(1,0,0), projected)\n # if q == 0 or q == 1:\n # angle = -angle\n # to_origin = matrix.rotation_z(angle)\n # to_plane = matrix.rotation_z(-angle)\n # matrices_origin_to_plane.append(to_plane)\n # axis[axis1_hint] = trans.transform(axis[axis1_hint], to_origin)\n # axis[axis2_hint] = trans.transform(axis[axis2_hint], to_origin)\n #\n # if axis['y'] != None:\n # if not np.isclose(axis['y'].z,0.0,atol=DEF_TOLERANCE):\n # # there is a value to rotate around x axis\n # projected = vector.project_on_yzplane(axis['y'])\n # q = vector.quarter_on_plane(projected, 'yz')\n # angle = vector.angle_2_vectors(Vector(0, 1, 0), projected)\n # if q == 0 or q == 1:\n # angle = -angle\n # to_origin = matrix.rotation_x(angle)\n # to_plane = matrix.rotation_x(-angle)\n # matrices_origin_to_plane.append(to_plane)\n # axis[axis1_hint] = trans.transform(axis[axis1_hint], to_origin)\n # axis[axis2_hint] = trans.transform(axis[axis2_hint], to_origin)\n # if not np.isclose(axis['y'].x,0.0,atol=DEF_TOLERANCE):\n # # there is a value to rotate around z axis\n # projected = vector.project_on_xyplane(axis['y'])\n # q = vector.quarter_on_plane(projected, 'xy')\n # angle = vector.angle_2_vectors(Vector(0, 1, 0), projected)\n # if q == 0 or q == 1:\n # angle = -angle\n # to_origin = matrix.rotation_z(angle)\n # to_plane = matrix.rotation_z(-angle)\n # matrices_origin_to_plane.append(to_plane)\n # axis[axis1_hint] = trans.transform(axis[axis1_hint], to_origin)\n # axis[axis2_hint] = trans.transform(axis[axis2_hint], to_origin)\n #\n # if axis['z'] != None:\n # # if axis is given need to match to origin's axis\n # # determine by rotating which origin axis y or z\n # # TODO what to do with tolarence\n # # if np.isclose(v.z,0.0,atol=TOLERANCE):\n # if not np.isclose(axis['z'].y,0.0,atol=DEF_TOLERANCE):\n # # there is a value to rotate around y axis\n # projected = vector.project_on_yzplane(axis['z'])\n # q = vector.quarter_on_plane(projected, 'yz')\n # angle = vector.angle_2_vectors(Vector(0, 0, 1), projected)\n # if q == 0 or q == 1:\n # angle = -angle\n # to_origin = matrix.rotation_x(angle)\n # to_plane = matrix.rotation_x(-angle)\n # matrices_origin_to_plane.append(to_plane)\n # axis[axis1_hint] = trans.transform(axis[axis1_hint], to_origin)\n # axis[axis2_hint] = trans.transform(axis[axis2_hint], to_origin)\n # if 
not np.isclose(axis['z'].x,0.0,atol=DEF_TOLERANCE):\n # # there is a value to rotate around z axis\n # projected = vector.project_on_xzplane(axis['z'])\n # q = vector.quarter_on_plane(projected, 'xz')\n # angle = vector.angle_2_vectors(Vector(0, 0, 1), projected)\n # if q == 0 or q == 1:\n # angle = -angle\n # to_origin = matrix.rotation_y(angle)\n # to_plane = matrix.rotation_y(-angle)\n # matrices_origin_to_plane.append(to_plane)\n # axis[axis1_hint] = trans.transform(axis[axis1_hint], to_origin)\n # axis[axis2_hint] = trans.transform(axis[axis2_hint], to_origin)\n #\n # default_plane = Plane([0,0,0],[1,0,0],[0,1,0],[0,0,1])\n # default_plane = trans.transform(default_plane, *matrices_origin_to_plane)\n # return default_plane", "title": "" }, { "docid": "a76feb0b186526c8b1455d62eedc7b37", "score": "0.49314255", "text": "def get_coordinates(self):\n num_points = self.params.x_dim * self.params.y_dim\n x_dim = self.params.x_dim\n y_dim = self.params.y_dim\n\n x_range = (np.arange(x_dim) - (x_dim-1)/2.0)/(x_dim-1)/0.5\n y_range = (np.arange(y_dim) - (y_dim-1)/2.0)/(y_dim-1)/0.5\n x_mat = np.matmul(np.ones((y_dim, 1)), x_range.reshape((1, x_dim)))\n y_mat = np.matmul(y_range.reshape((y_dim, 1)), np.ones((1, x_dim)))\n r_mat = np.sqrt(x_mat*x_mat + y_mat*y_mat)\n x_mat = np.tile(x_mat.flatten(), self.params.batch_size).reshape(\n self.params.batch_size, num_points, 1)\n y_mat = np.tile(y_mat.flatten(), self.params.batch_size).reshape(\n self.params.batch_size, num_points, 1)\n r_mat = np.tile(r_mat.flatten(), self.params.batch_size).reshape(\n self.params.batch_size, num_points, 1)\n return x_mat, y_mat, r_mat", "title": "" }, { "docid": "8847937be3cf440061548995c93ba088", "score": "0.49223593", "text": "def _check_X_y(self, X, y):\r\n assert set(y) == {-1, 1}, 'Response variable must be ±1'\r\n return X, y", "title": "" }, { "docid": "4a31d0bdfa0711fd03f330270d03f81a", "score": "0.49209887", "text": "def calculate(triangle, a, b, c):\r\n x = triangle[0][0] * a + triangle[1][0] * b + triangle[2][0] * c\r\n y = triangle[0][1] * a + triangle[1][1] * b + triangle[2][1] * c\r\n return x, y", "title": "" }, { "docid": "fbc5d2d2ecfa7c1ed45f74afceacd086", "score": "0.49187112", "text": "def crossProduct(v1,v2):\n x1=v1.destination.x-v1.origin.x\n y1=v1.destination.y-v1.origin.y \n\n x2=v2.destination.x-v2.origin.x \n y2=v2.destination.y-v2.origin.y \n\n return crossProductBasic(x1,y1,x2,y2)", "title": "" }, { "docid": "13cc84321500dfde1f27a0ed6fd9927f", "score": "0.49175578", "text": "def cross(p1, p2):\n x = p1[1] * p2[2] - p1[2] * p2[1]\n y = p1[2] * p2[0] - p1[0] * p2[2]\n z = p1[0] * p2[1] - p1[1] * p2[0]\n return x, y, z", "title": "" }, { "docid": "7d5b50a54e46992e8a5432f04f518971", "score": "0.49167523", "text": "def line_between_points(x1, y1, x2, y2):\n coefficients = np.polyfit([x1, x2], [y1, y2], 1)\n return coefficients[0], coefficients[1]", "title": "" }, { "docid": "ffc78018017cdcee5613c108634b5ea1", "score": "0.49135733", "text": "def stuffit(a, b ,c, x1, x2, debug=False):\n x1 = np.array(x1).reshape((1,3))\n x2 = np.array(x2).reshape((1,3))\n line = np.vstack((x1,x2))\n # Hey, first get the normals\n n = normalise(x2 - x1)\n # We now have a nice normal vector...\n # Parametrise the plane orthogonal to that\n if abs(n[0,2]) > 0.2:\n # Hmmm - improve here...\n e1 = n + np.array([[10, 0, 0]])\n else:\n e1 = n + np.array([[0, 0, 10]])\n if debug:\n LOG.debug(\"Was: %s\" % str(n))\n LOG.debug(\"Now: %s\" % str(e1))\n e1 = normalise(project_onto_complement(n, e1))\n if debug:\n 
LOG.debug(\"in plane: %s\" % str(e1))\n # This fixes our orientation\n e2 = np.cross(n, e1)\n miss= np.fabs(e2 - project_onto_complement(n, e2))\n assert (np.fabs(miss) < 1e-5).all()\n\n LOG.debug(\"miss: %.6f\" % miss.max())\n LOG.debug(\"Basis:\")\n LOG.debug(\"e1: %s\" % str(e1))\n LOG.debug(\"e2: %s\" % str(e2))\n LOG.debug(\"e1 dot e2: %.7f \" % (e1*e2).sum())\n LOG.debug(\"norm e2: %.7f\" % (e2**2).sum())\n LOG.debug(\"norm e1: %.7f\" % (e1**2).sum())\n LOG.debug(\"norm n: %.7f\" % (n**2).sum())\n LOG.debug(\"normal: %s\" % str(n))\n \n # Now find a basis for the 'in-between' plane, where points on \n # ellipsoid, and normals, are parametrised by unit vectors\n te1 = transform_to_ellipsoid(e1, a, b, c)\n te2 = transform_to_ellipsoid(e2, a, b, c)\n te1, te2 = basis_to_onb(te1, te2)\n det = (te2*np.cross(n,te1)).sum()\n LOG.debug(\"WUP: %.2f\" % (det))\n if debug:\n # Get some points on the ellipsoid, if we wanna do some plotting\n ellipse = get_some_points_on_ellipsoid(a, b, c)\n # Parametrise some normals in that plane.\n vs = np.linspace(0, 2*np.pi, 10000).reshape((10000,1))\n ns = np.cos(vs)*te1 + np.sin(vs)*te2\n on_ellipsoid = transform_to_ellipsoid(ns, a, b, c)\n in_plane = project_onto_complement(n, on_ellipsoid)\n \n LOG.debug(\"ONB for TP:\")\n LOG.debug(\"e1: %s, e2: %s\" % (str(te1), str(te2)))\n LOG.debug(\"norms: %.6f %.6f, dot: %.6f\" % ((te1**2).sum(), (te2**2).sum(), (te1*te2).sum()))\n # OK, find the matrix for the composite map ( to_ellipsoid -> project_onto_complement)\n # Going from T(P) to P.\n # We are using the basis for P determined above {e1, e2}, and {te1, te2} for T(P)\n # Image vectors:\n pe1 = project_onto_complement(n, transform_to_ellipsoid(te1, a ,b, c))\n pe2 = project_onto_complement(n, transform_to_ellipsoid(te2, a, b, c))\n # Coefficients of composite wrt current basis {e1, e2} (and identified as T(P))\n a11 = (pe1*e1).sum()\n a21 = (pe1*e2).sum()\n a12 = (pe2*e1).sum() \n a22 = (pe2*e2).sum()\n sym_dif = ((pe1*e2).sum() - (pe2*e1).sum())\n LOG.debug(\"IS SYMMETRIC: %.6f\" % sym_dif)\n # OK - so not symmetric...\n A = np.array(((a11,a12),(a21,a22)))\n LOG.debug(\"Determinant of A: %.4f\" % (np.linalg.det(A)))\n u, w, v = np.linalg.svd(A)\n LOG.debug(\"Singular values: %.7f, %.7f\" % (w[0],w[1]))\n # AND: make sure we preserve orientation here\n # These are the eigenvectors expressed in the 'standard' basis\n u1 = u[:,0]\n u2 = u[:,1]\n v1 = v[:,0]\n v2 = v[:,1]\n ee1 = u1[0]*e1+u1[1]*e2\n ee2 = u2[0]*e1+u2[1]*e2\n # corresponding basis of TP:\n tee1 = v1[0]*te1+v1[1]*te2\n tee2 = v2[0]*te1+v2[1]*te2\n # linalg.svd seems to sometime reverse the orientation\n det_u = np.linalg.det(u)\n det_v = np.linalg.det(v)\n det = (ee2*np.cross(n,ee1)).sum()\n #if det_u < 0:\n # ee1 = -ee1\n # tee1 = -tee1\n LOG.debug(\"Det U: %.2f, Det V: %.2f, det: %.2f\" % (det_u, det_v, det))\n LOG.debug(\"'Eigenbasis' for P, TP:\")\n LOG.debug(\"ee1: %s, tee1: %s\" % (str(ee1), str(tee1)))\n LOG.debug(\"ee2: %s, tee2: %s\" % (str(ee2), str(tee2)))\n LOG.debug(\"DOTS: %.8f %.8f\" % ((ee1*n).sum(), (ee2*n).sum()))\n LOG.debug(\"NORMS %.8f %.8f\" % ( (ee1**2).sum(), (ee2**2).sum()))\n LOG.debug(\"ONB? %.8f\" % (ee1*ee2).sum())\n # Get the 'line point' in the orthogonal plane...\n L = project_onto_complement(n, x1)\n # Just checking...\n miss= np.fabs((L -project_onto_complement(n, x2)))\n assert miss.max() < 1e-5\n # compute x,y in 'eigenbasis' - same coords up in TP... 
\n x = (ee1*L).sum()\n y = (ee2*L).sum()\n #Try to minimize - brute force\n p, dist = project_onto_ellipse(x, y, w)\n if debug:\n # Just debugging\n xp = (in_plane * ee1).sum(axis=1)\n yp = (in_plane * ee2).sum(axis=1)\n plt.plot(xp, yp)\n plt.scatter(x, y, s=20)\n plt.scatter(p[0], p[1], s=49)\n plt.axis(\"equal\")\n plt.show()\n # Coordinates of normal in 'eigenbasis' - using the \"inverse\" transform\n # Use q inverse\n \n n1 = p[0] / w[0]\n n2 = p[1] / w[1]\n # We now use the identification of P and TP\n unit_vector = n1*tee1 + n2*tee2\n # Sometimes we seem to get the antipodal vector, due to some orientation thing (in numpy?)\n # Corresponds to multiplying either u or v with -1\n normal = normalise(transform_to_ellipsoid(unit_vector, 1/a, 1/b, 1/c))\n direc = (L*normal).sum()\n LOG.debug(\"DIRECTION: %.4f\" % direc)\n if direc < 0:\n unit_vector = - unit_vector\n LOG.debug(\"Unit vector: %s\" % unit_vector)\n assert abs(np.sqrt((unit_vector**2).sum()) - 1) < 1e-5\n final_point = transform_to_ellipsoid(unit_vector, a, b, c)\n if debug:\n plot_in_3d([ellipse],[line],[final_point[0]])\n final_normal = normalise(transform_to_ellipsoid(unit_vector, 1/a, 1/b, 1/c))\n final_dot = (final_normal*n).sum()\n LOG.debug(\"FINAL DOT: %.6f\" % final_dot)\n return final_normal, dist", "title": "" }, { "docid": "b070b5973eb662e593f7e17f399c6b5c", "score": "0.4904806", "text": "def blintersection((x1,y1),(x2,y2)):\n if x1>=x2:\n m1=x1\n else:\n m1=x2\n \n if y1>=y2:\n m2=y1\n else:\n m2=y2\n return (m1,m2)", "title": "" }, { "docid": "0af45ca1debbe5d99bb6c6cad817e923", "score": "0.4904467", "text": "def plane2(plane, outname=None, which='xy', simulation=None, interpolation=None,\n axes=[], levels=None, logscale=False, clim=[], title='',\n set_cbar=True, cmap='viridis', xlim=[], ylim=[], aspect='equal', \n nbins=6, clabel='', axis=None, label_xy=True, norm_axes=False):\n import numpy as np\n import matplotlib.pyplot as plt\n from .utils import get_ticks\n\n sim = simulation\n if axis: plt.sca(axis)\n\n #------------\n # Sets levels and ticks to use on the contour\n ticklabels, formatting, levels_con = get_ticks(plane, levels=levels, logscale=logscale, clim=clim, nbins=nbins)\n vmin=levels_con.min()\n vmax=levels_con.max()\n #------------\n\n #------------\n # Sets x y grid\n if axes:\n ax1, ax2 = axes\n else:\n if simulation:\n if which == 'xy':\n ax1 = sim.domain.x\n ax2 = sim.domain.y\n elif which == 'xz':\n ax1 = sim.domain.x\n ax2 = sim.domain.z\n else:\n ax1 = np.arange(0, plane.shape[0])\n ax2 = np.arange(0, plane.shape[1])\n if norm_axes:\n ax1/=sim.inv_depth\n ax2/=sim.inv_depth\n else:\n ax1 = np.arange(0, plane.shape[0])\n ax2 = np.arange(0, plane.shape[1])\n xmin=ax1.min()\n ymin=ax2.min()\n xmax=ax1.max()\n ymax=ax2.max()\n #------------\n\n #----------\n # Sets labels latex style (no label is set if which is not given\n if label_xy:\n if which=='xz':\n plt.xlabel('$\\mathbf{x(m)}$')\n plt.ylabel('$\\mathbf{z(m)}$')\n elif which=='xy':\n plt.xlabel('$\\mathbf{x(m)}$')\n plt.ylabel('$\\mathbf{y(m)}$')\n #----------\n\n #----------\n # To plot logscale, we must do it manually\n if logscale:\n from matplotlib.colors import LogNorm\n im = plt.imshow(plane.T, interpolation=interpolation, animated=True, cmap=cmap, origin='lower',\n norm=LogNorm(vmin=vmin, vmax=vmax), extent=[xmin, xmax, ymin, ymax])\n else:\n im = plt.imshow(plane.T, interpolation=interpolation, animated=True, cmap=cmap, origin='lower',\n vmin=vmin, vmax=vmax, extent=[xmin, xmax, ymin, ymax])\n #----------\n\n #-------\n # Actual 
plotting is done here\n im.axes.set_xlim(*xlim)\n im.axes.set_ylim(*ylim)\n im.axes.grid()\n #-------\n\n #-------\n # If this is going to animation, cbar shoudnt be set here\n if set_cbar:\n cbar = plt.colorbar(label=clabel)\n plt.gca().set_aspect(aspect)\n plt.title(title)\n plt.tight_layout()\n #-------\n\n if outname:\n print('saving figure...', end='')\n plt.savefig(outname, bbox_inches='tight')\n plt.close()\n print('done.')\n return im\n else:\n return im", "title": "" }, { "docid": "ea9b54628040170b02e50c42408b603e", "score": "0.4900545", "text": "def get_a_b_points(self):\n return self.a_point, self.b_point", "title": "" } ]
4f00eaca0fc433e411723abda22c137d
Residual bias to apply to the original series.
[ { "docid": "320c987c473f838ff6deea0f1ae4e5d8", "score": "0.57894796", "text": "def bias(self):\n return self.mbmod.bias", "title": "" } ]
[ { "docid": "acb249e0cd8c60c67e636075f8019064", "score": "0.6814907", "text": "def bellman_residual(self, Q, samples, weights=None):\n pass", "title": "" }, { "docid": "e49b2a09e0363f2a786c0dc2bed04578", "score": "0.6548202", "text": "def __residuals__(self):\n\t\tself.fitted = np.dot(self.X, self.coef)\n\t\tself.residuals = self.Y - self.fitted \n\t\tself.got_residuals = True", "title": "" }, { "docid": "6f47b8539f11ca6a846b40848064c8c8", "score": "0.6434929", "text": "def _get_bias(self):\n return int (self.fit_intercept) - .5", "title": "" }, { "docid": "cc169688222b3626796cc6404e418ba0", "score": "0.6397226", "text": "def get_residual(self, key):\n return np.subtract(self.y[key], self.model.predict(self.X[key]))", "title": "" }, { "docid": "f479ffe7c5f60a3a7a90b3a7c4f14b33", "score": "0.63844305", "text": "def compute_bellman_residual(self, batch, target_state_action_value=None):\n raise NotImplementedError", "title": "" }, { "docid": "5a7b31390280b0e3a424ac8dd139b86d", "score": "0.6373636", "text": "def update_residual(self):\n mm = _utils.matmul\n self.R = mm(self.A, self.X) - mm(self.B, self.X) * self.E", "title": "" }, { "docid": "6283942d9f0e504f3739683ed6faf942", "score": "0.63163245", "text": "def functional_residual(self, hidden_vars):\r\n return self.functional(hidden_vars) - self.observe(self.params)", "title": "" }, { "docid": "5a571d1e196ade194c0f38d742a0be98", "score": "0.62852705", "text": "def adjust_bias(self):\n self.bias += self.lr * self.delta", "title": "" }, { "docid": "11c11a5fd9f7e34e1ed4c5771e7f946e", "score": "0.623525", "text": "def _residuals(self, theta: np.array) -> np.ndarray:\n return self.data.X * theta + self.data.offsets - self.data.y", "title": "" }, { "docid": "c5b40506b995f7d786a2b364c0dce33c", "score": "0.622053", "text": "def _calculate_residual(self, parameters) -> np.ndarray:\n\n simulated_data = self._simulate_experiment(parameters)\n simulated_data = simulated_data.drop(\n simulated_data.columns.difference(self.cols), axis=1\n )\n\n return np.array(self.experimental_data - simulated_data)", "title": "" }, { "docid": "a5c80beb9dfaa96739da06f8da057d01", "score": "0.62114096", "text": "def norm_residual(self) -> float:\n return self.fixed_point_residual", "title": "" }, { "docid": "5166c044c39969eedd6fc595d47fe595", "score": "0.6194359", "text": "def residual(a, b):\n d = np.subtract(a, b)\n d[3:6] = anorm(d[3:6])\n return d", "title": "" }, { "docid": "bec765e6d53a95979ed62fe6e8ae0650", "score": "0.60987407", "text": "def obtener_bias(self):\n\t\treturn self.__bias", "title": "" }, { "docid": "11f51357f3467c9030ef866ec37dd7f0", "score": "0.60954994", "text": "def bias_error(self, dddx):\n return -self.dt**3 * dddx / (2*self.k)", "title": "" }, { "docid": "8c46145dd0851f01eefe9ab090d9b433", "score": "0.60768604", "text": "def graminv_residual(self) -> np.ndarray:\n raise NotImplementedError", "title": "" }, { "docid": "3c4ac5fb26903fc418895724140011a3", "score": "0.60137534", "text": "def bellman_residual(self, Q, samples, weights=None):\n _, _, _, r, s_prime, absorbing, sa = utils.split_data(samples, self._state_dim, self._action_dim)\n return self._bellman_residual_single(Q, r, s_prime, absorbing, sa) if weights is None else \\\n self._bellman_residual_multi(Q, r, s_prime, absorbing, sa, weights)", "title": "" }, { "docid": "105e24444c00cae1325638e0f88b8cc7", "score": "0.60075486", "text": "def residual(a, b):\n y = a - b\n y[2] = normalize_angle(y[2])\n return y", "title": "" }, { "docid": "0ef351e42993f38969144361f30ae24a", "score": 
"0.59884536", "text": "def getResiduals(self):\n return self.y - self.getBestFitModel()", "title": "" }, { "docid": "9c38772daa3ab529e594f09de59ac165", "score": "0.59681195", "text": "def _bellman_residual_multi(self, Q, r, s_prime, absorbing, sa, weights):\n current_w = Q._w\n Q_values_prime = Q.value_actions_weights(s_prime, weights, absorbing)\n amax = np.argmax(Q_values_prime, axis=1).astype(\"int64\")\n state = np.repeat(np.arange(s_prime.shape[0], dtype=\"int64\"), weights.shape[0])\n maxQ = self._q_target.value_actions(s_prime, absorbing)\n maxQ = maxQ[state, amax.flatten()].reshape(s_prime.shape[0], weights.shape[0])\n ret = r[:, None] + self._gamma * maxQ * (1 - absorbing[:, None]) - Q.value_weights(sa, weights)\n Q._w = current_w\n return ret", "title": "" }, { "docid": "5ca7d625fdc4ecaf076e5bc9b6440455", "score": "0.5920252", "text": "def _bellman_residual_single(self, Q, r, s_prime, absorbing, sa):\n Qs_prime = Q.value_actions(s_prime, absorbing)\n maxQ = self._q_target.value(np.concatenate((s_prime, np.argmax(Qs_prime, axis=1)[:, np.newaxis]), axis=1))\n return r + self._gamma * maxQ * (1 - absorbing) - Q.value(sa)", "title": "" }, { "docid": "dfe379424feb9b7ee8b8342fb1aa7562", "score": "0.5913846", "text": "def mean_bias_error(y_trues, y_preds):\r\n return np.mean(y_trues - y_preds)", "title": "" }, { "docid": "684d316999c05de575a03be82ed44592", "score": "0.5913611", "text": "def maximal_linear_bias_relative(self):\n return self.maximal_linear_bias_absolute()/(2.0**self.m)", "title": "" }, { "docid": "e23f9c6694c5ebc766be859095ba5282", "score": "0.5897362", "text": "def lr_mult_bias(self):\n return 2., 20.", "title": "" }, { "docid": "e23f9c6694c5ebc766be859095ba5282", "score": "0.5897362", "text": "def lr_mult_bias(self):\n return 2., 20.", "title": "" }, { "docid": "c725100ac63cff97cb21d818d39c8020", "score": "0.5861002", "text": "def resid(self):\n return self.Y - self.predicted", "title": "" }, { "docid": "955c648f637bf244adce3741cdaf34bc", "score": "0.5860171", "text": "def resid(self):\n beta = self.theta # the LikelihoodModelResults has parameters named 'theta'\n X = self.model.design\n return self.Y - self.predicted", "title": "" }, { "docid": "0ea0e0e02ab25cb9dac0a5487ffcc072", "score": "0.58432376", "text": "def residual(source,recovered):\r\n residual=np.abs(source-recovered)/np.max(np.abs(source))*100\r\n return(residual)", "title": "" }, { "docid": "074fa314213cc27c179b5c4d2d26f17e", "score": "0.5820992", "text": "def biasDecisionStump():\n global bias\n minErr = 0\n minBias = 0\n _y = np.sort(y)\n for i in range(len(_y)):\n bias = y[i]\n _Err, _bias = squareErr()\n if i == 0:\n minErr = _Err\n if _Err <= minErr:\n minErr = _Err\n minBias = _bias\n bias = [minBias] * numberOfSample * 2", "title": "" }, { "docid": "9eb06feab7f4615bbe61c4b5c36c686a", "score": "0.5817287", "text": "def dense_residual_block(net, weights, shifts, scales):\n tmp = dense_conv(net, weights[0], 1, shifts[0], scales[0])\n return net + dense_conv(tmp, weights[1], 1, shifts[1], scales[1], relu=False)", "title": "" }, { "docid": "c1f7f0f912b8bf9dbdc6f29f7bc59dfa", "score": "0.5799102", "text": "def _residual(self, paras, t, data):\n x0 = paras['s'].value, paras['e'].value, paras['i'].value, paras['z'].value\n model = self._g(t, x0, paras)\n # you only have data for one of your variables\n x2_model = model[:, 2]\n return (x2_model - data).ravel()", "title": "" }, { "docid": "8db46bb6cde235e169147f5b7de42d30", "score": "0.5778184", "text": "def GetBiasVector(self):\n ...", "title": "" }, { 
"docid": "63f655cd755034dc249cd23f729ef34a", "score": "0.57344204", "text": "def residual(self, params, data, obs):\n Jmax = params['Jmax'].value\n Vcmax = params['Vcmax'].value\n Rd = params['Rd'].value\n\n if hasattr(data, \"Par\"):\n (An, Anc, Anj) = self.call_model(Ci=data[\"Ci\"], Tleaf=data[\"Tleaf\"],\n Par=data[\"Par\"], Jmax=Jmax,\n Vcmax=Vcmax, Rd=Rd)\n else:\n\n (An, Anc, Anj) = self.call_model(Ci=data[\"Ci\"], Tleaf=data[\"Tleaf\"],\n Jmax=Jmax, Vcmax=Vcmax, Rd=Rd)\n return (obs - An)", "title": "" }, { "docid": "f5623ec2e5ae8ebbc336ebdd4ec9ac65", "score": "0.5732392", "text": "def relative_change(self):\n assert(self.m > 1)\n if len(self.residual_vectors) == 0:\n change = 1.0\n else:\n #change = norm(self.residual_vectors[-1])/norm(self.trial_vectors[-1])\n # average relative change over the last 2 iterations\n\n change = 0.0\n navg = min(len(self.residual_vectors), 2)\n for i in range(1, navg+1):\n change += norm(self.residual_vectors[-i])/norm(self.trial_vectors[-i])\n change /= float(navg)\n\n #\n return change", "title": "" }, { "docid": "bde472ae493434824290d38d605b1fe9", "score": "0.5717927", "text": "def MSE_residual(A, B):\n residual = []\n for i in range(A.Values.shape[0]):\n for j in range(A.Values.shape[1]):\n if A.Values[i,j] != A.Values[i,j]: # Null check \n continue \n x = i * A.Spacing[0] + A.Origin[0]\n y = j * A.Spacing[1] + A.Origin[1] \n f_r = BilinearInterpolate(x=x, y=y, image=B)\n residual.append(A.Values[i,j] - f_r)\n return np.array([residual], dtype=float).T", "title": "" }, { "docid": "4d4bc604443f5b42b507882fcaca4927", "score": "0.5681197", "text": "def rir_to_lossy(self):\n return self.to_lossy(True, *self.to_reversible())", "title": "" }, { "docid": "5ec834d4755503e7fa07aa435f06f17d", "score": "0.5673244", "text": "def _residual(self, x, in_filter, out_filter, stride,\n activate_before_residual=False):\n if activate_before_residual:\n with tf.variable_scope('shared_activation'):\n x = self._batch_norm('init_bn', x)\n x = self._relu(x, self.relu_leakiness)\n orig_x = x\n else:\n with tf.variable_scope('residual_only_activation'):\n orig_x = x\n x = self._batch_norm('init_bn', x)\n x = self._relu(x, self.relu_leakiness)\n\n with tf.variable_scope('sub1'):\n x = self._conv('conv1', x, 3, in_filter, out_filter, stride)\n\n with tf.variable_scope('sub2'):\n x = self._batch_norm('bn2', x)\n x = self._relu(x, self.relu_leakiness)\n x = self._conv('conv2', x, 3, out_filter, out_filter, [1, 1, 1, 1])\n\n with tf.variable_scope('sub_add'):\n if in_filter != out_filter:\n orig_x = tf.nn.avg_pool(orig_x, stride, stride, 'VALID')\n orig_x = tf.pad(\n orig_x, [[0, 0], [0, 0], [0, 0],\n [(out_filter - in_filter) // 2, (out_filter - in_filter) // 2]])\n x += orig_x\n\n tf.logging.debug('image after unit %s', x.get_shape())\n return x", "title": "" }, { "docid": "482cb1c5b03d254beb29fdfe1fceb7a9", "score": "0.5665806", "text": "def compute_residual(self, state, **kwargs):\n assert_is_instance(state, ISolverState, descriptor=\"Solver's State\", checking_obj=self)", "title": "" }, { "docid": "3856a86c465beded9c35789a4cfb5a55", "score": "0.5653214", "text": "def deviance_residual(y, log_prob):\n if not (np.unique(y) == np.arange(2)).all():\n raise ValueError(\"y must be encoded as 1 or 0\")\n return (2 * y - 1) * np.sqrt(- 2 * (y * log_prob[:, 0] + (1 - y) * log_prob[:, 1]))", "title": "" }, { "docid": "e8d7b6016c4b894d65df316aa39a1be9", "score": "0.56496125", "text": "def residual(self):\n return cvxtypes.abs()(self._expr)", "title": "" }, { "docid": 
"9b013948435ad7c3891f3f0692184384", "score": "0.5639185", "text": "def GetResidual(Y,window=120,groups=[12,6,4,3,2]):\n Yt = SSA(Y,window,groups)\n R = Y-Yt.sum(axis=0)\n return R", "title": "" }, { "docid": "73e81e3a6f77cf50b27faadd1838cc60", "score": "0.56135356", "text": "def residual(self, parameters, data, obs):\n Ea = parameters[\"Ea\"].value\n\n if self.peaked:\n Hd = parameters[\"Hd\"].value\n delS = parameters[\"delS\"].value\n # First arg is 1.0, as this is calculated at the normalised temp\n model = self.call_model(1.0, Ea, data[\"Tav\"], delS, Hd)\n else:\n model = self.call_model(1.0, Ea, data[\"Tav\"])\n\n return (obs - model)", "title": "" }, { "docid": "dbccf2864990ab33c977dab65b389cd6", "score": "0.560917", "text": "def temp_bias(self, value):\n self.ela_h = self.orig_ela_h + value * 150\n self._temp_bias = value", "title": "" }, { "docid": "1301927c8355d741b94db59f7eb6857c", "score": "0.55985814", "text": "def temp_bias_series(self):\n return self._temp_bias_series", "title": "" }, { "docid": "bb19c1a5bb89794477d85b334afdab5b", "score": "0.55953443", "text": "def test_regress_residuals(self):\n x = [1.0, 2.0, 3.0, 4.0, 5.0]\n y = [2.1, 4.2, 5.9, 8.4, 9.6]\n result = regress_residuals(x, y)\n assert_allclose(result, [-0.1, 0.08, -0.14, 0.44, -0.28])", "title": "" }, { "docid": "b7f85b50df9f2c72a7a603b1643c6784", "score": "0.55939716", "text": "def bias(ground_station_data, model_data):\n return metrics.bias(ground_station_data, model_data)", "title": "" }, { "docid": "f31a46f30c4c326cf451b5f77e3fb044", "score": "0.55611104", "text": "def get_residualplot(self) -> None:", "title": "" }, { "docid": "7b606e71767294d0c05d37f0bffcc18e", "score": "0.5555755", "text": "def fit_residual(self):\n chain = self.cpartition.chain\n self.lhs_tls.fit_residual(chain)\n self.rhs_tls.fit_residual(chain)", "title": "" }, { "docid": "5397fd540bd6ceae4ac9d410fb72ced7", "score": "0.5517607", "text": "def squareErr():\n errSum = 0\n for i in range(len(x)):\n diff = np.subtract( y[i], x[i]*np.transpose(w)+bias )\n err = np.sum(np.square(diff))\n errSum += err\n return errSum, bias", "title": "" }, { "docid": "0f0c26fd2e1d04ccbf858bc9a2530b7a", "score": "0.5497446", "text": "def getResidual(self, initial, params, pivot, settings):\n residual = initial.copy()\n for t, (target, data) in enumerate(params.items()):\n mask = data['mask']\n residual[:, t] = np.ma.MaskedArray(residual[:, t], mask=mask, fill_value=np.nan).filled()\n return residual", "title": "" }, { "docid": "417b9c9831390261e8aad4a6304b16de", "score": "0.54861903", "text": "def resamp(i,x,rg):\n # we explain the Gibbs in the hw, so we omit the comments here\n if i ==0:\n y = x[1]/2+np.sqrt(1/2)*rg.normal()\n elif i == len(x)-1:\n y = x[-2]/2 + np.sqrt(1/2)*rg.normal()\n else:\n y = (x[i-1]+x[i+1])/2+np.sqrt(1/2)*rg.normal()\n\n return y", "title": "" }, { "docid": "05e7b604b2f398c60c6db6efefc27bcf", "score": "0.5476715", "text": "def residual_of(self, tag, z):\n z_input = self.gen_complete_measurement(tag,z)\n return np.subtract(z_input, self.HJacob(self.x,tag = tag)@self.x_prior)", "title": "" }, { "docid": "d9fde455c16c6404e01764c240d35d24", "score": "0.547452", "text": "def resid(self):\n return self.endog[self.k_ar :] - self.fittedvalues", "title": "" }, { "docid": "3b174ff0681bc5d2da41b521cbbc2dc2", "score": "0.5471201", "text": "def _residual(self, inputs, stride=1, outputs_collections=None, scope=None):\n\t\twith tf.variable_scope(scope, 'residual', [inputs]) as sc:\n\t\t\tdepth_in = slim.utils.last_dimension(inputs.get_shape(), 
min_rank=4)\n\t\t\tif self.use_batch_norm:\n\t\t\t\tpreact = slim.batch_norm(inputs, activation_fn=tf.nn.relu, scope='preact')\n\t\t\telse:\n\t\t\t\tpreact = tf.nn.relu(inputs)\n\t\t\tif depth_in != self.features:\n\t\t\t\tshortcut = slim.conv2d(preact, self.features, [1, 1], stride=stride,\n \t normalizer_fn=None, activation_fn=None,\n \t scope='shortcut')\n\t\t\telse:\n\t\t\t\tif stride == 1:\n\t\t\t\t\tshortcut = preact\n\t\t\t\telse:\n\t\t\t\t\tshortcut = slim.max_pool2d(preact, [1, 1], stride=stride, scope='shortcut')\n\t\t\tresidual = slim.conv2d(preact, self.features//4, [1, 1], stride=1, scope='conv1')\n\t\t\tresidual = slim.conv2d(residual, self.features//4, 3, stride=stride, scope='conv2')\n\t\t\tresidual = slim.conv2d(residual, self.features, [1, 1], stride=1,\n normalizer_fn=None, activation_fn=None,\n scope='conv3')\n\t\t\toutput = shortcut + residual\n\t\t\treturn slim.utils.collect_named_outputs(outputs_collections,\n sc.name,\n output)", "title": "" }, { "docid": "a13c424ac3d05d6873d2acb686a83943", "score": "0.5430977", "text": "def _residual_fn(self, t: np.ndarray, a: np.ndarray,\n f: np.ndarray) -> BeatResidualFunc: # pylint: disable=W0613\n # Time axis for model integration\n t_max = np.max(t)\n u = np.arange(np.ceil(t_max * self.fs).astype(int), dtype=float)\n np.true_divide(u, self.fs, out=u)\n a_lin = utils.dsp.db2a(a)\n\n def _residual_fn_(beat_args: BeatModelParams) -> np.ndarray:\n a_est = np.interp(t, u, beatsdrop.ModalBeat(*beat_args).am(u))\n return np.reshape(np.subtract(a_lin, a_est), newshape=(-1,))\n\n return _residual_fn_", "title": "" }, { "docid": "fe00605cd9bcaa434befbe1c8bbf8e77", "score": "0.5425811", "text": "def __call__(self, results):\n return ar_bias_correct(results, self.p, self.invM)", "title": "" }, { "docid": "ae3c231271e7673859e94e234f8af02f", "score": "0.5420792", "text": "def observation_bias(self, k):\n return 0.0", "title": "" }, { "docid": "e1a0126f23af7e6e0791e7deff2c19e6", "score": "0.5417861", "text": "def establecer_bias(self, bias):\n\t\tself.__bias = bias", "title": "" }, { "docid": "4bbcd88e13b4c2993872e063f1408105", "score": "0.5414796", "text": "def _bias_from_sr_and_pod(success_ratio_array, pod_array):\n\n return pod_array / success_ratio_array", "title": "" }, { "docid": "602e37e6d40c3e4a85816c0abb59069c", "score": "0.54092854", "text": "def bias_filter(self, data, count):\n if count >= len(data):\n return data\n signals = data[:count]\n remains = data[count:]\n bias = np.average(remains)\n return signals - bias", "title": "" }, { "docid": "4802c5df6a9c314a98906724b9e50b30", "score": "0.54090446", "text": "def df_resid(self):\n return self.nobs - self.df_model", "title": "" }, { "docid": "7afda8a026c4bf2a30c8f23b01a1b938", "score": "0.53935736", "text": "def _lstsq_residual(b, n, nrhs):\n raise NotImplementedError", "title": "" }, { "docid": "9ababfa949869fb09f3f23b0372e62f2", "score": "0.5392154", "text": "def exponential_residual(p, x, y):\n # translate parameter values to dict and compute predicted xalue for x\n v = p.valuesdict()\n prediction = exponential(x, v['neutral_cells'], v['fitness'], v['origin'])\n return prediction - y", "title": "" }, { "docid": "fbafdc7c85977df905acd0ce6034cb7a", "score": "0.53873634", "text": "def initialize_bias(self, bias: torch.Tensor):\n assert bias.ndim == 1\n n = bias.shape[0]\n upper_bound = n // 2\n lower_bound = -((n - 1) // 2)\n # Use float64 to prevent precision loss due to cumsum\n x = torch.arange(lower_bound, upper_bound + 1, dtype=torch.float64)\n x1 = x - 0.5\n x2 = x + 0.5\n 
if self._transform is not None:\n x = self._transform.inverse_transform(x)\n x1 = self._transform.inverse_transform(x1)\n x2 = self._transform.inverse_transform(x2)\n probs = (x2 - x1) / (x**2 + 1)\n probs = probs / probs.sum()\n probs = probs.cumsum(dim=0)\n probs = torch.cat(\n [torch.tensor([1e-20], dtype=torch.float64), probs[:-1]], dim=0)\n with torch.no_grad():\n bias.copy_(((1 - probs) / probs).log().to(torch.float32))", "title": "" }, { "docid": "772b1bb516a7a8fcfe33f5c1b65f57ed", "score": "0.5367908", "text": "def _bottleneck_residual(self, x, in_filter, out_filter, stride,\n activate_before_residual=False):\n if activate_before_residual:\n with tf.variable_scope('common_bn_relu'):\n x = self._batch_norm('init_bn', x)\n x = self._relu(x, self.relu_leakiness)\n orig_x = x\n else:\n with tf.variable_scope('residual_bn_relu'):\n orig_x = x\n x = self._batch_norm('init_bn', x)\n x = self._relu(x, self.relu_leakiness)\n\n with tf.variable_scope('sub1'):\n x = self._conv('conv1', x, 1, in_filter, out_filter / 4, stride)\n\n with tf.variable_scope('sub2'):\n x = self._batch_norm('bn2', x)\n x = self._relu(x, self.relu_leakiness)\n x = self._conv('conv2', x, 3, out_filter / 4, out_filter / 4, [1, 1, 1, 1])\n\n with tf.variable_scope('sub3'):\n x = self._batch_norm('bn3', x)\n x = self._relu(x, self.relu_leakiness)\n x = self._conv('conv3', x, 1, out_filter / 4, out_filter, [1, 1, 1, 1])\n\n with tf.variable_scope('sub_add'):\n if in_filter != out_filter:\n orig_x = self._conv('project', orig_x, 1, in_filter, out_filter, stride)\n x += orig_x\n\n tf.logging.info('image after unit %s', x.get_shape())\n return x", "title": "" }, { "docid": "8d2338abc68bbc8c96aa2897bde8aee2", "score": "0.53640133", "text": "def min_residual(self):\n if \"residual\" in self._conditions:\n return self._conditions[\"residual\"]\n else:\n return None", "title": "" }, { "docid": "244dc97e6bcaa42875c67e0cfb114646", "score": "0.535734", "text": "def backward_pass(self, residual):\n\n # print(self.nodes, residual.shape, self.opv[\"dA\"].shape )\n # print(residual.shape, self.opv[\"X\"].shape, self.weight.shape, self.opv[\"dA\"])\n\n residual = residual * self.opv[\"dA\"] # updater residual term\n self.opv[\"dW\"] = self.opv[\"X\"].T @ residual\n self.opv[\"db\"] = residual.sum(axis=0, keepdims=True)\n\n return residual @ self.weight.T", "title": "" }, { "docid": "0af17a1474a10bff673b72da33bde5e9", "score": "0.53528196", "text": "def _bellman_error_single(self, Q, samples):\n br = self.bellman_residual(Q, samples) ** 2\n return br.sum()", "title": "" }, { "docid": "82ac4ec9650283f21b2dafac8d2776ad", "score": "0.53471106", "text": "def calculate_residuals(model, features, label, predictions):\n df_results = pd.DataFrame({'Actual': label, 'Predicted': predictions})\n df_results['Residuals'] = abs(df_results['Actual']) - abs(df_results['Predicted'])\n \n return df_results", "title": "" }, { "docid": "b7461fbf2a8bdf76a95038d5048d991c", "score": "0.5342469", "text": "def add_bias(self, a):\n return np.insert(a, 0, 1, axis=0)", "title": "" }, { "docid": "b688ddd5b5496180f33ed8daced49546", "score": "0.53401965", "text": "def residual(self, x, network):\n x.requires_grad = True\n Id_tensor = torch.ones(x.shape[0], )\n if torch.cuda.is_available():\n Id_tensor = Id_tensor.cuda()\n\n u = network(x).reshape(-1, )\n grad_u = torch.autograd.grad(u, x, grad_outputs=Id_tensor, create_graph=True)[0]\n grad_t_u = grad_u[:, 0]\n residual = grad_t_u\n for i in range(self.d):\n grad_xdxd_u = torch.autograd.grad(grad_u[:, self.d],\n x,\n 
grad_outputs=Id_tensor,\n create_graph=True)[0][:, self.d]\n residual = residual - self.param * grad_xdxd_u\n return residual", "title": "" }, { "docid": "911cd22c0e0e71f0aa1319e439b206e5", "score": "0.5337772", "text": "def env_residual_rms(env, xs, ys):\n xs = np.asarray(xs)\n ys = np.asarray(ys)\n\n resids = Polynomial(env)(xs) - ys\n\n return np.std(resids)", "title": "" }, { "docid": "b2c3c152a012c0b65b8c7c6f838218f6", "score": "0.5333079", "text": "def error_residuals(self, level=1, verbose=True):\n \n if verbose:\n print('`error_residuals`: force uncertainties to match residuals')\n \n self.set_sys_err(positive=True, in_place=True)\n\n # residual\n r = np.abs(self.fmodel - self.fnu*self.ext_redden*self.zp)\n \n # Update where residual larger than uncertainty\n upd = (self.efnu > 0) & (self.fnu > self.param['NOT_OBS_THRESHOLD'])\n upd &= (r > level*self.efnu) & (self.fmodel > 0)\n upd &= np.isfinite(self.fnu) & np.isfinite(self.efnu)\n \n self.error_residuals_update = upd\n \n self.efnu[upd] = r[upd]/level #np.sqrt(var_new[upd])", "title": "" }, { "docid": "291f8e4308e03bf944b4bc90de6ab844", "score": "0.53229403", "text": "def gain_bias(self, max_rates, intercepts):\n gain = max_rates / (1 - intercepts)\n bias = -intercepts * gain\n return gain, bias", "title": "" }, { "docid": "af8759ac1daad26d12bd7eb419ca2bc7", "score": "0.53067964", "text": "def temp_bias(self):\n return self.mbmod.temp_bias", "title": "" }, { "docid": "af8759ac1daad26d12bd7eb419ca2bc7", "score": "0.53067964", "text": "def temp_bias(self):\n return self.mbmod.temp_bias", "title": "" }, { "docid": "1c7223b36f33baa2ec57d825deb2ab72", "score": "0.52974176", "text": "def linearity(self):\n return self.maximal_linear_bias_absolute() << 1", "title": "" }, { "docid": "7793c0e693b3b49e446ad3aa01177e6a", "score": "0.52890563", "text": "def residual(self, phi, i, j):\n return abs(phi[i + 1][j] + phi[i - 1][j] + phi[i][j + 1] + phi[i][j - 1] - 4 * phi[i][j])", "title": "" }, { "docid": "c58a6777857a9e3de4d68970bb0e7d07", "score": "0.5288326", "text": "def bias(self, value):\n value = int(value)\n return audioop.bias(self._frames, self._sampwidth, value)", "title": "" }, { "docid": "03325f33fc8e0d22a93128da3f81189e", "score": "0.52715033", "text": "def temp_bias_series(self, data):\n # Need to perform some checks to make sure that the series fits the glacier.\n if not isinstance(data, np.ndarray):\n if not isinstance(data, Sequence):\n raise TypeError(\"The bias data should be a ndarray or a Sequence.\")\n # Cast data to a numpy array.\n data = np.asarray(data)\n if not np.issubdtype(data.dtype, np.number):\n raise TypeError(\"Bias data should be of the type integer or float\")\n # If this passes we can add it.\n else:\n # Create the year array for the df. The last year of the current df\n # will always be the year before the \"start\" of the added bias data.\n next_year = self._temp_bias_series.year.iloc[-1] + 1\n years = np.arange(next_year, next_year + len(data))\n # Create the df\n df = pd.DataFrame({\"year\": years, \"bias\": data})\n # Concat to the old series.\n self._temp_bias_series = pd.concat(\n [self._temp_bias_series, df]\n ).reset_index(drop=True)", "title": "" }, { "docid": "7c273fdbd5fed9fafda28c6e534f0d23", "score": "0.52703464", "text": "def R2_adj(self):\n if not self.model.has_intercept:\n warnings.warn(\"model does not have intercept term, \" +\\\n \"SST inappropriate\")\n d = 1. - self.R2\n d *= ((self.df_total - 1.) 
/ self.df_resid)\n return 1 - d", "title": "" }, { "docid": "80850967ba7de287c591c730f75ace6e", "score": "0.5267747", "text": "def initialize_bias(self, bias: torch.Tensor):\n assert bias.ndim == 1\n n = bias.shape[0]\n upper_bound = n // 2\n lower_bound = -((n - 1) // 2)\n x = torch.arange(lower_bound, upper_bound + 1, dtype=torch.float32)\n x1 = x - 0.5\n x2 = x + 0.5\n if self._transform is not None:\n x = self._transform.inverse_transform(x)\n x1 = self._transform.inverse_transform(x1)\n x2 = self._transform.inverse_transform(x2)\n probs = (x2 - x1) / (x**2 + 1)\n probs = probs / probs.sum()\n with torch.no_grad():\n bias.copy_(probs.log())", "title": "" }, { "docid": "b24bc9588afca736182ac2a130716d5e", "score": "0.52661157", "text": "def bias_variance_tradeoff(X, z, B, model=LinearRegression, **kwargs):\n # split data\n test_size = int(0.35*X.shape[0])\n X_train, X_test, Z_train, Z_test = train_test_split(X, z, test_size=test_size)\n\n # make main model\n beta, beta_var = model(X_train, Z_train, **kwargs)\n Z_hat = X_test@beta\n MSE, R2 = MSE_R2(Z_hat, Z_test)\n\n # bootstrap\n Z_pred = np.zeros((B, Z_test.shape[0]))\n\n for boot in range(B):\n # draw indexes for training\n idx = np.random.randint(0, X_train.shape[0], X_train.shape[0])\n\n # make bootstrap model\n beta, beta_var = model(X_train[idx], Z_train[idx], **kwargs)\n Z_pred[boot] = X_test@beta\n\n # compute statistics (pointwise)\n # Z_diff = Z_test - Z_pred\n Z_pred_mean = np.mean(Z_pred, axis=0)\n\n Bias2_pw = np.mean(np.abs(Z_test - Z_pred_mean), axis=0)**2\n ModVar_pw = np.mean((Z_pred - Z_pred_mean)**2, axis=0)\n\n # bootstrap average\n Bias2 = np.array((np.mean(Bias2_pw), np.std(Bias2_pw)))\n ModVar = np.array((np.mean(ModVar_pw), np.std(ModVar_pw)))\n\n return MSE, Bias2, ModVar", "title": "" }, { "docid": "4b86a9f2603b36b564ee7dfd8fe519b5", "score": "0.5264361", "text": "def regret(self):\n return self.means.max() * np.arange(1, self.T + 1) - np.cumsum(np.array(self.means)[self.arm_sequence])", "title": "" }, { "docid": "91699aeaa08dc7a2ed655c26545243c6", "score": "0.5264255", "text": "def MSE_cost(residual):\n return np.sum(residual**2)/residual.size", "title": "" }, { "docid": "9b7f22241e934b4ff48acd85ec11ae54", "score": "0.5259996", "text": "def meanstatebias(modclim, obsclim):\n\n b = modclim - obsclim\n return b", "title": "" }, { "docid": "8a723f08756f87a1d3b68af4de682819", "score": "0.5254406", "text": "def regret_old(self):\n means = np.array(pd.DataFrame(self.means, self.time_changes).reindex(np.arange(self.T)).fillna(method='ffill'))\n return np.cumsum([means[t].max() - means[t, self.arm_sequence[t]] for t in range(self.T)])", "title": "" }, { "docid": "8a723f08756f87a1d3b68af4de682819", "score": "0.5254406", "text": "def regret_old(self):\n means = np.array(pd.DataFrame(self.means, self.time_changes).reindex(np.arange(self.T)).fillna(method='ffill'))\n return np.cumsum([means[t].max() - means[t, self.arm_sequence[t]] for t in range(self.T)])", "title": "" }, { "docid": "8503f4e8273cb17efd94bb24cc1c37d6", "score": "0.52543306", "text": "def regret_old(self):\n\n means = np.array(pd.DataFrame(self.means, self.time_changes).reindex(np.arange(self.T)).fillna(method='ffill'))\n return np.cumsum([means[t].max() - means[t, self.arm_sequence[t]] for t in range(self.T)])", "title": "" }, { "docid": "8503f4e8273cb17efd94bb24cc1c37d6", "score": "0.52543306", "text": "def regret_old(self):\n\n means = np.array(pd.DataFrame(self.means, self.time_changes).reindex(np.arange(self.T)).fillna(method='ffill'))\n return 
np.cumsum([means[t].max() - means[t, self.arm_sequence[t]] for t in range(self.T)])", "title": "" }, { "docid": "db72d26b8008759b137cd6e3c61525b5", "score": "0.5248721", "text": "def residuals(self):\n return np.array([step.residual for step in self.data], dtype=np.object)", "title": "" }, { "docid": "29e2b801f97bbb265e14c76fbc3bdb03", "score": "0.524382", "text": "def _eventRs(self, phi, u):\n with np.errstate(all='ignore'):\n return 1/u[0] - self.Rs", "title": "" }, { "docid": "ede9aa0de59cd3bd423e48a9cdad8f7e", "score": "0.52431613", "text": "def jackknife_bias(a, f):\n return (len(a) - 1) * np.mean(jackknife(a, f) - f(a))", "title": "" }, { "docid": "2b3b422a50331c7726dd023f17f45fba", "score": "0.5233232", "text": "def R2_adj(self):\n if not self.model.has_intercept:\n warnings.warn(\"model does not have intercept term, SST inappropriate\")\n d = 1. - self.R2\n d *= ((self.df_total - 1.) / self.df_resid)\n return 1 - d", "title": "" }, { "docid": "eb9c2feb7090ca09f539247b377d8a38", "score": "0.5225588", "text": "def get_norm_penalized_residuals(self, spline, norm_weight = 10, residual_weight = 1):\n \n from scipy.linalg import norm\n\n #the exponent is a magic number and subject to change\n err = (norm_weight*norm(spline(self.xRange))**5) + (residual_weight*sqrt(spline.get_residual()))\n return err", "title": "" }, { "docid": "8dd97353c799d58b4ff52cf66e95964b", "score": "0.5190884", "text": "def relu(self,x):\n x = np.array(x)\n x[x<0] = 0\n return x", "title": "" }, { "docid": "9861d8652129750ce1d8149ae632fed1", "score": "0.51821405", "text": "def residual(params, data):\n alpha = params[\"alpha\"].value\n # note that alpha = mu / (k_B * T)\n model = mathematics.Fermi_integral(alpha, 0.5)\n complexResidue = abs(data - model)\n return complexResidue # noqa: RET504", "title": "" }, { "docid": "dbe86e35bb75cc5d8254bbe09f18b56d", "score": "0.5180117", "text": "def residualPatDB(I):\n I = abs(I)\n residual = np.absolute(diff - BG - sumPat(I))\n return residual", "title": "" }, { "docid": "0333abcafb0db97da1c93d5afee0a2b1", "score": "0.51732785", "text": "def relu(x):\n return np.maximum(x, 0)", "title": "" }, { "docid": "a76b754583819271d88d6c1c79e6bd23", "score": "0.517269", "text": "def mean_residual_deviance(self):\n return self._metric_json[\"mean_residual_deviance\"]", "title": "" } ]
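Several of the passages above (e.g. `mean_bias_error`, `meanstatebias`) reduce a model-versus-observation comparison to a single mean bias. A hedged sketch of how such a residual bias could then be applied back to the original series; the arrays are illustrative placeholders, not data from any passage:

    import numpy as np

    model = np.array([2.1, 4.2, 5.9, 8.4, 9.6])   # modelled series
    obs = np.array([2.0, 4.0, 6.0, 8.0, 10.0])    # observed series
    bias = np.mean(model - obs)                   # constant residual bias
    corrected = model - bias                      # bias-corrected series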
7049b094f1c8a271d841ce0e7df9399a
This is used to submit a new transaction to the chain. End users send transactions and aren't concerned about the blocks, per se. This function takes a signed transaction and will validate that it is cryptographically sound. It will then need
[ { "docid": "cce96330cccdbd3709dd3ae9f1be3f0f", "score": "0.73359656", "text": "def submit_transaction(self, transaction: Transaction) -> None:\n \n # Ensure the transaction is properly signed by the private key\n transaction.validate()\n \n # Make sure the funds exist for the requested transaction \n\n sender_balance = self.get_balance(transaction.sender_address)\n \n if sender_balance < transaction.value:\n raise Exception(f\"Insufficient tokens available ({transaction.value} required, {sender_balance} available)\")\n \n # Transaction checks out--add to the list of our pending transactions!\n self.pending_transactions.append(transaction)", "title": "" } ]
[ { "docid": "cee49b3b4a228ec167664066a94e01d5", "score": "0.64855164", "text": "def add_transaction(transaction_amount, last_transaction=[1]):\n if last_transaction == None:\n last_transaction = [1]\n\n blockchain.append([last_transaction, transaction_amount])", "title": "" }, { "docid": "1e739386150cb08266257f1eae41ff8c", "score": "0.64238834", "text": "def validate_tx(self, tx, public_key):\n\t\tis_correct_hash = False\n\t\tis_signed = False\n\t\tis_funded = True\n\t\tis_all_spent = False\n\t\tconsumed_previous = False\n\n\n\t\t# Check for equal value\n\t\tbalance = self.show_user_balance(tx['sender'], False) #returns balance without printing\n\t\tsent_amt = 0\n\t\tremain_amt = 0\n\t\t# get len of previous trans to index final balance\n\t\tfor usr, amount in tx['receivers'].items():\n\t\t\t# Check for consumed coins amount\n\t\t\tif usr == tx['sender']:\n\t\t\t\tremain_amt = amount\n\t\t\telse:\n\t\t\t\tsent_amt = amount\n\n\t\t# save remaining amount and sender address to senders to check for double spend\n\t\tremain_coins = {}\n\t\tremain_coins['sender'] = tx['sender']\n\t\tremain_coins['amount'] = remain_amt\n\t\tself.senders.append(remain_coins)\n\n\t\t# iterate through senders list and check for double spend\n\t\t# if sender is present with the same remaining balance twice in the same\n\t\t# block, it is a double spend \n\t\tdup_test = []\n\t\tfor i in self.senders:\n\t\t\tif i in dup_test:\n\t\t\t\tconsumed_previous = True\n\t\t\telse:\n\t\t\t\tdup_test.append(i)\n\n\n\t\t# print('Balance :', balance)\n\t\t# print('last_location :', tx['locations'][num_trans-1]['amount'])\n\t\t#print('consumed coins : ', consumed_coins)\n\n\t\tif balance == remain_amt+sent_amt:\n\t\t\tis_all_spent=True\n\t\t# Check if consumed coins are valid\n\n\t\t# Check current balance\n\t\tif balance == 0:\n\t\t\tis_funded = False\n\n\n\t\ttest_hash = ['sender', 'locations', 'receivers']\n\t\ttemp_tx = {}\n\t\tfor item in test_hash:\n\t\t\ttemp_tx[item] = tx[item]\n\t\ttest_tx = self.hash(temp_tx)\n\t\tif test_tx == tx['hash']:\n\t\t\tis_correct_hash = True\n\n\t\tis_signed = ecdsa.verify(tx['signature'],tx['hash'],\n\t\t\t public_key,curve=curve.secp256k1,\n\t\t\t hashfunc=hashlib.sha256)\n\n\t\t# # print the errors\n\t\tif is_signed is not True:\n\t\t\tprint('Invalid Transaction: Invalid signature!!!')\n\t\telif is_correct_hash is not True:\n\t\t\tprint(\"Invalid Transaction: Invalid hash!!\")\n\t\telif is_funded is not True:\n\t\t\tprint('Invalid Transaction: No balance!!!')\n\t\telif is_all_spent is not True:\n\t\t\tprint('Invalid Transaction: Not all coins are spent!!!')\n\t\telif consumed_previous is True:\n\t\t\tprint('Invalid Transaction: Double spend!!!')\n\n\t\tif (is_correct_hash and is_signed and is_funded and is_all_spent and not consumed_previous):\n\t\t\treturn tx", "title": "" }, { "docid": "672e790166dec010b4d39f75cd3da82c", "score": "0.64229107", "text": "def create_transaction():\n global JUSTADDEDTX, NEWTX\n JUSTADDEDTX = True\n NEWTX = Transaction(myWalletAddress, request.form.get('toAddress'), int(request.form.get('amount')))\n NEWTX.signTransaction(PRIVATE_KEY)\n # This extra try/except is neccessary for the bug i havent been able to fix yet\n try:\n try:\n blockchain.addTransaction(NEWTX)\n except ecdsa.EcdsaError:\n blockchain.addTransaction(NEWTX)\n except:\n try:\n blockchain.addTransaction(NEWTX)\n except ecdsa.EcdsaError:\n blockchain.addTransaction(NEWTX)\n return redirect(url_for('pending'))", "title": "" }, { "docid": "69dfde186b6b4202375cbead9a414cc0", "score": "0.6382087", 
"text": "def send(self, data, *args, **kwargs):\n Node.send(self, data, args, kwargs)\n transaction_data = json.loads(data['transaction'])\n transaction = Transaction(from_wallet=transaction_data['body']['from'],\n to_wallets=transaction_data['body']['to'],\n signature=transaction_data['signature'],\n public_key=transaction_data['public_key'])\n\n if transaction.verify():\n logging.debug(\"Transaction being included in the mining queue. {}\".format(data['transaction']))\n self._transaction_queue_lock.acquire()\n self._transaction_queue.append(transaction)\n self._transaction_queue_lock.release()", "title": "" }, { "docid": "23fb6ec5755d6b05862ea078b8721a86", "score": "0.62743855", "text": "def send_transaction(self):\n self.error_label.hide()\n recipient = self.keystore.resolve_name(self.recipient_edit.text())\n if recipient == 'Error':\n self.error_label.setText('Error: Recipient name could not be found in the keystore!')\n self.error_label.show()\n return\n amount = self.amount_edit.value()\n fee = math.ceil(amount * 0.05)\n\n if amount + fee > int(self.balance_label.text().split(': ')[1]):\n self.error_label.setText('Error: Current balance is not sufficient for this transaction!')\n self.error_label.show()\n return\n\n transaction = self.prepare_transaction(recipient, amount, fee, time.time())\n\n self.chain_queue.put(('new_transaction',\n transaction,\n 'gui'\n ))", "title": "" }, { "docid": "de7939fe54f898d06748a3adb1e09511", "score": "0.62152946", "text": "def is_valid_transaction(transaction):\n if transaction.metadata == MINING_REWARD_INPUT:\n if list(transaction.transaction_data.values()) != [MINING_REWARD]:\n raise Exception(\"This is an invalid mining reward\")\n return \n \n transaction_data_total = sum(transaction.transaction_data.values())\n\n if transaction.metadata['amount'] != transaction_data_total:\n raise Exception(\"Invalid transaction -- metadata does not match transaction data\")\n\n if not Wallet.verify(transaction.metadata['public_key'], transaction.transaction_data, transaction.metadata['signature']):\n raise Exception(\"Invalid signature\")", "title": "" }, { "docid": "77d22336d335f5a7e8e1acb90fe2eecb", "score": "0.6176001", "text": "def submit_transaction(\n transaction: Transaction, wallet: Wallet, client: Client = JSON_RPC_CLIENT\n) -> Response:\n return safe_sign_and_submit_transaction(transaction, wallet, client)", "title": "" }, { "docid": "8be0bed8933e719fb7b7ecdd953389d4", "score": "0.6166408", "text": "def mine(self, min_transaction_count=0, min_commission=-1):\n self._mining = True\n print(\"Starting miner\")\n network = Network(self._quantcoin)\n while self._mining:\n known_blocks = self._quantcoin.blocks()\n number_of_blocks = len(known_blocks)\n self._last_block = known_blocks[-1].digest() if number_of_blocks > 0 else 'genesis_block'\n self._transaction_queue_lock.acquire()\n if min_transaction_count > len(self._transaction_queue):\n logging.info(\"Not enough transactions: {} transactions.\".format(len(self._transaction_queue)))\n self._transaction_queue_lock.release()\n time.sleep(5)\n continue\n if min_commission > 0:\n commission = sum(t.commission() for t in self._transaction_queue)\n if commission < min_commission:\n logging.info(\"Target commission not reached: {} commission reached.\".format(commission))\n self._transaction_queue_lock.release()\n time.sleep(5)\n continue\n\n block = Block(author=self._wallet,\n transactions=self._transaction_queue,\n previous_block=binascii.a2b_base64(self._last_block))\n\n self._transaction_queue = []\n 
self._transaction_queue_lock.release()\n\n logging.info(\"Starting to mine block.\")\n block_index = self.last_block_index()\n start_nonce = 0\n while (block_index == self.last_block_index() and self.mining() and\n not block.proof_of_work(self._network_difficulty,\n start_nonce, start_nonce + 100)):\n start_nonce += 101\n\n if block.nonce() is not None:\n network.new_block(block)\n logging.info(\"Block found! Block digest: {}; Transactions: {}\"\n .format(block.digest(), len(block.transactions())))\n print(\"Block found! Block digest: {}; Transactions: {}; difficulty: {}\"\n .format(block.digest(), len(block.transactions()), self._network_difficulty))\n\n print(\"Terminating miner...\")", "title": "" }, { "docid": "9cc0ddcace84113e7695bfbcb6f11176", "score": "0.6124058", "text": "def are_valid_transactions_in_chain(chain: list):\n transactions_ids = set()\n\n for i in range(len(chain)):\n block = chain[i]\n\n if block.hash != GENESIS_BLOCK_HASH: \n has_mining_reward = False\n\n for transaction_dict in block.data: \n transaction = Transaction.from_json(transaction_dict)\n \n # unique transaction check\n if transaction.id in transactions_ids:\n raise Exception(f\"Duplicated transaction with id:{transaction.id}\")\n transactions_ids.add(transaction.id)\n\n # check 1 mining reward\n if transaction.input == MINING_REWARD_INPUT:\n if has_mining_reward:\n raise Exception(f\"Multiple mining rewards in block: {block.hash}\")\n has_mining_reward = True\n\n # check transaction validity\n Transaction.is_valid_transaction(transaction)\n\n # check the balance is correct through the blockchain blocks (skip reward miner wallet address)\n if transaction.input['address'] != MINING_REWARD_FROM_ADDRESS:\n historic_blockchain = Blockchain() \n historic_blockchain.chain = chain[0:i]\n historic_balance = Wallet.calculate_balance(historic_blockchain, transaction.input['address'])\n if transaction.input['address'] != MINING_REWARD_FROM_ADDRESS:\n if historic_balance != transaction.input['amount']:\n raise Exception(f\"Invalid input transaction amount not equals historic balance \\\n amount for transaction.id {transaction.id}\")\n\n if not has_mining_reward:\n raise Exception(f\"No mining reward found in block: {block.hash}\")", "title": "" }, { "docid": "b9a39d9e7d00dd83ee5f734b348f0f65", "score": "0.6090797", "text": "def new_transaction(self, sender, recipient, amount):\n\n self.current_transaction.append({\n 'semder':sender,\n 'recipient':recipient,\n 'amount':amount,\n })\n\n return self.last_block['index'] + 1", "title": "" }, { "docid": "08cdcdd3faa628c1845bcea7977df38d", "score": "0.6088494", "text": "def add_transaction(self, recipient, sender, signature, amount=1.0, is_receiving=False):\n # if self.public_key is None:\n # return False\n\n transaction = Transaction(sender, recipient, signature, amount)\n if Verification.verify_transaction(transaction, self.get_balance):\n self.__open_transactions.append(transaction)\n self.save_data()\n\n if not is_receiving:\n for node in self.__peer_nodes:\n url = 'http://{}/broadcast-transaction'.format(node)\n try:\n response = requests.post(url, json={'sender': sender, 'recipient': recipient, 'amount': amount,\n 'signature': signature})\n if response.status_code == 400 or response.status_code == 500:\n print(\"Transaction declined, needs resolving\")\n return False\n except requests.exceptions.ConnectionError:\n continue\n\n return True\n\n return False", "title": "" }, { "docid": "36cee3d43546f38d15b0f98f123c439b", "score": "0.6082461", "text": "def 
valid_transaction(transaction, blockchain, coins_from_issuer, block_transactions):\r\n # Should we check if the blockchain is valid first?\r\n # get source and destination of transaction\r\n (block_id, transaction_id) = transaction.src_transact_id\r\n if block_id == 0:\r\n if len(blockchain) == 0:\r\n # invalid blockchain\r\n return False\r\n src_str = blockchain[0].block.issr_pub_key\r\n else:\r\n try:\r\n src_str = blockchain[block_id].get_transaction(transaction_id).dst_pub_key\r\n except:\r\n # Some error gettting the transaction either wrong block_id or wrong transaction_id\r\n return False\r\n try:\r\n src_key = load_pem_public_key(bytes.fromhex(src_str), backend=default_backend())\r\n except:\r\n # Some error making the key\r\n return False\r\n dst_str = transaction.dst_pub_key\r\n\r\n # check if transaction signature is valid\r\n if not transaction.verify(src_key):\r\n return False\r\n\r\n # check if transaction is a registration from the issuer or a vote\r\n if src_str == blockchain[0].block.issr_pub_key:\r\n # issuer issuing new vote\r\n if dst_str in coins_from_issuer:\r\n # can never double register\r\n # TODO maybe allow? depends on election type\r\n return False\r\n\r\n # make sure no double register within same block\r\n for transaction_bc in block_transactions:\r\n # TODO shouldn't just check dest, need to also check if source\r\n # is issuer, otherwise prevents A registering and B voting for A\r\n # in the same block\r\n if transaction_bc.dst_pub_key == dst_str and transaction_bc.src_transact_id == (0,0):\r\n return False\r\n return True\r\n\r\n # voter sending vote to candidate\r\n if src_str not in coins_from_issuer:\r\n # never got any coins, so cannot vote\r\n return False\r\n if coins_from_issuer[src_str] == 0:\r\n # already spent coin, so cannot vote\r\n return False\r\n\r\n # make sure no double spend within one block\r\n for transaction_bc in block_transactions:\r\n if transaction_bc.src_transact_id == (block_id, transaction_id):\r\n return False\r\n\r\n return True", "title": "" }, { "docid": "79652c87f9c6378a3cc0734196ff4ee2", "score": "0.6024163", "text": "def add_transaction_to_current_array(self, sender_address, recipient_address, amount, signature):\n transaction = OrderedDict({'sender_address': sender_address,\n 'recipient_address': recipient_address,\n 'amount': amount})\n verify = self.verify_transaction_signature(sender_address, signature, transaction)\n if verify:\n self.transactions.append(transaction)\n return len(self.chain) + 1\n else:\n return False", "title": "" }, { "docid": "120ba8cfde70e99c4d608685a8de5569", "score": "0.5961783", "text": "def new_transaction(self, sender, recipient, amount) -> int:\n tran = {\n 'sender': sender,\n 'recipient': recipient,\n 'amount': amount,\n }\n # At this time the transaction should be broadcast and futher verified.\n self.current_transactions.append(tran)\n return self.last_block['index'] + 1", "title": "" }, { "docid": "34ff9a75300c837f1f8f5758560bc25c", "score": "0.59094536", "text": "def new_transaction(self, fdata):\n # transaction append\n cdata = bytes(fdata['data'])\n sign = bytes(fdata['sign'])\n\n # Error\n if not rsa.verify(cdata, sign, self.auth['pubkey']):\n pass\n\n data = rsa.decrypt(cdata, self.prikey)\n data = literal_eval(data.decode('utf8'))\n rand_id = data['rand_id']\n candidate = data['candidate']\n\n tx = { 'sender': rand_id, 'receiver': candidate }\n if tx in self.transactions_buffer:\n return False\n\n if self.valid_transaction(rand_id):\n self.transactions_buffer.append(tx)\n return 
True\n else:\n return False", "title": "" }, { "docid": "226be53d552f7edd66a718817dc067f7", "score": "0.59040284", "text": "def new_transaction(self, sender, recipient, amount):\n self.current_transactions.append({\n 'sender': sender,\n 'recipient': recipient,\n 'amount': amount\n })\n return self.last_block['index'] + 1", "title": "" }, { "docid": "f6a9c03b9bdadb5e0dbed1e8f364fc42", "score": "0.5892688", "text": "def transaction_checking(grid: MultiCircuit, transactions: Transactions, agent_id_to_grid_id, dt=1):\n\n # declare the final transactions list\n final_transactions = Transactions()\n\n for transaction in transactions:\n\n hash = transaction_mine(grid, transaction, agent_id_to_grid_id, dt=dt)\n\n # if there are no errors\n if hash is not None:\n # modify the transaction, adding a hash based on the voltage\n transaction.hash = hash\n\n # store the transaction\n final_transactions.append(transaction)\n else:\n # since the transactions are sorted, if a critical state is found the rest are curtailed\n return final_transactions\n\n # return the approved transactions\n return final_transactions", "title": "" }, { "docid": "a912ed602d6abab82ec69a72af3d308a", "score": "0.58813524", "text": "def test_wallet_sign_transaction_positive(self):\n private_key_paths = {\n EthereumCrypto.identifier: ETHEREUM_PRIVATE_KEY_PATH,\n FetchAICrypto.identifier: FETCHAI_PRIVATE_KEY_PATH,\n }\n wallet = Wallet(private_key_paths)\n signed_transaction = wallet.sign_transaction(\n EthereumCrypto.identifier,\n transaction={\"gasPrice\": 50, \"nonce\": 10, \"gas\": 10},\n )\n assert type(signed_transaction) == dict, \"No signed transaction returned\"", "title": "" }, { "docid": "d33ef2a655d1f0346bdc9fd56eab678f", "score": "0.58786803", "text": "def create_new_transaction(self, sender, recipient, amount):\r\n\r\n self.transactions.append({\r\n 'sender': sender,\r\n 'recipient': recipient,\r\n 'amount': amount,\r\n })\r\n\r\n return self.last_block['index'] + 1", "title": "" }, { "docid": "f25f53fd6fb7507ae22941619f48f6ae", "score": "0.58625984", "text": "def verify_transaction(transaction):\n \"\"\" Arguments:\n :transaction: a dictionary including sender, recipient, and amount to be sent\n \"\"\"\n sender_balance = get_balance(transaction['sender'])\n return sender_balance >= transaction['amount']", "title": "" }, { "docid": "225519be6947e14f0b7a83324ccb68eb", "score": "0.58570814", "text": "def submit_txn_mint():\n\n consumption = 0 # MINTED\n\n post_object = {\n 'consumption': consumption,\n }\n return post_txn(post_object)", "title": "" }, { "docid": "acd467fce61ac58ad6159cbd998a58ce", "score": "0.5834873", "text": "def mine_new_block(blockchain, lst):\n time.sleep(2)\n block = Block()\n counter = 0\n for t in lst:\n block.add_transaction(t)\n miner = Miner()\n updated_nonce = miner.proof_of_work(block.get_hash(), difficulty)\n block.block_header.nonce = updated_nonce\n blockchain.add_block(block)\n print(\"Current blockchain length: \", len(blockchain.blocks))\n print(\"Current # of votes in the blockchain:\", len(blockchain.get_tickets()))", "title": "" }, { "docid": "f6174eeca1ec634eae936925a84730ed", "score": "0.57867986", "text": "def commit(self):\n result = self.valid_block()\n\n if result:\n self.status[3][0].add(self.node_identifier)\n else:\n self.status[3][1].add(self.node_identifier)\n\n data = {'result': result, 'phase': 2, 'index': self.current_block.get('index')}\n\n sign = rsa.sign(str(data).encode('utf8'), self.prikey, 'SHA-1')\n headers = {'Content-Type': 'application/json'}\n\n for node in 
self.nodes:\n url = 'http://' + self.nodes[node][0] + '/consensus' # idx 0 = addr\n print(\"commit: from\", self.node_identifier, \"to\", url)\n cdata = rsa.encrypt(str(data).encode('utf8'), self.nodes[node][1]) # idx 1 = pubkey\n fdata = {'data': list(cdata), 'sign': list(sign), 'id': self.node_identifier}\n jdata = json.dumps(fdata)\n\n threading.Thread(target=self.block_thread, args=(url, headers, jdata)).start()\n self.is_block = False\n print(\"commit thread_id : \" + str(self.thread_id))\n signal.pthread_kill(self.thread_id,signal.SIGUSR1)\n return result", "title": "" }, { "docid": "13624525a857fd21fa2e09d9029e0e9c", "score": "0.57851547", "text": "def sign_and_execute(self, w3fun, gas, signer):\n if not signer:\n raise PermissionError(\"Cannot sign without a signer with a private key\")\n tx_signed = signer.sign(w3fun, gas)\n self.w3.eth.send_raw_transaction(tx_signed.rawTransaction)\n tx_hash = self.w3.toHex(self.w3.keccak(tx_signed.rawTransaction))\n current_gas_price = self.get_gas_price()\n estimated_cost = gas * current_gas_price\n print(\"Transaction sent, paying up to \" + str(round(estimated_cost, 6)) + \" FTM, id: \" + tx_hash)\n\n if self.txmode == \"batch\":\n self.pending_transactions.append({\"tx_hash\": tx_hash, \"gas_price\": current_gas_price})\n return {\"status\": \"pending\", \"hash\": tx_hash, \"receipt\": None}\n else:\n # Check receipt status\n print(\"Waiting for receipt...\")\n tx_receipt = self.wait_for_tx(tx_hash, gas_price_for_log = current_gas_price)\n tx_status = \"success\" if tx_receipt.status == 1 else \"failure\"\n return {\"status\": tx_status, \"hash\": tx_hash, \"receipt\": tx_receipt}", "title": "" }, { "docid": "fa4d610b9b32e41b7544beb6a73c3b50", "score": "0.57668704", "text": "def verify_transaction(transaction):\n tx_message = json.dumps(transaction.to_dict(include_signature=False))\n if isinstance(transaction, GenesisTransaction):\n # TODO: We should probably be more careful about validating genesis transactions\n return True\n \n # Verifying input transactions\n for tx in transaction.inputs:\n if not verify_transaction(tx.transaction):\n logging.error(\"Invalid parent transaction\")\n return False\n \n # Verifying a single wallet owns all the inputs\n first_input_address = transaction.inputs[0].parent_output.recipient\n for txin in transaction.inputs[1:]:\n if txin.parent_output.recipient != first_input_address:\n logging.error(\n \"Transaction inputs belong to multiple wallets (%s and %s)\" %\n (txin.parent_output.recipient, first_input_address)\n )\n return False\n \n if not verify_signature(first_input_address, tx_message, transaction.signature):\n logging.error(\"Invalid transaction signature, trying to spend someone else's money ?\")\n return False\n \n # Here compute_fee is called to trigger an assert if output sum is great than input sum. 
Without this,\n # a miner could put such an invalid transaction.\n compute_fee(transaction.inputs, transaction.outputs)\n \n return True", "title": "" }, { "docid": "7772bc21ca99850720349be38a8e927a", "score": "0.57635313", "text": "def mine(self):\n if not self.new_transactions:\n return False\n\n for transaction in self.new_transactions:\n last_block = self.last_block()\n new_block = Block(last_block.index + 1, transaction, time.time(), last_block.hash)\n proof = self.find_proof_of_work(new_block)\n self.add_block(new_block, proof)\n\n self.new_transactions = []\n return True", "title": "" }, { "docid": "11cf5464b44e906110c744725c0806a0", "score": "0.57402414", "text": "def add_transaction_to_list(self, tx):\n k = rlp.encode(self.transaction_count)\n self.transactions.update(k, rlp.encode(tx))\n r = self.mk_transaction_receipt(tx)\n self.receipts.update(k, rlp.encode(r))\n self.bloom |= r.bloom # int\n self.transaction_count += 1", "title": "" }, { "docid": "f15d44692d4b6881b6925438600fccc0", "score": "0.57194644", "text": "def process_incoming_block(self, block: Block, term: int, leader_id: str, block_history: list):\n # process block returns true if it is valid and added to blockchain and ledger\n if term == self.term and block.index == self.blockchain.get_last_block().index + 1: # and self.verify_all_signatures(block):\n # if node is a follower\n if self.node_id != leader_id:\n # Check if there is enough stake\n if block.verify_proof_of_stake():\n self.add_to_blockchain(block, leader_id)\n\n else:\n # verify transactions through ledger\n valid_boolean, change_or_bad_tx = self.ledger.verify_transaction(block.transactions, block.index)\n # Check node that sent the block does not exceed generation rate. Otherwise, no block is added.\n # This prevents a node from sending too many blocks (i.e., taking control of the chain).\n if (self.leader_counts[leader_id] / self.term) < self.probability:\n if valid_boolean and self.sig not in block.signatures.keys():\n print('Signing block with stake: ', sum([tx.amount for tx in block.transactions]) / 2 + .1)\n block.signatures[self.sig] = sum([tx.amount for tx in block.transactions]) / 2 + .1\n block_history.append(self.node_id)\n contents = {'block': str(block), 'leader_id': leader_id, 'term': str(term),\n 'history': json.dumps(block_history)}\n\n if block.verify_proof_of_stake():\n self.send_peer_msg(type='Block', contents=contents, peer=leader_id)\n else:\n options = [peer for peer in self.peers if peer not in block_history]\n to_node = options[random.randrange(len(options))]\n self.send_peer_msg(type='Block', contents=contents, peer=to_node)\n # Node is leader\n else:\n # Check if there is enough stake\n if block.verify_proof_of_stake():\n self.add_to_blockchain(block, leader_id)\n rewardees = [self.peer_signatures[sig] for sig in block.signatures.keys()]\n rewardees.append(self.node_id)\n print('Reward these hard working folx: ', rewardees)\n for peer in rewardees:\n reward_tx = str(Transaction(_to=peer, _from='reward', amount=1))\n for destination in ['0', '1', '2', '3']:\n self.messenger.send({'type': 'Transaction', 'contents': reward_tx}, destination)\n\n # if stake was sufficient, block will be complete, otherwise block will go get more signatures\n else:\n print(\"leader \", self.node_id, \"needs more signatures\")\n self.send_blockchain_msg(type='Block',\n contents={'block': str(block), 'leader_id': leader_id, 'term': term,\n 'history': json.dumps(block_history)})", "title": "" }, { "docid": "f088eb3fceda23d382c43dfca9b3c435", "score": 
"0.568973", "text": "def verify_transaction(transaction, get_balances, check_funds=True):\n sender_balance = get_balances()\n if check_funds:\n if transaction.amount > sender_balance:\n logger.warning(\n 'Transaction amount higher than available funds || Transaction: {} & Available funds: {}'\n .format(transaction, sender_balance))\n return False\n if not Wallet.verify_transaction_signature(transaction):\n logger.warning('Invalid transaction signature.')\n return False\n return True", "title": "" }, { "docid": "9f816186ba8e2d45b58efd18031f65bf", "score": "0.5680235", "text": "def addTransaction(self, sender, receiver, amount=0.90):", "title": "" }, { "docid": "229e88a5da02b537cfcb3bc8879a5df7", "score": "0.5674656", "text": "def add_transaction(\n transactions,\n txid,\n selected_transactions,\n deselected_transactions,\n selected_weight,\n block_weight,\n):\n if transactions[txid].parents:\n flag = 0\n for parent in transactions[txid].parents:\n if parent not in selected_transactions:\n flag = 1\n break\n if flag == 0:\n if selected_weight + transactions[txid].weight < block_weight:\n selected_weight += transactions[txid].weight\n selected_transactions.append(txid)\n\n for child in transactions[txid].childs:\n if child in deselected_transactions:\n (\n selected_transactions,\n deselected_transactions,\n selected_weight,\n ) = add_transaction(\n transactions,\n child,\n selected_transactions,\n deselected_transactions,\n selected_weight,\n block_weight,\n )\n else:\n deselected_transactions.append(txid)\n else:\n if selected_weight + transactions[txid].weight < block_weight:\n selected_weight += transactions[txid].weight\n selected_transactions.append(txid)\n return (selected_transactions, deselected_transactions, selected_weight)", "title": "" }, { "docid": "b354770621ebc31ab58420e80002967f", "score": "0.5650032", "text": "def add_transaction(sender, receiver, amount, timestamp):\n Transaction.objects.insert([Transaction(sender=sender,\n receiver=receiver,\n amount=amount,\n timestamp=timestamp),\n Transaction(sender=receiver,\n receiver=sender,\n amount=-amount,\n timestamp=timestamp)])", "title": "" }, { "docid": "ff95b772cd1ab1f0e379f3b680f04861", "score": "0.56478363", "text": "def validate_txn(txn: Transaction,\n as_coinbase: bool = False,\n siblings_in_block: Iterable[Transaction] = None,\n allow_utxo_from_mempool: bool = True,\n ) -> Transaction:\n txn.validate_basics(as_coinbase=as_coinbase)\n\n available_to_spend = 0\n\n for i, txin in enumerate(txn.txins):\n utxo = utxo_set.get(txin.to_spend)\n\n if siblings_in_block:\n utxo = utxo or find_utxo_in_list(txin, siblings_in_block)\n\n if allow_utxo_from_mempool:\n utxo = utxo or find_utxo_in_mempool(txin)\n\n if not utxo:\n raise TxnValidationError(\n f'Insufficient Funds, poor guy, Orphan block. ' ,\n to_orphan=txn)\n\n if utxo.is_coinbase and \\\n (get_current_height() - utxo.height) < \\\n Params.COIN_MATURITY:\n raise TxnValidationError(f'Coin base inputs not matured. 
')\n\n        try:\n            validate_signature_for_spend(txin, utxo, txn)\n        except TxUnlockError:\n            raise TxnValidationError(f'{txin} is not a valid spend of {utxo}')\n\n        available_to_spend += utxo.value\n\n    if available_to_spend < sum(o.value for o in txn.txouts):\n        raise TxnValidationError('Spend value is more than available')\n\n    return txn", "title": ""}, { "docid": "c005e3b17ffb9ba549402096f5e97f92", "score": "0.5646626", "text": "def __invoke_makeVote(self, transaction, block, params):\n        self.logd('__invoke_makeVote() start')\n\n        subject = params['subject']\n        items = params['items']\n        itemsLen = len(items)\n        createAddress = params['createAddress']\n        set_balance_str(self.__db, 'subject', subject)\n        set_balance_str(self.__db, 'createAddress', createAddress)\n        set_balance_str(self.__db, 'itemCnt', str(itemsLen))\n\n        idx = 0\n\n        while idx < itemsLen :\n            set_balance_str(self.__db, 'item_' + str(idx), items[idx])\n            set_balance(self.__db, 'item_' + str(idx) + '_cnt', 0)\n            idx = idx + 1\n\n        self.logd('__invoke_makeVote() end')", "title": ""}, { "docid": "13eeae2d24a600d4fda5ce219d8d3b70", "score": "0.56337076", "text": "def add_transaction(recipient, sender = owner, amount = 1.0):\n    \"\"\" Arguments:\n        :sender: the sender of the values\n        :recipient: the receiver of the values\n        :amount: the value being sent (default is 1.0)\n    \"\"\"\n    transaction = {\n        'sender': sender,\n        'recipient': recipient,\n        'amount': amount\n    }\n    if verify_transaction(transaction):\n        open_transactions.append(transaction)\n        participants.add(sender)\n        participants.add(recipient)\n        return True\n    return False", "title": ""}, { "docid": "27eb24e096ab9800cb34c644a76c7b75", "score": "0.5624359", "text": "def perform_transaction(self, transaction):\n\n        self.cash = db.execute('SELECT cash FROM users WHERE uid=:u', u=self.uuid)[0]['cash']\n\n        if transaction.action : # buying\n            if transaction.price * float(transaction.quantity) > self.cash:\n                raise Exception(f'Not enough cash to buy {transaction.quantity} shares of {transaction.symbol} at ${transaction.price:.2f}.')\n        else:\n            amount_to_sell = self.get_quantity(transaction.symbol)\n            if amount_to_sell < transaction.quantity:\n                raise Exception(f'Not enough shares of {transaction.symbol} to sell.')\n        \n        transaction.student = self\n\n        # ADD TRANSACTION TO SQL TABLE\n        count_transactions = db.execute(\"SELECT COUNT(1) FROM transactions\")[0]['count']\n        uid = self.uuid\n        tid = gen_random_string(8)\n        ts = datetime.now()\n        db.execute(\"INSERT INTO transactions (pk, tid, uid, type, symbol, price, quantity, buy, ts) VALUES (:c, :t, :u, :ty, :s, :p, :q, :a, :time)\", c=count_transactions, t=tid, u=uid, ty=transaction.type, s=transaction.symbol, p=transaction.price, q=transaction.quantity, a=transaction.action, time=ts)\n        db.execute(\"COMMIT\")\n\n        # change student cash\n\n        if transaction.action : # buying\n            self.cash = self.cash - transaction.price * float(transaction.quantity)\n        else :\n            self.cash = self.cash + transaction.price * float(transaction.quantity)\n        db.execute(\"UPDATE users SET cash=:c WHERE uid=:u\", c=self.cash, u=uid)", "title": ""}, { "docid": "4324a30fdacb2d4343018013a10b7868", "score": "0.56159693", "text": "def createTransaction(self, password, destList):\n        self.checkUpdate()\n        newAddr = Address()\n        newAddr.encryptPrivateKey(password)\n        total = sum([ i[1] for i in destList ])\n        if total <= self.count:\n            destList.append( (str(newAddr), (self.count - total)) )\n            transac = Transaction(self.addr.public(), destList)\n            self.addr.decryptPrivateKey(password)\n            transac.sign(self.addr)\n            
debug('valid: '+('True' if transac.is_signed() else 'False'))\n self.addr.encryptPrivateKey(password)\n if not self.relay.submit_transaction(transac):\n return False\n self.addrList.append(newAddr)\n self.addr = newAddr\n add_address(self.user_ID, self.addr, len(self.addrList)-1)\n return True\n else:\n return False", "title": "" }, { "docid": "8fcaeb48b54121628edc266c49663716", "score": "0.5611124", "text": "def validate(self, blockchain_state):\n if self.signature is None:\n raise ValidationError(\"Transaction isn't singed\")\n sender_wallet = blockchain_state.wallets.get(self.sender, None)\n if sender_wallet is None or sender_wallet.balance < (self.amount + self.fee):\n if not Config.IS_TEST_NET:\n raise InsufficientBalanceError()\n if sender_wallet is not None and sender_wallet.nonce_counter >= self.nonce:\n raise DuplicateNonceError(\"Wallet nonce is grater then transaction nonce\")\n if type(self.amount) not in (int, float) or self.amount <= 0:\n raise ValidationError(\"amount must be number grater then 0\")\n if type(self.fee) not in (int, float) or self.fee <= 0:\n raise ValidationError(\"fee must be number grater then 0\")\n if not self.is_signature_verified():\n raise ValidationError(\"transaction signature is not valid\")", "title": "" }, { "docid": "c25f66bf43c9ec521a83c3a2ce2cb198", "score": "0.5591086", "text": "def add_to_blockchain(self, hash_uid, pub_key):\n print(\">>> Sending request to bc to add new txn\")\n self.send_message_to_bc(bc_msg.new_txn(hash_uid, \"\"), pub_key)\n # Adding a new transaction implicitly means to mine it.\n # print(\">>> Sending request to bc to mine the new txn\")\n # self.send_message_to_bc(bc_msg.mine()) ", "title": "" }, { "docid": "f612642ef647bce059c2fa34a3bc904f", "score": "0.55888283", "text": "def verify(self):\n transaction = self.to_bytes()\n message = Message(transaction, self.signature, self.senderPublicKey)\n is_valid = message.verify()\n if not is_valid:\n raise ArkInvalidTransaction('Transaction could not be verified')", "title": "" }, { "docid": "03ef29a7bbd0042e5066285cf16c89f8", "score": "0.5582147", "text": "def new_block(self, amount, proof, time_proof, previous_hash):\n block = {\n 'index': len(self.chain)+1,\n 'timestamp': strftime(\"%a, %d %b %Y %H:%M:%S\", gmtime()),\n 'transaction': amount,\n 'proof' : proof,\n 'time_proof' : time_proof,\n 'previous_hash': previous_hash,\n }\n\n # Reset the transaction \n self.transactions = 0\n # Add the hash of the block inside the block\n block['hash'] = self.hashBlock(block)\n # We must to make sure that the new block is correct\n if self.valid_block(block) :\n # The block is added to the wainting list of blocks\n self.putting_block(block)\n else :\n print(\"The block \" + str(block['index']) + \" with an amount of \" + str(block['transaction']) + \" is not valid\")\n return block", "title": "" }, { "docid": "f984beaa47ca83ac5afeb209e19b78df", "score": "0.5559299", "text": "def mine_block():\n last_block = blockchain[-1]\n hashed_block = hash_block(last_block)\n reward_transaction = {\n 'sender': 'MINING',\n 'recipient': owner,\n 'amount': MINING_REWARD\n }\n copied_transactions = open_transactions[:]\n copied_transactions.append(reward_transaction)\n block = {\n 'previous_hash': hashed_block,\n 'index': len(blockchain),\n 'transactions': copied_transactions\n }\n blockchain.append(block)\n return True", "title": "" }, { "docid": "1177099baf92176deb55a59f4043900b", "score": "0.555845", "text": "def new_transaction(self,sender,recipator,amout):\n self.current_transactions.append({\n 
\"sender\":sender,\n \"recipator\":recipator,\n \"amout\":amout\n })\n return self.last_block['index'] + 1", "title": "" }, { "docid": "dd2637e37007fc065f19ab86a1fdb82e", "score": "0.5550398", "text": "def sign_transaction(self, private_key):\n self.sender_signature = CryptoUtils.sign_transaction(private_key, self.transaction_hash)", "title": "" }, { "docid": "c8675039a861c36eab665fcf642fb5a3", "score": "0.5540035", "text": "def send_signed_transaction(\n self, is_waiting_for_confirmation: bool, tx_signed: Any\n ) -> str:\n tx_signed = cast(AttributeDict, tx_signed)\n hex_value = self._api.eth.sendRawTransaction(tx_signed.rawTransaction)\n tx_digest = hex_value.hex()\n logger.debug(\"TX digest: {}\".format(tx_digest))\n if is_waiting_for_confirmation:\n while True:\n try:\n self._api.eth.getTransactionReceipt(hex_value)\n logger.debug(\"Transaction validated - exiting\")\n break\n except web3.exceptions.TransactionNotFound: # pragma: no cover\n logger.debug(\"Transaction not found - sleeping for 3.0 seconds\")\n time.sleep(3.0)\n return tx_digest", "title": "" }, { "docid": "a9e8334b9b6c3215aa32a53e2aa7990d", "score": "0.552862", "text": "def create_new_transaction(\n sender_name: str,\n sender_public_key: str,\n recipient_name: str,\n recipient_public_key: str,\n amount: int,\n signature: str,\n) -> Transaction:\n transaction_data = {\n \"sender_public_key\": sender_public_key,\n \"recipient_public_key\": recipient_public_key,\n \"amount\": int(amount),\n }\n\n validate_transaction(sender_public_key, signature, transaction_data)\n\n new_transaction = Transaction.objects.create(\n sender_name=sender_name,\n sender_public_key=sender_public_key,\n recipient_name=recipient_name,\n recipient_public_key=recipient_public_key,\n amount=amount,\n signature=signature,\n )\n\n return new_transaction", "title": "" }, { "docid": "29eb5a1d6aacd5066c0cf2dd32265d9e", "score": "0.55279255", "text": "def valid_proof( transactions, last_hash, proof ):\n # Create a string with all the hash inputs\n guess = (str([tx.to_ordered_dict() for tx in transactions]) + str(last_hash) + str(proof)).encode()\n # Hash the string\n # IMPORTANT: This is NOT the same hash as will be stored in the previous_hash. It's a not a block's hash. It's only used for the proof-of-work algorithm.\n guess_hash = hash_string_sha256( guess )\n # Only a hash (which is based on the above inputs) which starts with two 0s is treated as valid\n # This condition is of course defined by you. 
You could also require 10 leading 0s - this would take significantly longer (and this allows you to control the speed at which new blocks can be added)\n    return guess_hash[0:2] == '00'", "title": ""}, { "docid": "b79ef81548cc9a2439d4385ccd40b4df", "score": "0.552524", "text": "def new_tx(self, sender, receiver, amount):\n        self.tx.append({\n            'sender': sender,\n            'receiver': receiver,\n            'amount': amount,\n        })\n\n        return self.last_block['index'] + 1", "title": ""}, { "docid": "c62bb9caa7ca28e560795811d6a35b06", "score": "0.55182964", "text": "def new_transaction(self, content):\n\n        index = max(self.graph.keys()) + 1\n        # edges = self.tip_selection(list(self.graph.keys()))\n\n        edge1 = self.MCMC()\n        edge2 = self.MCMC()\n        edges = [edge1, edge2]\n\n        for e in edges:\n            tips = self.graph[e]['edges']\n            new_edge = []\n            checking = e\n            while not (self.valid_proof(self.graph[tips[0]]['proof'], self.graph[tips[1]]['proof'],\n                                        self.graph[checking]['proof']) and\n                       self.check_balance()):\n                # print(str(self.valid_proof(self.graph[tips[0]]['proof'], self.graph[tips[1]]['proof'], self.graph[checking]['proof'])) + \" \" +\n                #       str(self.check_balance(checking)))\n                print(checking)\n                print(self.get_balance())\n                del self.graph[checking]  # Delete from graph <-- radical maybe not necessary step\n                new_edge = self.MCMC()\n                checking = new_edge\n                tips = self.graph[new_edge]['edges']\n                if edges.count(edges[0]) == len(edges) and new_edge:  # if list elements are the same\n                    edges = [new_edge, new_edge]\n                    break\n                elif new_edge:\n                    edges[edges.index(e)] = new_edge\n\n        data = {'edges': edges,\n                'timestamp': time(),\n                'transaction': content,\n                'proof': self.proof_of_work(self.graph[edges[0]], self.graph[edges[1]]),\n                'previous_hash_1': self.hash(self.graph[edges[0]]),\n                # TODO: The previous hashes are actually unnecessary. 
Remove them?\n                'previous_hash_2': self.hash(self.graph[edges[1]])\n                }\n        self.graph[index] = data", "title": ""}, { "docid": "d625fc16dca77a67aa34594d55ed1b34", "score": "0.5512029", "text": "def __invoke_sendTransaction(self, transaction, block, params):\n        self.logd('__invoke_sendTransaction() start')\n\n        from_address = params['from']\n        to_address = params['to']\n        value = str_to_int(params['value'])\n\n        if value <= 0:\n            raise IcxError(Code.INVALID_PARAMS, f'value({value}) is invalid.')\n\n        from_balance = get_balance(self.__db, from_address)\n        if from_balance < value:\n            raise IcxError(Code.INVALID_PARAMS,\n                           f'from_balance({from_balance}) is less than transaction value({value})')\n\n        to_balance = get_balance(self.__db, to_address)\n\n        from_balance -= value\n        to_balance += value\n\n        set_balances(self.__db,\n                     {from_address: from_balance, to_address: to_balance})\n\n        self.logd('__invoke_sendTransaction() end')", "title": ""}, { "docid": "62a0cf5378a3718f5063837c545833c9", "score": "0.5504609", "text": "def broadcast_signed_transfer_tx(self, signed_tx, name=DEFAULT_WALLET, enckey=None):\n        return self.client.call('wallet_broadcastSignedTransferTransaction', [name, enckey or get_enckey()], signed_tx)", "title": ""}, { "docid": "9defcf342860cf7c40cff1507ae8639f", "score": "0.547933", "text": "def spend_peer_send_tx(peer_send_tx_id, trade_id):\n\n    print \"Spending coins from trade \" + trade_id\n    audit = TradeDao(trade_id)\n\n    offer = audit.load_json('2_offer.json')\n    offer_currency_code = NETWORK_CODES[offer['offer_currency_hash']]\n    private_key_a = audit.load_private_key('2_private_key.txt')\n    secret = audit.load_secret('2_secret.txt')\n\n    # Connect to the wallet\n    altcoin.SelectParams(offer['offer_currency_hash'])\n    proxy = altcoin.rpc.AltcoinProxy(service_port=config['daemons'][offer_currency_code]['port'], btc_conf_file=config['daemons'][offer_currency_code]['config'])\n    fee_rate = CFeeRate(config['daemons'][offer_currency_code]['fee_per_kb'])\n\n    # Monitor the block chain for TX3 being relayed\n    statbuf = os.stat(audit.get_path('4_tx2.txt'))\n    print \"Waiting for TX \" + b2lx(peer_send_tx_id) + \" to confirm\"\n    peer_send_tx = wait_for_tx_to_confirm(proxy, audit, peer_send_tx_id, statbuf.st_mtime)\n\n    # TODO: Verify the secret we have matches the one expected; this is covered by\n    # verify script later, but good to check here too\n\n    # Get an address to pull the funds into\n    own_address = proxy.getnewaddress(\"CATE \" + trade_id)\n\n    # Create a new transaction spending TX3, using the secret and our private key\n    own_receive_tx = build_receive_tx(proxy, peer_send_tx, private_key_a, secret, own_address, fee_rate)\n\n    # Send the transaction to the blockchain\n    bitcoin.core.scripteval.VerifyScript(own_receive_tx.vin[0].scriptSig, peer_send_tx.vout[0].scriptPubKey, own_receive_tx, 0, (SCRIPT_VERIFY_P2SH,))\n    try:\n        proxy.sendrawtransaction(own_receive_tx)\n    except bitcoin.rpc.JSONRPCException as err:\n        if err.error['code'] == -25:\n            print \"Send transaction \" + b2lx(peer_send_tx_id) + \" for trade \" + trade_id + \" has already been spent\"\n        else:\n            raise err\n    ready_transactions.pop(peer_send_tx_id, None)\n\n    # Add a file to indicate the TX is complete\n    audit.save_text('6_complete.txt', b2lx(own_receive_tx.GetHash()))", "title": ""}, { "docid": "8fc70512763845734020f79c960c7e85", "score": "0.54743063", "text": "def mine_transactions():\n    global JUSTADDEDTX\n    JUSTADDEDTX = False\n    blockchain.minePendingTransactions(myWalletAddress)\n    # logger.info({'status': 200, 'message': 'Mined'})\n    return 
jsonify({'status': 200, 'message': 'Mined'})", "title": "" }, { "docid": "cb141453963be7c18e07a4cdb47a3109", "score": "0.5473182", "text": "def check_valid_transaction_request(tx):\n newfrombalance = tx.from_member_obj.getbalance() + tx.amount\n if tx.group_obj.individual_credit_limit < newfrombalance:\n raise TxExceedsLimits(tx.group_obj.individual_credit_limit, newfrombalance)\n\n newtobalance = tx.to_member_obj.getbalance() - tx.amount\n if newtobalance < tx.group_obj.individual_debt_limit:\n raise TxExceedsLimits(tx.group_obj.individual_debt_limit, newtobalance)\n\n return True", "title": "" }, { "docid": "123a75770c36ba5ca0b5761016928315", "score": "0.54688627", "text": "def valid_chain(self, chain):\r\n last_block = chain[0]\r\n current_index = 1\r\n\r\n while current_index < len(chain):\r\n block = chain[current_index]\r\n #print(last_block)\r\n #print(block)\r\n #print(\"\\n-----------\\n\")\r\n # Check that the hash of the block is correct\r\n if block['previous_hash'] != self.hash(last_block):\r\n return False\r\n\r\n # Check that the Proof of Work is correct\r\n transactions = block['transactions']\r\n # Need to make sure that the dictionary is ordered. Otherwise we'll get a different hash\r\n transaction_elements = ['sender', 'value']\r\n transactions = [OrderedDict((k, transaction[k]) for k in transaction_elements) for transaction in transactions]\r\n\r\n if not self.valid_proof(transactions, block['previous_hash'], block['nonce']):\r\n return False\r\n\r\n last_block = block\r\n current_index += 1\r\n\r\n return True", "title": "" }, { "docid": "8e6afaf2dd319a5d902331d9d6fe1b3f", "score": "0.54678607", "text": "def mine(self, pointer=None):\n\t\tif not self.transaction_pending:\n\t\t\treturn False #\"no pending transaction awaiting\"\n\n\t\tnew_block = Block(index=self.chain[-1].index+1,\n\t\t\t\t\t\ttimestamp=time.time(),\n\t\t\t\t\t\ttransaction=self.transaction_pending,\n\t\t\t\t\t\tpointer=pointer,\n\t\t\t\t\t\tprevious_hash=self.chain[-1].hash)\n\n\t\tproof = self.proof_of_work(new_block)\n\t\tself.add_new_block(new_block, proof)\n\n\t\tself.transaction_pending = list()\n\t\tannounce_new_block(new_block) #announce to the network\n\t\treturn new_block.index", "title": "" }, { "docid": "7e9c18fa2a7e397077da44396c5598b9", "score": "0.54591036", "text": "def add_trax(self, recipient, sender,signature, amount = 1.0, is_recieving = False):\n # temp_trax = {\n # 'sender': sender, \n # 'recipient': recipient, \n # 'amount': amount\n # }\n if self.public_key == None:\n return False\n\n temp_trax = Trax(sender, recipient, signature, amount)\n\n #Verify transaction before adding to open transactions \n if VerficationHelper.verify_trax(temp_trax, self.get_balance):\n self.__open_trax.append(temp_trax)\n self.save_data()\n if not is_recieving:\n for node in self.__peer_nodes:\n url = 'http://{}/broadcast_trax'.format(node)\n try:\n response = requests.post(url, json = {\n 'tx_sender': sender,\n 'tx_recipient': recipient,\n 'tx_amount': amount,\n 'signature': signature})\n if response.status_code == 400 or response.status_code == 500:\n print('Transaction declined, need resolving!')\n return False\n except requests.exceptions.ConnectionError:\n continue\n return True\n return False", "title": "" }, { "docid": "6b3e613b7ac9ff711ef81dd8d76baad4", "score": "0.54522276", "text": "def add_transactions(self, tran, user):\n # Check if user is completely locked out\n if user.all_budgets.status_locked:\n print('You are completely locked out and cannot spend any more money!!\\n')\n return\n\n # 
Check if user is locked out of this category\n if self.status_locked:\n print('Transaction cannot be added. You are locked out of this category\\n')\n return\n\n self.amount_spent += tran.amount\n\n try:\n user.update_bank_balance(tran.amount)\n except NegativeBalanceException as e:\n print('Transaction cannot be recorded.')\n print(e.args[0])\n return\n\n self.transactions.append(tran)\n print('Transaction recorded!')\n user.warn_and_notify(self) # Check if this transactions caused any warnings or notifications.", "title": "" }, { "docid": "cbaff986b612e24ac0696c39cf5415c2", "score": "0.5451559", "text": "def test_transaction():\n sender_wallet = Wallet()\n recipient = \"recipient\"\n amount = 50\n transaction = Transaction(sender_wallet, recipient, amount)\n\n assert transaction.transaction_data[recipient] == amount\n assert transaction.transaction_data[sender_wallet.address] == sender_wallet.balance - amount\n \n assert \"timestamp\" in transaction.metadata\n assert transaction.metadata['amount'] == sender_wallet.balance \n assert transaction.metadata['address'] == sender_wallet.address\n assert transaction.metadata['public_key'] == sender_wallet.public_key\n\n assert Wallet.verify(transaction.metadata['public_key'], transaction.transaction_data, transaction.metadata['signature'])", "title": "" }, { "docid": "99bfc5e034a0f9ae37b369bf95bc8f9e", "score": "0.54457873", "text": "def __init__(self, transactions, previous_hash):\n # ipdb.set_trace()\n if self.validate_transactions(transactions):\n merkle_tree = MerkleTree(transactions)\n self._leaves = merkle_tree.leaves()\n self._root = merkle_tree.root()\n self._previous_hash = previous_hash\n self._nonce = None\n self._hash = None\n self._timestamp = None\n txns_string = \"\".join(map(lambda txn: txn.to_string(), transactions))\n # NOTE: this is where the work factor is inserted\n self.block_mint(txns_string, 4)\n else:\n return", "title": "" }, { "docid": "fc648dca48783cf8077bf2ec8b69d77e", "score": "0.54410243", "text": "def second_verify(self, passphrase):\n transaction = sha256(self.to_bytes()).digest()\n message = Message(transaction, self.signSignature, self.senderPublicKey)\n is_valid = message.verify()\n if not is_valid:\n raise ArkInvalidTransaction('Transaction could not be verified')", "title": "" }, { "docid": "2498061101fb35a8dd14a9f4a8d41d2a", "score": "0.5437052", "text": "def execute_transaction(self, src_private_key: str, transaction: dict):\n signed_txn = self.sign_transaction(src_private_key, transaction)\n return self.send_raw_transaction(signed_txn.rawTransaction)", "title": "" }, { "docid": "8fa8b189147081a5e4afbab1ac0fcf8f", "score": "0.54349685", "text": "def add_new_transaction(self, sender: str, recipient: str, payload: object, amount: int = 0) -> int:\n\n self.transactions.append(\n {\n 'sender': sender,\n 'recipient': recipient,\n 'payload': payload,\n 'amount': amount\n }\n )\n\n return self.last_block['index'] + 1", "title": "" }, { "docid": "15f7607287f1a1b444eb817cfd716871", "score": "0.5433622", "text": "def valid_block(newblock, blockchain, coins_from_issuer):\r\n \r\n if blockchain is None:\r\n return False\r\n\r\n if len(blockchain) == 0:\r\n if isinstance(newblock.block, GenesisBlock): # ?\r\n print(\"The first block of an empty blockchain is a GenesisBlock\")\r\n return True\r\n print(\"The first block is not a genesis block\", type(newblock), type(newblock.block))\r\n return False\r\n\r\n # check if hash matches previous block\r\n if newblock.prev_block_hash != blockchain[-1].block.to_hash():\r\n 
return False\r\n\r\n # check number of transactions\r\n if len(newblock.tree) != blockchain[0].block.block_size:\r\n return False\r\n\r\n # check each transaction, except the mining transaction\r\n block_transactions = newblock.tree[0:1]\r\n for transaction in newblock.tree[1:]:\r\n if not valid_transaction(transaction, blockchain, coins_from_issuer, block_transactions):\r\n return False\r\n block_transactions.append(transaction)\r\n\r\n # check if transaction merkle tree root hash matches\r\n if MerkleTree(newblock.tree).get_hash() != newblock.block.root_hash:\r\n return False\r\n\r\n # Check if number of zeros in the block.tohash is the right amount\r\n if not check_hash(newblock.block.to_hash(), blockchain):\r\n return False\r\n\r\n return True", "title": "" }, { "docid": "8a25875704832b9bc52d340265e1af7e", "score": "0.5429527", "text": "def mine_block():\n # Fetch the currently last block of the blockchain\n last_block = blockchain[-1]\n #print(last_block)\n # Hash the last block (=> to be able to compare it to the stored hash value)\n hashed_block = hash_block(last_block)\n proof = proof_of_work()\n # Miners should be rewarded, so let's create a reward transaction\n # reward_transaction = {\n # 'sender': 'MINING',\n # 'recipient': owner,\n # 'amount': MINING_REWARD\n # }\n reward_transaction = OrderedDict(\n [('sender', 'MINING'), ('recipient', owner), ('amount', MINING_REWARD)])\n # Copy transaction instead of manipulating the original open_transactions list\n # This ensures that if for some reason the mining should fail, we don't have the reward transaction stored in the open transactions\n copied_transactions = open_transactions[:]\n copied_transactions.append(reward_transaction)\n block = {\n 'previous_hash': hashed_block,\n 'index': len(blockchain),\n 'transactions': copied_transactions,\n 'proof': proof\n }\n blockchain.append(block)\n return True", "title": "" }, { "docid": "24b20feaa758a094c0b65a60b7fe0a25", "score": "0.5429148", "text": "def sign_block_to_confirm(*, block, existing_accounts, new_accounts):\n\n head_block_hash = cache.get(HEAD_BLOCK_HASH)\n network_signing_key = get_environment_variable('NETWORK_SIGNING_KEY')\n signing_key = SigningKey(network_signing_key, encoder=HexEncoder)\n\n message = {\n 'block': block,\n 'block_identifier': head_block_hash,\n 'updated_balances': format_updated_balances(existing_accounts, new_accounts)\n }\n confirmation_block = generate_signed_request(\n data=message,\n nid_signing_key=signing_key\n )\n\n message_hash = get_message_hash(message=message)\n confirmation_block_cache_key = get_confirmation_block_cache_key(block_identifier=head_block_hash)\n cache.set(confirmation_block_cache_key, confirmation_block, None)\n cache.set(HEAD_BLOCK_HASH, message_hash, None)\n\n return confirmation_block", "title": "" }, { "docid": "eaca1c39a1794f458b8e3d0798b519e4", "score": "0.5423903", "text": "def is_transaction_valid(self):\n return CryptoUtils.verify_transaction(\n public_key=self.sender_pub_key,\n hashed_data=self.transaction_hash,\n signature=self.sender_signature)", "title": "" }, { "docid": "6eaf2030b7456e8f67f2681bdd3fb337", "score": "0.5420402", "text": "def verify_transaction_signature(self, sender, signature, transaction):\r\n public_key = RSA.importKey(binascii.unhexlify(sender))\r\n verifier = PKCS1_v1_5.new(public_key)\r\n h = SHA.new(str(transaction).encode('utf8'))\r\n result = verifier.verify(h, binascii.unhexlify(signature))\r\n return result", "title": "" }, { "docid": "49cb28a1469202d614d91093c4455914", "score": 
"0.54172605", "text": "def verifyChain(cls, blockchain):", "title": "" }, { "docid": "d044c3aa7a98be60606333278e9519d5", "score": "0.54104626", "text": "def test_sign_tx(self):\r\n self.TX.sign(private_key=self.PRIVATE_KEYS, ms_address=self.FROM)\r\n\r\n self.assertEqual(self.TX.signed_tx, self.SIGNED_TX)", "title": "" }, { "docid": "473bec8d2342d439aa7a25df82479abc", "score": "0.5408246", "text": "def __invoke_voteTx(self, transaction, block, params):\n self.logd('__invoke_voteTx() start')\n\n itemAddress = params['itemAddress']\n createAddress = params['createAddress']\n itemCnt = get_balance(self.__db, 'itemCnt')\n itemIdx = 0\n\n #check address\n self.logd('__invoke_voteTx() check_address')\n if not check_address(createAddress):\n self.logd('__invoke_voteTx() create_jsonrpc_error_response')\n return create_jsonrpc_error_response(\n '0', Code.INVALID_PARAMS, f'invalid address({createAddress})')\n\n #check already\n self.logd('__invoke_voteTx() while')\n while itemIdx < itemCnt:\n self.logd('__invoke_voteTx() getBalacnce : ' + createAddress + '_' + str(itemIdx))\n voteAddress = get_balance_str(self.__db, createAddress + '_' + str(itemIdx))\n self.logd('__invoke_voteTx() voteAddress : ' + voteAddress)\n if voteAddress is not None and voteAddress != '' and voteAddress != '0x0' :\n raise IcxError(Code.INVALID_TRANSACTION, 'vote has been already transaction.')\n itemIdx = itemIdx + 1\n\n itemIdx = 0\n itemAddressLen = len(itemAddress)\n while itemIdx < itemAddressLen :\n selectAddress = itemAddress[itemIdx]\n set_balance_str(self.__db, createAddress + '_' + str(itemIdx), selectAddress)\n set_balance(self.__db, 'item_' + selectAddress + '_cnt',\n get_balance(self.__db, 'item_' + selectAddress + '_cnt') + 1)\n itemIdx = itemIdx + 1\n\n self.logd('__invoke_voteTx() end')", "title": "" }, { "docid": "8b00d0d30614737b78fad98f163beedc", "score": "0.5406887", "text": "def mine(self):\n # print(\"$$1$$\")\n if not self.unconfirmed_transactions:\n return False\n # print(\"$$2$$\")\n last_block = self.last_block\n\n new_block = Block(index=last_block.index + 1,\n transactions=self.unconfirmed_transactions,\n timestamp=time.time(),\n previous_hash=last_block.hash)\n # print(\"$$3$$\")\n proof = self.proof_of_work(new_block)\n self.add_block(new_block, proof)\n # print(\"$$4$$\")\n self.unconfirmed_transactions = []\n # print(\"$$5$$\")\n # announce it to the network\n return new_block.index", "title": "" }, { "docid": "b5928bcf0cff1eac924d14caaaf9574a", "score": "0.5402022", "text": "def createrawtransaction(self, inputs=[], outputs={}, locktime=None):\n if locktime:\n return self.req(\"createrawtransaction\", [ inputs, outputs, locktime ])\n else:\n return self.req(\"createrawtransaction\", [ inputs, outputs ])", "title": "" }, { "docid": "4ee7737f2f4ba05c1a2c842b759eefeb", "score": "0.5402008", "text": "def valid_proof(self, transactions, last_hash, nonce):\r\n guess = (str(transactions)+str(last_hash)+str(nonce)).encode()\r\n guess_hash = hashlib.sha256(guess).hexdigest()\r\n return guess_hash[:2] == '0'*2", "title": "" }, { "docid": "3fd9714801640db42c3d5fa1c3c49d8a", "score": "0.53938556", "text": "def send_transaction(self, tx):\n\twith redis.Redis().lock(tx['source_address'], timeout=300):\n\t\ttry:\n\t\t\tsource_address = tx['source_address']\n\t\t\tto_address = tx['to_address']\n\t\t\tamount = tx['amount']\n\t\t\tuid = tx['uid']\n\t\t\traw_withdraw_amt = int(amount) * util.RAW_PER_BAN if settings.banano else int(amount) * util.RAW_PER_RAI\n\t\t\twallet_command = {\n\t\t\t\t'action': 
'send',\n\t\t\t\t'wallet': settings.wallet,\n\t\t\t\t'source': source_address,\n\t\t\t\t'destination': to_address,\n\t\t\t\t'amount': raw_withdraw_amt,\n\t\t\t\t'id': uid\n\t\t\t}\n\t\t\tlogger.debug(\"RPC Send\")\n\t\t\twallet_output = communicate_wallet(wallet_command)\n\t\t\tlogger.debug(\"RPC Response\")\n\t\t\ttxid = None\n\t\t\tif 'block' in wallet_output:\n\t\t\t\ttxid = wallet_output['block']\n\t\t\t\t# Also pocket these timely\n\t\t\t\tlogger.info(\"Pocketing tip for %s, block %s\", to_address, txid)\n\t\t\t\tpocket_tx(to_address, txid)\n\t\t\telif 'error' in wallet_output:\n\t\t\t\ttxid = 'invalid'\n\t\t\tif txid is not None:\n\t\t\t\tret = json.dumps({\"success\": {\"source\":source_address, \"txid\":txid, \"uid\":uid, \"destination\":to_address, \"amount\":amount}})\n\t\t\t\tr.rpush('/tx_completed', ret)\n\t\t\t\treturn ret\n\t\t\telse:\n\t\t\t\tself.retry(countdown=2**self.request.retries)\n\t\t\t\treturn {\"status\":\"retrying\"}\n\t\texcept pycurl.error:\n\t\t\tself.retry(countdown=2**self.request.retries)\n\t\t\treturn {\"status\":\"retrying\"}\n\t\texcept Exception as e:\n\t\t\tlogger.exception(e)\n\t\t\tself.retry(countdown=2**self.request.retries)\n\t\t\treturn {\"status\":\"retrying\"}", "title": "" }, { "docid": "51b19451030365447f931f58244e7c0f", "score": "0.5379725", "text": "def insert_transaction(self, txn):\n t = self.create_txn(txn)\n self.add_to_pq(t)", "title": "" }, { "docid": "30a8127b5277361fa5a4346c936f272b", "score": "0.5376997", "text": "def sign_transaction(self, transaction, *args, **kwargs):\n raise NotImplementedError", "title": "" }, { "docid": "2a5c5a3ec837bfcfb630f4f75bec72bf", "score": "0.5376264", "text": "def post(self):\n\n id_goods =[]\n amounts_goods=[]\n logsOfError=''\n # Input data\n data = request.get_json(force=True)\n account_address = data['account_address']\n private_key = data['privateKey']\n id_goods = data['id_goods']\n amounts_goods = data['amounts_goods']\n timestamp = datetime.utcnow().strftime(\"%Y-%m-%dT%H:%M:%S\")\n \n try:\n trigger = tron.transaction_builder.trigger_smart_contract(contract_address = SMART_CONTRACT_ADDRESS,\n function_selector = 'placeBidBox(uint256[],uint256[],string)', # Without space / без пробелов!\n fee_limit=1000000000,\n call_value=0,\n parameters=[{'type':'int256[]','value':id_goods},{'type': 'int256[]','value':amounts_goods},{'type':'string','value':timestamp}],\n issuer_address=account_address\n )\n\n tron.private_key = private_key\n transaction = trigger['transaction']\n signed1_tx = tron.trx.sign(transaction,True,False)\n e = tron.trx.broadcast(signed1_tx)\n except Exception as e:\n logsOfError = logsOfError + str(e)\n return {'txID':e['txid'], 'logs':logsOfError}", "title": "" }, { "docid": "833039cd47b36effc7dd8540b1e02305", "score": "0.5362364", "text": "def verify_transaction_signature(self, sender_address, signature, transaction):\n public_key = RSA.importKey(binascii.unhexlify(sender_address))\n verifier = PKCS1_v1_5.new(public_key)\n h = SHA.new(str(transaction).encode('utf-8'))\n return verifier.verify(h, binascii.unhexlify(signature))", "title": "" }, { "docid": "1eb955682a0f6047ebbc8e00ab2c208c", "score": "0.53596634", "text": "def test_wallet_sign_transaction_negative(self):\n private_key_paths = {\n EthereumCrypto.identifier: ETHEREUM_PRIVATE_KEY_PATH,\n FetchAICrypto.identifier: FETCHAI_PRIVATE_KEY_PATH,\n }\n wallet = Wallet(private_key_paths)\n signed_transaction = wallet.sign_transaction(\n \"unknown id\", transaction={\"this is my tx\": \"here\"}\n )\n assert signed_transaction 
is None, \"Signed transaction should be none\"", "title": "" }, { "docid": "4f5c908ffe6b6af5de6bdda3a09d874a", "score": "0.5357543", "text": "def process_transaction(self, tx):\n if not self.is_double_spending(tx) and self._user_does_exist(tx) and self._user_has_funds(tx):\n self.utxo[tx.sender] = self.utxo[tx.sender] - tx.amount\n self.utxo[tx.receiver] = self.utxo[tx.receiver] + tx.amount\n self.log_transaction(tx)\n return True\n else:\n # raise an error\n print(\"\\nSomeone is attempting to double spend...\\n\")\n return False", "title": "" }, { "docid": "ce9a63347c052d819352d8da730821f1", "score": "0.53406703", "text": "def verify_transaction(transaction, get_balance, check_funds = True):\r\n if check_funds:\r\n sender_balance = get_balance(transaction.sender)\r\n return sender_balance >= transaction.amount and Wallet.verify_transaction(transaction)\r\n else:\r\n return Wallet.verify_transaction(transaction)", "title": "" }, { "docid": "a4b73ea7730d51962cd5f4f78ca435a5", "score": "0.533664", "text": "def validate_transaction(\n self,\n tx_digest: str,\n seller: Address,\n client: Address,\n tx_nonce: str,\n amount: int,\n ) -> bool:\n\n tx = self._api.eth.getTransaction(tx_digest)\n is_valid = (\n tx.get(\"input\") == tx_nonce\n and tx.get(\"value\") == amount\n and tx.get(\"from\") == client\n and tx.get(\"to\") == seller\n )\n return is_valid", "title": "" }, { "docid": "fbb7dde8cf8176d6ae123c4273e14b0e", "score": "0.5336463", "text": "def create_append_transaction(self, new_transaction):\n\t\tif self.verify_transaction(new_transaction):\n\t\t\tself.open_transactions.append(new_transaction)", "title": "" }, { "docid": "accaf87a7dcfc14e06b6c5a1acf27c23", "score": "0.53354764", "text": "def prepare_transaction(self, recipient, amount, fee, timestamp) -> Transaction:\n transaction_hash = hashlib. 
\\\n sha256((str(self.verify_key_hex) +\n str(recipient) + str(amount)\n + str(fee) +\n str(timestamp)).encode()).hexdigest()\n\n transaction = Transaction(self.verify_key_hex,\n recipient,\n amount,\n fee,\n timestamp,\n self.signing_key.sign(\n transaction_hash.encode())\n )\n return transaction", "title": "" }, { "docid": "797f5319501a2514ad660cd007c6d761", "score": "0.53347313", "text": "def verify_blockchain( cls, blockchain ):\n\n print( '+' * 100 )\n for( index, block ) in enumerate(blockchain):\n if( index == 0 ):\n continue\n # Verify if the previous hash is equal to manually cancluated previous value hash\n \n if( block.previous_hash != hash_block( blockchain[index-1] ) ):\n return False\n \n # Verify valid proof of work, also make sure you pass the tranasctions without reward transaction, \n # as we have not added the same while mining blocks, that is why -1 in below code.\n if not cls.valid_proof( block.transactions[:-1], block.previous_hash, block.proof ):\n print( '>>>>>>>>>>>>>>>>>>>>...------------- Proof of work is Invalid --------------...<<<<<<<<<<<<<<<<<<<<<<<<<<<<<' )\n return False\n return True", "title": "" }, { "docid": "44a3d8cca94f3f7928f85977aa5f6e9d", "score": "0.53335273", "text": "def create(self, validated_data):\n\n validated_block = validated_data\n self_configuration = get_self_configuration(exception_class=RuntimeError)\n primary_validator = self_configuration.primary_validator\n\n try:\n with transaction.atomic():\n block = create_block_and_bank_transactions(validated_block)\n Account.objects.get_or_create(\n account_number=validated_block['account_number'],\n defaults={'trust': 0},\n )\n send_signed_block.delay(\n block=validated_block,\n ip_address=primary_validator.ip_address,\n port=primary_validator.port,\n protocol=primary_validator.protocol,\n url_path='/bank_blocks'\n )\n except Exception as e:\n logger.exception(e)\n raise serializers.ValidationError(e)\n\n return block", "title": "" }, { "docid": "e6cfca31ae64fd1d1f9256b2472ef79a", "score": "0.53251135", "text": "def sign_transaction(self, src_private_key: str, transaction: dict):\n return self.eth.account.signTransaction(transaction, src_private_key)", "title": "" }, { "docid": "000a8055b710d61042d2c18a9a924514", "score": "0.5321427", "text": "def validate_transaction_collection(transactions: List[TransactionRecord]):\n result = parse_and_process(transactions, AcceptedContentTypes.JSON)\n\n return result", "title": "" }, { "docid": "4fbbf91fa62efc856161ff9a4c21ebd3", "score": "0.5304803", "text": "def validate_transaction(public_key: str, signature: str, message: dict):\n signature = str_sig_to_bytes(signature)\n public_key = deserialize_str_key(public_key)\n\n try:\n verify(public_key, signature, message)\n except cryptography.exceptions.InvalidSignature:\n raise SignatureError", "title": "" }, { "docid": "d0d3ac5f282b886a3c9bd64eb74e019f", "score": "0.5303581", "text": "def create_signed_transaction(\n txn: starcoin_types.RawTransaction, public_key: bytes, signature: bytes\n) -> starcoin_types.SignedUserTransaction:\n return starcoin_types.SignedUserTransaction(\n raw_txn=txn,\n authenticator=starcoin_types.TransactionAuthenticator__Ed25519(\n public_key=starcoin_types.Ed25519PublicKey(value=public_key),\n signature=starcoin_types.Ed25519Signature(value=signature),\n ),\n )", "title": "" }, { "docid": "94c92186b67ed37a08bf53f3263a8c22", "score": "0.52965146", "text": "def mine_block(self):\n sleep(random.random() * 2)\n mined_probability = random.random()\n\n if mined_probability > 
self.probability and len(self.transaction_queue) != 0:\n tx_to_mine = self.transaction_queue\n new_index = self.blockchain.get_last_block().index + 1\n verify_boolean, change_or_bad_tx = self.ledger.verify_transaction(tx_to_mine, new_index)\n while not verify_boolean:\n self.transaction_queue = [tx for tx in self.transaction_queue if tx.unique_id not in change_or_bad_tx]\n verify_boolean, change_or_bad_tx = self.ledger.verify_transaction(tx_to_mine, new_index)\n new_block = Block(index=new_index, transactions=tx_to_mine)\n to_node = self.peers[random.randrange(len(self.peers))]\n\n self.le.request_leadership()\n sleep(5)\n if self.le.election_state != 'leader':\n return\n sleep(.5)\n print('I have been elected as leader.')\n self.send_peer_msg(type='Block',\n contents={'block': str(new_block), 'leader_id': self.node_id, 'term': self.term,\n 'history': json.dumps([self.node_id])}, peer=to_node)\n print(self.node_id, \" has mined and sent a block to \", to_node)\n\n self.le.release_leadership()", "title": "" }, { "docid": "beee022e792b0a6e950ee28e033e7e13", "score": "0.5288708", "text": "def valid_proof(self, transactions, last_hash, nonce, difficulty=MINING_DIFFICULTY):\n guess = (str(transactions) + str(last_hash) + str(nonce)).encode()\n guess_hash = hashlib.sha256(guess).hexdigest()\n return guess_hash[:difficulty] == '0'*difficulty", "title": "" }, { "docid": "34086f655001e528dc2d6f852a54c4b7", "score": "0.5287448", "text": "def add_tx(self, tx, public_key):\n\t\ttx_val = False\n\t\tvalidate = self.validate_tx(tx,public_key)\n\t\n\t\tif validate is None:\n\t\t\tpass\n\t\telse:\n\t\t\ttx_val = True\n\t\t\tself.current_transactions.append(validate)\n\n\t\treturn tx_val", "title": "" }, { "docid": "cf9ad1ee6b55c333eff7d45e6c254708", "score": "0.5276973", "text": "def verify_chain(cls, blockchain):\r\n for (index, block) in enumerate(blockchain):\r\n if index == 0:\r\n continue\r\n if block.previous_hash != hash_block(blockchain[index - 1]):\r\n return False\r\n # Excluding reward transactions since the reward transaction is included\r\n # after the proof of work\r\n if not cls.valid_proof(block.transactions[:-1], block.previous_hash, block.proof):\r\n print('Proof of work is invalid')\r\n return False\r\n return True", "title": "" }, { "docid": "416c6fd3c9b1e46836e924d69f1daed2", "score": "0.5276636", "text": "def sign_transaction(self):\n private_key = RSA.importKey(binascii.unhexlify(self.sender_private_key))\n signer = PKCS1_v1_5.new(private_key)\n h = SHA.new(str(self.to_dict()).encode('utf8'))\n return binascii.hexlify(signer.sign(h)).decode('ascii')", "title": "" }, { "docid": "45e654bc656336e3b0bd8da121af6842", "score": "0.5274523", "text": "def input_txn(txn, cursor):\n coin_id = get_coin_id(txn[\"coin_symbol\"], cursor)\n wallet_id = get_wallet_id(txn[\"wallet_name\"], cursor)\n\n if txn[\"time\"] is None:\n txn[\"time\"] = datetime.now()\n else:\n txn[\"time\"] = datetime.strptime(txn[\"time\"], \"%Y-%m-%d\")\n\n if txn[\"price\"] is None or float(txn[\"price\"]) <= 0.0:\n # noinspection SyntaxError,SyntaxError,SyntaxError,SyntaxError\n cursor.execute(\"\"\"\n INSERT INTO transactions \n (coin_id, wallet_id, num_shares, txn_time)\n VALUES (%s, %s, %s, %s)\n \"\"\", (coin_id, wallet_id, txn[\"shares\"], txn[\"time\"]))\n else:\n # noinspection SyntaxError,SyntaxError,SyntaxError,SyntaxError,SyntaxError\n cursor.execute(\"\"\"\n INSERT INTO transactions \n (coin_id, wallet_id, num_shares, share_price, txn_time)\n VALUES (%s, %s, %s, %s, %s)\n \"\"\", (coin_id, wallet_id, 
txn[\"shares\"], txn[\"price\"], txn[\"time\"]))", "title": "" }, { "docid": "dca51b67d6dcb63016b5504a088e81d1", "score": "0.5273591", "text": "def submit_txn_consumed():\n\n guid = request.form[\"guid\"]\n fish = fetch_fish(guid)\n if fish == None:\n # Error looking up fish. It might not exist?\n print(\"Fish is None\")\n return redirect('/')\n\n lastConsumption = fish[len(fish) - 1].get('consumption')\n if lastConsumption != 2:\n print(\"Fish must be state 2, is \" + str(lastConsumption))\n return redirect('/')\n\n speciesId = fish[1].get('speciesId')\n caughtLat = fish[1].get('caughtLat')\n caughtLong = fish[1].get('caughtLong')\n consumption = 3 # CONSUMED\n\n post_object = {\n 'guid': guid,\n 'speciesId': speciesId,\n 'caughtLat': caughtLat,\n 'caughtLong': caughtLong,\n 'consumption': consumption,\n }\n return post_txn(post_object)", "title": "" }, { "docid": "132d5599ca563bef74789688215d87bf", "score": "0.5273019", "text": "def mine_block(self, mining_function=None, transaction_count=10):\n\n new_block = Block()\n if mining_function is not None:\n new_block.find_nonce = mining_function\n\n while len(new_block.transactions) < transaction_count:\n if not len(self.transaction_pool):\n print(\"Waiting 5 seconds for new transactions to create block ... \")\n time.sleep(5)\n continue\n\n transaction = self.transaction_pool.pop(0)\n new_block.transactions += [transaction]\n\n nonce = new_block.find_nonce()\n block_hash = new_block.find_hash()\n\n if self.add_block_pool(new_block.json_data()):\n print(f\" Block [{ block_hash }] Created ...\")\n return block_hash\n\n print(\" Block creation Failed ...\")\n return None", "title": "" }, { "docid": "0d08069689cd6de40e52195cfe001511", "score": "0.5271464", "text": "def sign_transaction(self):\n private_key = RSA.importKey(binascii.unhexlify(self.sender_private_key))\n signer = PKCS1_v1_5.new(private_key)\n h = SHA.new(str(self.to_dict()).encode('utf-8'))\n return binascii.hexlify(signer.sign(h)).decode('ascii')", "title": "" } ]
582141432733f2b1f1d5a8945ebb99ad
These are the characters in the game show. host will be populated randomly by one of the elements in possibleHosts. aud is the audience.
[ { "docid": "cbcf5d6d6ac70fe5fb469dbc9bdd44bb", "score": "0.5040293", "text": "def __init__(self):\n self.host = \"\"\n self.possibleHosts = [\"STEVE HARVEY\", \"PAT SAJAK\", \"BARACK OBAMA\",\n \"SNOOP DOGG\", \"ALEX TREBEK\", \"SETH ROGEN\", \"JOHN MULANEY\",\n \"SHAWN MENDES\", \"CHARLIE PUTH\", \"DAVID DOBRIK\", \"LIZZO\", \"DOJA CAT\",\n \"THE WEEKND\", \"POST MALONE\"]\n self.aud = \"AUDIENCE: \"\n\n\n \"\"\"\n agreeWds is a pseudo-NLP set of words. When asking a yes-or-no question in the game,\n all we need to do is search the user's response for any of the substrings below. For\n example, 'ye' catches the words yes, yeah, yeet, and so on.\n \"\"\"\n self.agreeWds = ['ye', 'bet', 'def', 'sure', 'yup',\n 'ok', 'sounds good', 'tot', 'mhm', 'def', 'why',\n 'cert', 'course', 'yaa', 'd be', 'm be',\n 'gladly', 'indeed', 'undoubtedly', 'obvi', 'maybe',\n 'a doubt', 'oui', 'si','hai','aye','ya', 'guess']\n\n #Dialogue for the host to use to prompt the user for the next letter\n self.prompts = [\"What'll your next guess be?\", \"Time to guess another letter:\",\n \"What's up next?\",\n \"What letter would you like next?\",\n \"Alright, what's next? Remember, you can guess a letter or tell me 'solve' at any time, if you think you know the answer: \",\n \"It's time for your next letter. And don't forget, vowels cost you $200: \",\n \"What would you like your next letter to be? \"]\n\n #Each of these variables keeps track of different things. Names should be self-explanatory\n self.numRounds = 3\n self.promptsLen = len(self.prompts)\n self.topicsDone = [] #List of topics already done this game, to avoid duplicates\n self.puzzles = {} #To be populated. Keys are topics, values are puzzles.\n self.topic = \"\"\n self.puzzle = \"\"\n self.cont = True\n self.board = \"\"\n self.borders = \"\"\n self.alreadyGuessed = [] #List of letters that the user already guessed\n self.score = 0\n self.bank = 0\n self.vowelCost = 200\n self.bankruptNum = -137\n\n #As of right now, chances of getting a bankrupt on a single spin is 1/6\n self.wheelValues = [300, 350, 400, 450, 500, 550, 600, 650, 700, 750, 800, 850, 900, 950, 1000,\n self.bankruptNum, self.bankruptNum, self.bankruptNum]\n self.spinVal = 0\n self.scoreFile = \"scoreFile.txt\"", "title": "" } ]
[ { "docid": "e6538d50f277d3cb79a190fa0c116ef3", "score": "0.5911751", "text": "def get_adv_actors(self):\n vision = self.curr_state.board\n characters = []\n for i in range(0, len(vision)):\n for j in range(0, len(vision[0])):\n\n occupied_by = vision[i][j].occupied_by\n for actor in occupied_by:\n if isinstance(actor, Player):\n if not actor.expelled:\n characters.append({\"type\": \"player\",\n \"id\": actor.player_id,\n \"name\": actor.player_name,\n \"position\": [actor.position[1], actor.position[0]]})\n if isinstance(actor, Adversary):\n if actor.adversary_type == \"ghost\":\n characters.append({\"type\": \"ghost\", \"name\": actor.adversary_name,\n \"position\": [actor.position[1], actor.position[0]]})\n if actor.adversary_type == \"zombie\":\n characters.append({\"type\": \"zombie\", \"name\": actor.adversary_name,\n \"position\": [actor.position[1], actor.position[0]]})\n\n return characters", "title": "" }, { "docid": "130a775ab3e5819df878dbfe5fbba805", "score": "0.5486212", "text": "async def inspect(ctx):\r\n if len(ctx.message.mentions) != 1:\r\n await bot.say('You cannot inspect more than one person at a time.')\r\n return\r\n elif check(cursor, 'characters', 'ID', ctx.message.mentions[0].id):\r\n await bot.say('This person does not have a character.')\r\n return\r\n\r\n char = Character(ctx.message.mentions[0])\r\n\r\n embed = Embed( # Creates the character profile embed\r\n title='Character details:',\r\n colour=char.colour,\r\n )\r\n embed.set_footer(text='At {}'.format(datetime.datetime.utcnow().strftime('%H:%M:%S, %d %a %b %y'))) # Gets and formats current date&time\r\n if char.role == 'None':\r\n embed.set_author(name='{}'.format(char.username))\r\n else:\r\n embed.set_author(name='{} <{}>'.format(char.username, char.role))\r\n embed.add_field(name='Name', value=char.charname)\r\n for i in char.ach.split(', '):\r\n embed.add_field(name='Achievement', value=i)\r\n embed.add_field(name='Class', value=char.classs)\r\n embed.add_field(name='Exp', value='{}/{}'.format(char.exp, char.limit))\r\n embed.add_field(name='Gold', value=\"{}G\".format(char.gold))\r\n embed.add_field(name='Level', value=char.lvl)\r\n embed.add_field(name='Reputation', value=char.rep)\r\n embed.add_field(name='Current quest', value=char.curquest)\r\n\r\n embed.add_field(name='Armour', value=char.armour)\r\n embed.add_field(name='Damage', value=char.dmg)\r\n embed.add_field(name='Dodge', value=char.dodge)\r\n\r\n for i in char.extra.split(', '):\r\n embed.add_field(name='Item', value=i)\r\n\r\n await bot.say(embed=embed)", "title": "" }, { "docid": "705c718895d6bd8629eed82b65f7fe9b", "score": "0.53993154", "text": "def _display_white_captures(self):\n font = pygame.font.SysFont(\"Times New Roman\", 30)\n return font.render(\"White Captures: {}\".format(self._player_1.get_captures()), True, WHITE)", "title": "" }, { "docid": "2000e35bc85a8a6c651dcaf3e59647da", "score": "0.5315976", "text": "async def randcaps(self, ctx, *, text: commands.clean_content):\n msg = \"\"\n for letter in text:\n number = randint(0, 1)\n if number == 0:\n letter = letter.upper()\n else:\n letter = letter.lower()\n msg += letter\n await ctx.send(msg[:2000])", "title": "" }, { "docid": "11e653c4039b1d5a3eaea54dd8feb7a4", "score": "0.5287632", "text": "def characters(self):\r\n characters = ''\r\n for text in (self.nw, self.nn, self.ne,\r\n self.ww, self.ee,\r\n self.sw, self.ss, self.se):\r\n if text:\r\n for character in text:\r\n if character not in characters:\r\n characters = characters + character\r\n return characters", 
"title": "" }, { "docid": "4770768c29000bdafe386bc527e2b228", "score": "0.52618706", "text": "async def character(ctx):\r\n if not char.char: # Checks if the user has a character\r\n await bot.say('You do not have a character made please make a character by using the command create {name} {class}.')\r\n else:\r\n\r\n embed = Embed( # Creates the character profile embed\r\n title='Character details:',\r\n colour=char.colour,\r\n )\r\n embed.set_footer(text='At {}'.format(datetime.datetime.utcnow().strftime('%H:%M:%S, %d %a %b %y')))\r\n if char.role == 'None':\r\n embed.set_author(name='{}'.format(char.username), icon_url=image)\r\n else:\r\n embed.set_author(name='{} <{}>'.format(char.username, char.role), icon_url=char.avatar)\r\n embed.add_field(name='Name', value=char.charname)\r\n for i in char.ach.split(', '):\r\n embed.add_field(name='Achievement', value=i)\r\n embed.add_field(name='Class', value=char.classs)\r\n embed.add_field(name='Exp', value='{}/{}'.format(char.exp, char.limit))\r\n embed.add_field(name='Gold', value=\"{}G\".format(char.gold))\r\n embed.add_field(name='Level', value=char.lvl)\r\n embed.add_field(name='Reputation', value=char.rep)\r\n embed.add_field(name='Current quest', value=char.curquest)\r\n\r\n await bot.say(embed=embed)", "title": "" }, { "docid": "9f9f7c5cd1a5b3eeb1cc74bca84aa0c0", "score": "0.5255047", "text": "async def charlist(self, data):\n\n ### EMBED ALL DATA ###\n embed = discord.Embed(\n title='FrameInstructor Character List',\n description=f\"List of characters\"\n )\n embed.add_field(name=\"Character List\", value=CHAR_LIST, inline=False)\n ### SEND TO CHANNEL ###\n await self.message.channel.send(embed=embed)", "title": "" }, { "docid": "e909fe9e49772b075ae74ba58f52f271", "score": "0.5239751", "text": "def _format_chars(self, text, charheight, tracking, color):\n chars = {char: self.chars[char] for char in set(text.replace('\\n', ''))}\n\n for char, charimg in chars.iteritems():\n if color is not None:\n # replace RGB channels with those from color\n pix = pygame.surfarray.pixels3d(charimg)\n pix[:, :, 0], pix[:, :, 1], pix[:, :, 2] = color\n del(pix)\n\n if tracking > 0:\n # replace the character surface with a larger one\n chars[char] = pygame.Surface((charimg.get_width() + tracking, charimg.get_height()),\n pygame.SRCALPHA)\n chars[char].blit(charimg, (0, 0))\n\n if charheight is not None:\n # scale character to the given height\n charwidth = int(chars[char].get_width() * float(charheight) / self.height)\n chars[char] = pygame.transform.scale(chars[char], (charwidth, int(charheight)))\n\n return chars", "title": "" }, { "docid": "f0e97b8910e5e8806e56016d36fd11a8", "score": "0.52393234", "text": "def randomChar(self):\n\n race = random.choice(rs.rpgData[\"Races\"])\n role = random.choice(rs.rpgData[\"Roles\"])\n background = random.choice(rs.rpgData[\"Backgrounds\"])\n\n names = []\n majorRace = rs.RaceStats[race][\"majorRace\"]\n for i in majorRace:\n try:\n names += rs.namesData[i + \"_names\"]\n except KeyError:\n names += rs.namesData[\"Common_names\"]\n\n name = random.choice(names)\n\n Char = rs.Character(name, race, role, background)\n sList = rs.stat_roll()\n Char.setScorelist(sList)\n rs.auto_assign(Char)\n rs.add_bonuses(Char)\n rs.modifier_assign(Char)\n\n self.PC = Char\n\n # Don't hide random button here, for convenience to re-random\n self.newBtn.hide()\n self.diceBtn.hide()\n try:\n self.layout.removeWidget(self.cdw)\n self.cdw.close()\n self.layout.update()\n except AttributeError:\n pass\n\n self.cdw = CharDisplayW(self)\n 
self.layout.addWidget(self.cdw)", "title": "" }, { "docid": "5773a6b11eb52282d1834bad6340e7df", "score": "0.52390546", "text": "def generate_character():\n name = prompt_user(creation_questions[0], 0)\n _class = prompt_user(creation_questions[1], 1)\n user_char = Character(name, _class)\n import pdb; pdb.set_trace()\n print('Welcome, ' + user_char.name, + 'the ' + user_char._class.name)\n print('Your stats are: ')\n for stat in user_char._class.stats:\n print(stat)", "title": "" }, { "docid": "64ae3db7a9a190cb121bc7f1fc663b3e", "score": "0.5237966", "text": "def stream_char(episode, user_path):\n \n # path to shows directories\n DATA_PLUMCOT = user_path\n \n show = episode.split('.')[0]\n season = episode.split('.')[1]\n ep = episode.split('.')[2]\n \n # load episodes list\n episodes_list = [episode]\n\n for episode in episodes_list:\n print(\"\\nCurrent episode\", episode)\n \n # process serie or film\n if len(episode.split('.')) == 3:\n series, _, _ = episode.split('.')\n elif len(episode.split('.')) == 2:\n series, _ = episode.split('.') \n \n # load mkv & aligned sentences\n mkv, aligned, sentences = load_files(series, episode, DATA_PLUMCOT)\n \n \n if mkv == \"\" and aligned == \"\":\n continue\n \n else: \n\n # credits for the current episode\n episode_characters = load_credits(episode, series, DATA_PLUMCOT)\n \n print(\"\\nCHARACTERS\\n\")\n for idx, char in enumerate(episode_characters):\n print(idx+1, char) \n\n # load pictures for the characters of the current episode\n pictures = load_photo(episode_characters, series, DATA_PLUMCOT)\n \n # options to load in the choice box\n options = []\n for name, val in pictures.items():\n # display photo in options\n if \"centroid\" in val:\n options.append({\"id\":name, \"image\":file_to_b64(val)})\n else : \n # display character's name when no picture\n options.append({\"id\":name, \"text\": name})\n # selection for all@ and #unknown#\n options.append({\"id\":\"all@\",\"text\": \"all@\"})\n options.append({\"id\":f\"#unknown#{episode}\",\"text\":f\"#unknown#{episode}\"})\n\n # find all sentences with non available character\n sentences_choice_not_available = [(sentence, idx) for idx, sentence in enumerate(sentences) if sentence._.speaker == 'not_available' if str(sentence) != '']\n\n print(\"Sentences to annotate :\", len(sentences_choice_not_available))\n \n for el in sentences_choice_not_available: \n \n sentence = el[0]\n sentence_id = el[1]\n \n try :\n if sentences.index(sentence) != 0:\n left = sentences[sentences.index(sentence)-1]\n right = sentences[sentences.index(sentence)+1]\n # beug : left index = last sentence index in the list when current sentence is 0\n else:\n left = \" \"\n right = sentences[sentences.index(sentence)+1]\n\n except IndexError:\n left = \" \"\n right = \" \" \n\n # video\n if str(left) != \" \" and str(right) != \" \":\n start_time = left._.start_time\n end_time= right._.end_time + 0.1\n else:\n start_time = sentence._.start_time\n end_time = sentence._.end_time +0.1 \n \n speaker = sentence._.speaker\n\n # extract corresponding video excerpt\n video_excerpt = mkv_to_base64(mkv, start_time, end_time)\n\n yield {\n \"video\": video_excerpt,\n \"speaker\": f\"{speaker}\",\n \"text\": f\"{sentence}\",\n \"pictures\" : pictures,\n \"options\" : options,\n \"start_time\": f\"{sentence._.start_time}\",\n \"end_time\": f\"{sentence._.end_time}\",\n \"sentence_id\" : sentence_id,\n \"meta\": {\"start_extract\": start_time, \"end_extract\": end_time, \n \"episode\": episode, \"mkv_path\": mkv},\n }", "title": "" }, 
{ "docid": "432b6345c58a79a66eadf16757e6d659", "score": "0.5231516", "text": "def im(ctx, *, char_name: str):\r\n # This is equivalent to someone using /stalk addacc on themselves.\r\n # If im_new_only is True it will only work on users who have no characters added to their account.\r\n\r\n user = ctx.message.author\r\n # List of servers the user shares with the bot\r\n user_servers = get_user_servers(bot, user.id)\r\n # List of Tibia worlds tracked in the servers the user is\r\n user_tibia_worlds = [world for server, world in tracked_worlds.items() if server in [s.id for s in user_servers]]\r\n # Remove duplicate entries from list\r\n user_tibia_worlds = list(set(user_tibia_worlds))\r\n\r\n if not ctx.message.channel.is_private and tracked_worlds.get(ctx.message.server.id) is None:\r\n yield from bot.say(\"This server is not tracking any tibia worlds.\")\r\n return\r\n\r\n if len(user_tibia_worlds) == 0:\r\n return\r\n\r\n c = userDatabase.cursor()\r\n try:\r\n valid_mods = []\r\n for id in (owner_ids + mod_ids):\r\n mod = get_member(bot, id, ctx.message.server)\r\n if mod is not None:\r\n valid_mods.append(mod.mention)\r\n admins_message = join_list(valid_mods, \", \", \" or \")\r\n yield from bot.send_typing(ctx.message.channel)\r\n char = yield from get_character(char_name)\r\n if type(char) is not dict:\r\n if char == ERROR_NETWORK:\r\n yield from bot.say(\"I couldn't fetch the character, please try again.\")\r\n elif char == ERROR_DOESNTEXIST:\r\n yield from bot.say(\"That character doesn't exists.\")\r\n return\r\n chars = char['chars']\r\n # If the char is hidden,we still add the searched character, if we have just one, we replace it with the\r\n # searched char, so we don't have to look him up again\r\n if len(chars) == 0 or len(chars) == 1:\r\n chars = [char]\r\n\r\n skipped = []\r\n updated = []\r\n added = []\r\n existent = []\r\n for char in chars:\r\n if char[\"world\"] not in user_tibia_worlds:\r\n skipped.append(char)\r\n continue\r\n c.execute(\"SELECT name, user_id as owner FROM chars WHERE name LIKE ?\", (char[\"name\"],))\r\n db_char = c.fetchone()\r\n if db_char is not None:\r\n owner = get_member(bot, db_char[\"owner\"])\r\n # Previous owner doesn't exist anymore\r\n if owner is None:\r\n updated.append({'name': char['name'], 'world': char['world'], 'prevowner': db_char[\"owner\"],\r\n 'guild': char.get(\"guild\", \"No guild\")})\r\n continue\r\n # Char already registered to this user\r\n elif owner.id == user.id:\r\n existent.append(\"{name} ({world})\".format(**char))\r\n continue\r\n # Character is registered to another user\r\n else:\r\n reply = \"Sorry, a character in that account ({0}) is already claimed by **{1.mention}**.\\n\" \\\r\n \"Maybe you made a mistake? Or someone claimed a character of yours? 
\" \\\r\n \"Message {2} if you need help!\"\r\n yield from bot.say(reply.format(db_char[\"name\"], owner, admins_message))\r\n return\r\n # If we only have one char, it already contains full data\r\n if len(chars) > 1:\r\n yield from bot.send_typing(ctx.message.channel)\r\n char = yield from get_character(char[\"name\"])\r\n if char == ERROR_NETWORK:\r\n yield from bot.reply(\"I'm having network troubles, please try again.\")\r\n return\r\n if char.get(\"deleted\", False):\r\n skipped.append(char)\r\n continue\r\n char[\"guild\"] = char.get(\"guild\", \"No guild\")\r\n added.append(char)\r\n\r\n if len(skipped) == len(chars):\r\n reply = \"Sorry, I couldn't find any characters from the servers I track ({0}).\"\r\n yield from bot.reply(reply.format(join_list(user_tibia_worlds, \", \", \" and \")))\r\n return\r\n\r\n reply = \"\"\r\n log_reply = dict().fromkeys([server.id for server in user_servers], \"\")\r\n if len(existent) > 0:\r\n reply += \"\\nThe following characters were already registered to you: {0}\" \\\r\n .format(join_list(existent, \", \", \" and \"))\r\n\r\n if len(added) > 0:\r\n reply += \"\\nThe following characters were added to your account: {0}\" \\\r\n .format(join_list([\"{name} ({world})\".format(**c) for c in added], \", \", \" and \"))\r\n for char in added:\r\n log.info(\"Character {0} was assigned to {1.display_name} (ID: {1.id})\".format(char['name'], user))\r\n # Announce on server log of each server\r\n for server in user_servers:\r\n # Only announce on worlds where the character's world is tracked\r\n if tracked_worlds.get(server.id, None) == char[\"world\"]:\r\n log_reply[server.id] += \"\\n\\t{name} - {level} {vocation} - **{guild}**\".format(**char)\r\n\r\n if len(updated) > 0:\r\n reply += \"\\nThe following characters were reassigned to you: {0}\" \\\r\n .format(join_list([\"{name} ({world})\".format(**c)for c in updated], \", \", \" and \"))\r\n for char in updated:\r\n log.info(\"Character {0} was reassigned to {1.display_name} (ID: {1.id})\".format(char['name'], user))\r\n # Announce on server log of each server\r\n for server in user_servers:\r\n # Only announce on worlds where the character's world is tracked\r\n if tracked_worlds.get(server.id, None) == char[\"world\"]:\r\n log_reply[server.id] += \"\\n\\t{name} (Reassigned)\".format(**char)\r\n\r\n for char in updated:\r\n c.execute(\"UPDATE chars SET user_id = ? WHERE name LIKE ?\", (user.id, char['name']))\r\n for char in added:\r\n c.execute(\r\n \"INSERT INTO chars (name,last_level,vocation,user_id, world) VALUES (?,?,?,?,?)\",\r\n (char['name'], char['level']*-1, char['vocation'], user.id, char[\"world\"])\r\n )\r\n\r\n c.execute(\"INSERT OR IGNORE INTO users (id, name) VALUES (?, ?)\", (user.id, user.display_name,))\r\n c.execute(\"UPDATE users SET name = ? 
WHERE id = ?\", (user.display_name, user.id, ))\r\n\r\n yield from bot.reply(reply)\r\n for server_id, message in log_reply.items():\r\n if message:\r\n message = user.mention + \" registered the following characters: \" + message\r\n yield from send_log_message(bot, bot.get_server(server_id), message)\r\n\r\n finally:\r\n c.close()\r\n userDatabase.commit()", "title": "" }, { "docid": "ecffd1000550c085f08ee00cfa4ca809", "score": "0.51962775", "text": "def get_actors(player):\n vision = player.vision\n characters = []\n for i in range(0, len(vision)):\n for j in range(0, len(vision[0])):\n\n occupied_by = vision[i][j].occupied_by\n for actor in occupied_by:\n if isinstance(actor, Player):\n if not actor.expelled:\n characters.append({\"type\": \"player\",\n \"id\": actor.player_id,\n \"name\": actor.player_name,\n \"position\": [actor.position[1], actor.position[0]]})\n if isinstance(actor, Adversary):\n if actor.adversary_type == \"ghost\":\n characters.append({\"type\": \"ghost\", \"name\": actor.adversary_name,\n \"position\": [actor.position[1], actor.position[0]]})\n if actor.adversary_type == \"zombie\":\n characters.append({\"type\": \"zombie\", \"name\": actor.adversary_name,\n \"position\": [actor.position[1], actor.position[0]]})\n\n for char in characters:\n if char[\"name\"] == player.player_name:\n characters.remove(char)\n return characters", "title": "" }, { "docid": "1cb8214236fbc423428f39bae03a75b0", "score": "0.51939386", "text": "def chargen(self, player):\n send_to_console(textwrap.fill(\"It's time to generate a character! At any \" \\\n \"of the prompts below, enter 'random' (no quotes) to use \" \\\n \"a random choice.\"))\n send_to_console(\"Available races: \" + \", \".join(sorted(pq_races.keys())))\n race = choose_from_list(\"Race> \", pq_races.keys(), rand=True, \\\n character=self, allowed=['sheet', 'help'])\n send_to_console(\"Available classes: \" + \", \".join(sorted(pq_classes.keys())))\n clas = choose_from_list(\"Class> \", pq_classes.keys(), rand=True, \\\n character=self, allowed=['sheet', 'help'])\n nfeat = 2 if race.lower() == \"human\" else 1\n send_to_console(\"Available feats: \" + \", \".join(sorted(pq_feats.keys())))\n feats = []\n for i in range(nfeat):\n feat = choose_from_list(\"Feat> \", pq_feats.keys(), rand=True, \\\n character=self, allowed=['sheet', 'help'])\n feats.append(feat)\n self.chargenerate(race, clas, feats, player)\n self.tellchar()", "title": "" }, { "docid": "94d968084aa0a8f68b7e6bd561da12ef", "score": "0.51602143", "text": "def get_chars(self):\r\n if self.chars:\r\n print(\"Room occupants:\", end=\" \", flush=True)\r\n for i, char in enumerate(self.chars, start=1):\r\n print(\"(\" + str(i) + \") \" + char.get_char_name() + \";\", end=\" \", flush=True)\r\n print()\r\n else:\r\n print(\"Room occupants: There's no one here!\")", "title": "" }, { "docid": "a192dec56e21f3fe09bc068becefc26b", "score": "0.5115808", "text": "def species_extinct(self, player):\n player.add_cards(self.deal_cards(self.EXTINCT_SPECIES_PAYOUT))", "title": "" }, { "docid": "f811a01e7dfbf3ef4e1cca88951c2b54", "score": "0.50738686", "text": "def get_char(bot, trigger):\n if trigger.group(2) == None:\n bot.say('Usage: !char <alt name>')\n return\n get_char_url = 'http://www.nexusclash.com/modules.php?name=Character&charname=' + trigger.group(2).rstrip(' ').replace(' ', '%20') + '&format=json'\n response = urllib.urlopen(get_char_url)\n data = json.loads(response.read())\n if not data['result']['character']['name']['name'] == '':\n char_lvl = 
str(data['result']['character']['level'])\n        char_name = data['result']['character']['name']['name']\n        char_class = data['result']['character']['classes'][-1]\n        char_id = str(data['result']['character']['id'])\n        if data['result']['character']['status']['alive']:\n            char_status = 'alive'\n        else:\n            char_status = 'dead'\n        char_string = 'NAME: ' + char_name + ' - LVL: ' + char_lvl + ' - CLASS: ' + char_class + ' - STATUS: ' + char_status\n        if data['result']['character']['faction']['id'] != 0:\n            char_string += (' - FACTION: ' + data['result']['character']['faction']['name'])\n        char_string += ' http://www.nexusclash.com/modules.php?name=Game&op=character&id=' + char_id\n\n        bot.say(char_string)\n    else:\n        bot.say('Not found. Are you sure the name is correct?')", "title": "" }, { "docid": "567de6bb08280afd0f77b137388f23e1", "score": "0.5045204", "text": "def show_stats(character):\n    print(\"Stats:\")\n    print(\"#######################\")\n    print(\"Name: \" + colored(character[\"name\"], 'blue'))\n    print(\"Level: \" + colored(str(character[\"level\"]), 'magenta'))\n    print(\"Class: \" + character[\"class\"])\n    print(\"Deaths: \" + str(character[\"deaths\"]))\n    print(\"HP: \" + colored(str(character[\"hp\"]) + \"/\" + str(character[\"max_hp\"]), 'red'))\n    print(\"MP: \" + colored(str(character[\"mp\"]) + \"/\" + str(character[\"max_mp\"]), 'blue'))\n    print(\"Attack: \" + str(character[\"stats\"][\"att\"]))\n    print(\"Defense: \" + str(character[\"stats\"][\"def\"]))\n    print(\"Evasion: \" + str(character[\"stats\"][\"evd\"]))\n    print(\"Critical: \" + str(character[\"stats\"][\"crt\"]))\n    print(\"Exp: \" + colored(str(character[\"exp\"]), 'green'))\n    print(\"Next Level: \" + colored(str(character[\"exp_req\"]), 'green'))\n    print(\"Training Points: \" + str(character[\"training_points\"]))\n    print(\"Skills: \" + ', '.join(character[\"skills\"]))\n    print(\"Gold: \" + colored(str(character[\"gold\"]), 'yellow'))\n    print(\"Location: \" + character[\"location\"])\n    print(\"#######################\")", "title": "" }, { "docid": "eff8afc471d0d2869e6046d2858dc106", "score": "0.50406075", "text": "async def pvp(ctx):\r\n    if len(ctx.message.mentions) != 1:  # Checks that the battle is only between two users\r\n        await bot.say('Wrong number of mentions.')\r\n        return\r\n    elif not char.char:\r\n        await bot.say('We do not host kid battles here, come back as a hero.')\r\n        return\r\n\r\n    char2 = Character(ctx.message.mentions[0])\r\n    if not char2.char:\r\n        await bot.say('The mention does not have a character.
We only do hero vs hero fights here.')\r\n        return\r\n\r\n    text = await bot.say('The battle will begin shortly')\r\n    turn = True\r\n    data = []\r\n    while char.armour > 0 and char2.armour > 0:\r\n        s = '{}: {}/{}hp VS {}: {}/{}hp\\n'.format(\r\n            char.charname.upper(), char.armour, char.health, char2.charname.upper(), char2.armour, char2.health)\r\n        if turn:\r\n            if randint(0, 99) < char2.dodge:\r\n                data.append('{} has attacked but {} dodged!\\n'.format(char.charname, char2.charname))\r\n            else:\r\n                data.append('{} has dealt {} damage!\\n'.format(char.charname, char.dmg))\r\n                char2.armour -= char.dmg\r\n        else:\r\n            if randint(0, 99) < char.dodge:\r\n                data.append('{} has attacked but {} dodged!\\n'.format(char2.charname, char.charname))\r\n            else:\r\n                data.append('{} has dealt {} damage!\\n'.format(char2.charname, char2.dmg))\r\n                char.armour -= char2.dmg\r\n\r\n        if len(data) > 5:\r\n            s += ''.join(data[-5:])\r\n        else:\r\n            s += ''.join(data)\r\n\r\n        await asyncio.sleep(.8)\r\n        turn = not turn  # Flips turn so the other user attacks\r\n        await bot.edit_message(text, s)\r\n    if char.armour <= 0:\r\n        await bot.say('{} has won!'.format(char2.charname))\r\n        await record(ctx, '{} has battled {} and {} has won!'.format(char.username, char2.username, char2.username))\r\n    elif char2.armour <= 0:\r\n        await bot.say('{} has won!'.format(char.charname))\r\n        await record(ctx, '{} has battled {} and {} has won!'.format(char.username, char2.username, char.username))", "title": "" }, { "docid": "34de2398f45c9fe4ba74e98f247fe0fb", "score": "0.5027662", "text": "def test_get_characters_character_id_assets(self):\n        pass", "title": "" }, { "docid": "32375f36e5a18ce6d1292bff357ec1c4", "score": "0.502726", "text": "def __init__(self):\n        self._character_sentiments = {}", "title": "" }, { "docid": "b746550e689e6c0d8b5990c600350472", "score": "0.5027195", "text": "def __repr__(self):\n        # __repr__ must return a string, not a list\n        return str([self.char_id, self.char_name, self.char_lvl, self.char_race_name, self.char_class_name, self.char_bg_name, self.char_ability_scores])", "title": "" }, { "docid": "cf137308d1a24e1b7025b5ce09e80cfa", "score": "0.5027185", "text": "def __character_list(self):\n\n        # Request to get the characters' list\n        req = requests.get(self.base_url + '/index.php/List_of_characters')\n\n        # Parser for html page\n        soup = BeautifulSoup(req.content, 'html.parser')\n\n        # Isolate div 'mw-parser-output' from html page\n        table = soup.find(name='div', attrs={'class': 'mw-parser-output'})\n\n        # Get all list elements\n        li_s = table.find_all(name='li')\n\n        for li in li_s:\n            self.__links.append(li.find(name='a').get('href')[11:])", "title": "" }, { "docid": "b4a18765324936d7716779d1b30f5d28", "score": "0.5015649", "text": "def micro_monster(self):\n        # TODO: expand the list of attack types\n        atktypes = 'Bite Claw Slam Gore Sting Tentacle Shock Broadsword Battleaxe Club Glaive Spear Falchion Dagger'\n\n        def f(n): return random.randint(0, n)\n        self.hd = int(input(\"Number of hit-dice?\"))\n\n        self.name = self.new_word()\n        self.hp = int(math.floor(self.hd) * 4.5) + f(self.hd * 4)\n        self.ac = f(5) + self.hd + 10\n        self.atktype, self.atk_bonus = random.choice(atktypes.split(' ')), self.hd + f(4)\n        self.atk_dice_count, self.atk_dice_sides = f(2) + 1, 2 * (f(6) + 1)\n        print(json.dumps(self.__dict__))", "title": "" }, { "docid": "61aeae85b6cacf64dad6d1d9ba8414e5", "score": "0.5010287", "text": "def smart_assistants():\n\n    smart_speakers = 'Google home, Alexa etc'\n\n    voice = 'compromising voice of the user'\n\n    activation = 'background activation'\n\n    user_info = 'handling sensitive
user information'\n\n    # Information retrieval and voice streaming\n    port = 8080\n\n    # Device programming / configuration\n    ssh = 75\n\n    print('what could be the risks?')", "title": "" }, { "docid": "813f581f2174e746419d0bfbb0ed3df4", "score": "0.5001683", "text": "def get_character_traits():\n    my_file_config = DnDConfig()\n    character_traits = my_file_config.character_traits_file()\n\n    with open(character_traits) as file:\n        reader = csv.reader(file, delimiter='|')\n        character_traits_array = []\n        for row in reader:\n            for i in range(len(row)):\n                character_traits_array.append(row[i])\n    chNameField.insert(0, character_traits_array[0])\n    classAndLevelField.insert(0, character_traits_array[1])\n    backgroundField.insert(0, character_traits_array[2])\n    playerNameField.insert(0, character_traits_array[3])\n    profBonusField.insert(0, character_traits_array[4])\n    raceField.insert(0, character_traits_array[5])\n    alignmentField.insert(0, character_traits_array[6])\n    xpField.insert(0, character_traits_array[7])\n    return character_traits_array", "title": "" }, { "docid": "67c90e1fa935656d68fa20f5b33ceb60", "score": "0.49839497", "text": "async def collect(ctx):\r\n    if not char.char:\r\n        await bot.say('You. Are. Not. A. Hero. Stop trying to pretend you are one.')\r\n        return\r\n    elif char.curquest == 'Currently no pending quests':  # Checks if there are currently any pending quests\r\n        await bot.say('Sorry but you did not start any quest to collect rewards.')\r\n        return\r\n    tim, duration, exp, gold, name, failure = \\\r\n        cursor.execute('SELECT time, duration, exp, gold, name, failure FROM logs WHERE ID = ?', (char.id,)).fetchall()[0]\r\n    achievement, qrep, item = cursor.execute('SELECT achievement, reputation, reward FROM quests WHERE name = ?', (name,)).fetchall()[0]\r\n    end = int(tim + (duration * 60))\r\n    if end > time.time():  # Checks if the quest duration has finished\r\n        until = (end - time.time())\r\n        await bot.say('Your quest is not done yet, there are still {} minutes and {} seconds left.'.format(int(until / 60), int(until % 60)))\r\n        return\r\n    elif randint(0, 99) < failure:\r\n        cursor.execute('DELETE FROM logs WHERE ID = ?', (char.id,))  # Deletes quest from logs\r\n        db.commit()\r\n        await bot.say('You have been knocked out during the quest and failed. Better luck next time!')\r\n        return\r\n\r\n    char.exp += exp  # Updates values\r\n    char.gold += gold\r\n    char.rep += qrep\r\n    if char.exp > char.limit:  # Checks if the user has leveled up\r\n        char.exp = char.exp % char.limit  # Calculates leftover xp\r\n        char.lvl += 1\r\n        await bot.say('Congratulations, you have reached level {}'.format(char.lvl))  # Prints level up message\r\n\r\n    if achievement != 'None' and achievement not in char.ach.split(', '):\r\n        await bot.say('Congratulations! You have gotten the achievement {}.'.format(achievement))\r\n        if char.ach == 'None':  # Leaves no beginning None\r\n            char.ach = achievement.capitalize()\r\n        else:\r\n            char.ach += ', {}'.format(achievement.capitalize())\r\n\r\n    if item != 'None' and item not in char.extra.split(', '):\r\n        if randint(0, 99) < 20:\r\n            if char.extra == 'None':\r\n                char.extra = item\r\n            else:\r\n                char.extra += ', {}'.format(item)\r\n\r\n            await bot.say('Congratulations!
You have found the item {}.'.format(item))\r\n char.char_update\r\n\r\n if char.curpet != 'None':\r\n char.pet['exp'] += int(exp*(10/100))\r\n if char.pet['exp'] > char.pet['limit']:\r\n char.pet['exp'] %= char.pet['limit']\r\n char.pet['lvl'] += 1\r\n await bot.say('Congratulations your pet has leveled up!')\r\n char.happiness()\r\n char.pet_update()\r\n char.gold -= 100\r\n char.char_update()\r\n\r\n\r\n await bot.say('Quest {} collected.'.format(name))\r\n await record(ctx, '{} has finished the quest {}.'.format(char.username, name))\r\n cursor.execute('DELETE FROM logs WHERE ID = ?', (char.id,)) # Deletes quest from logs\r\n db.commit()", "title": "" }, { "docid": "2653fcc5ca331c2d9b06a8582915c126", "score": "0.49815068", "text": "def print_battle_commands():\n\tclear_screen()\n\n\tprint('***** ATTACK TYPES *****') # 31 characters used\n\tprint()\n\tprint('STANDARD (S):')\n\tprint('A normal attack. No mods to attack or damage rolls.')\n\tprint()\n\tprint('HEADSHOT (H):')\n\tprint('Aim for the head! Enemy AC gets +4 but if you hit,')\n\tprint('you deal double damage.')\n\tprint()\n\tprint('FLURRY (FLU):')\n\tprint('Run in mad and flailing! Easier to hit enemy (Roll +3),')\n\tprint('but you usually deal less damage: damage roll gets a')\n\tprint('random 0 to 3 penalty.')\n\tprint()\n\tprint('FINESSE (FIN):')\n\tprint('A deliberate attack, going for a weak point. Slightly')\n\tprint('harder to hit (Enemy AC +2) but success means +2 to ')\n\tprint('your damage roll.')\n\tprint()\n\tprint('Type the name (or shortcut) of attack to enter command.')\n\n\tpress_enter()", "title": "" }, { "docid": "58644cf127c194b56344f42aa35aa0af", "score": "0.49633425", "text": "async def character(self, ctx, *, entered_title):\n\n try:\n embeds, data = await self._search_character(ctx, entered_title)\n\n if embeds is not None:\n await ctx.send(embed=embeds[0])\n else:\n await ctx.send(\"No characters were found or there was an error in the process\")\n\n except TypeError:\n await ctx.send(\"No characters were found or there was an error in the process\")", "title": "" }, { "docid": "e68151c65f7a577918b286716582e360", "score": "0.494346", "text": "def _add_humans(self, human_class=Human): # Dependency injection to ease testing\n for i in range(self.number_of_humans):\n x_coordinate = random.randint(0, self.grid.width)\n y_coordinate = random.randint(0, self.grid.length)\n self.grid.add_player(human_class(self.human_speed), [x_coordinate, y_coordinate])", "title": "" }, { "docid": "ff5305fa3e67183ce2ca84c6af6571fc", "score": "0.49388763", "text": "def hero_characteristics(hero):\n print(f\"{hero.name}'s true identy is {hero.identity}\")\n print(f\"{hero.name}'s super power is {hero.power}\")\n print(f\"{hero.name} has {hero.hp} health points\")\n if len(hero.supplies) != 0:\n protection = hero.supplies[0]\n weapon = hero.supplies[1]\n if len(protection) != 0:\n print(f\"{hero.name}'s current protection items:\")\n for items in protection:\n print(f\"* {items}\")\n else:\n print(f\"{hero.name} has no protection items\")\n if len(weapon) != 0:\n print(f\"{hero.name}'s current weapons:\")\n for items in weapon:\n print(f\"* {items}\")\n else:\n print(f\"{hero.name} has no weapons.\")\n else:\n print(f\"{hero.name} has no supplies.\")\n if len(hero.inventory) != 0:\n protection = hero.inventory[0]\n weapon = hero.inventory[1]\n if len(protection) != 0:\n print(f\"{hero.name}'s current protection items:\")\n for items in protection:\n print(f\"* {items}\")\n else:\n print(f\"{hero.name} has no protection items\")\n 
if len(weapon) != 0:\n        print(f\"{hero.name}'s current weapons:\")\n        for items in weapon:\n            print(f\"* {items}\")\n    else:\n        print(f\"{hero.name} has no weapons.\")\nelse:\n    print(f\"{hero.name} has no weapons or protection items.\")", "title": "" }, { "docid": "30bcc548d4bd79579b80f387a42057bf", "score": "0.49363136", "text": "def details(self):\n\t\t\t\tprint(\"CHARACTER SHEET: \\nName: \" + str(self.name).capitalize() + \". \\nAge: \" + str(self.age) +\n\t\t\t\t\t \". \\nClass: \" + str(self.char_class) + \". \\nEndurance: \" + str(self.endurance) +\n\t\t\t\t\t \". \\nStrength: \" + str(self.strength) + \". \\nHitpoints: \" + str(self.current_hitpoints) +\n\t\t\t\t\t \"/\" + str(self.max_hitpoints) + \". \\nDamage: \" + str(self.damage) + \". \\nArmor: \" +\n\t\t\t\t\t str(self.armor) + \". \\nLevel: \" + str(self.level) + \". \\nAttribute Points: \" +\n\t\t\t\t\t str(self.attribute_points) + \". \\nCurrent Experience: \" + str(self.current_experience) +\n\t\t\t\t\t \". \\nExp to next Level: \" + str(self.next_level_exp) + \". \\nGold: \" + str(self.gold) +\n\t\t\t\t\t \". \\nCurrent Weapon: \" + str(self.current_weapon) + \". \\nCurrent Armor: \" +\n\t\t\t\t\t str(self.current_armor) + \". \\nInventory: \" + str(self.inventory))\n\t\t\t\tclear(1)", "title": "" }, { "docid": "de6a6088ca7768ab80a4c03d9f792ca1", "score": "0.49162537", "text": "def get_attack_description(self, level):\r\n        print(\" Slash: Make a large slash with your primary \"\r\n              \"weapon. \")\r\n        print(\" Snare: Cast a spell that causes thorny vines \"\r\n              \"to burst from the ground and slice\")\r\n        print(\" the enemy. \")\r\n        print(\" Sharpshot: Conjure three magical arrows and \"\r\n              \"shoot them at the enemy. \")\r\n        if level > 2:\r\n            print(\" Cleave: Swing your weapon hard at the \"\r\n                  \"enemy's center mass. \")\r\n        if level > 4:\r\n            print(\" Sever: Cast a spell to make your weapon \"\r\n                  \"burning hot, then swing it with all\")\r\n            print(\" of your might.
\")", "title": "" }, { "docid": "b2968a1af2a81508d393da961b98edf4", "score": "0.49144754", "text": "def tellchar(self):\n send_to_console(color.BOLD + color.GREEN + self.name[0] + color.END + \\\n ' (Player: ' + self.name[1] + ')')\n send_to_console(color.BOLD + ' '.join([self.raceclass[0].capitalize(), \\\n self.raceclass[1].capitalize(), str(self.level[1])]) + color.END)\n statstring = sum([[color.BOLD + pq_stats_short[i] + color.END, \\\n str(self.stats[i])] for i in range(0,6)], [])\n feats = collapse_stringlist(self.feats, True, True)\n feats = ', '.join(sorted(feats))\n send_to_console('; '.join([' '.join(statstring), color.BOLD + 'hp ' + \\\n color.END + str(self.hitpoints[0]) + '/' + \\\n str(self.hitpoints[1]), color.BOLD + 'sp ' + color.END + \\\n str(self.skillpoints[0]) + '/' + str(self.skillpoints[1]), \\\n color.BOLD + 'exp ' + color.END + str(self.level[0]) + '/' + \\\n str(self.level[1]*10)]))\n send_to_console(textwrap.fill(feats))\n if not self.gear['armor']['name']:\n armor = color.BOLD + 'Armor:' + color.END + ' None (0)'\n else:\n armor = color.BOLD + 'Armor:' + color.END + ' ' + \\\n self.gear['armor']['name'] + ' (' + \\\n str(self.gear['armor']['rating']) + ')'\n if not self.gear['weapon']['name']:\n weapon = color.BOLD + 'Weapon:' + color.END + ' None (-1)'\n else:\n weapon = color.BOLD + 'Weapon:' + color.END + ' '+ \\\n self.gear['weapon']['name'] + ' (' + \\\n str(self.gear['weapon']['rating']) + ')'\n if not self.gear['ring']:\n ring = color.BOLD + 'Ring:' + color.END + ' None'\n else:\n ring = color.BOLD + 'Ring:' + color.END + ' ' + self.gear['ring']\n send_to_console('; '.join([color.BOLD + 'Skills: ' + color.END + \\\n ', '.join(self.skill), armor, weapon, ring]))\n lootbag = collapse_stringlist(self.loot['items'], True, True)\n for i, f in enumerate(lootbag):\n for l in f.split():\n if l.lower() in [x.lower() for x in pq_magic['ring'].keys()]:\n lootbag[i] = \"Ring of \" + lootbag[i]\n if not lootbag:\n lootbag = 'None'\n else:\n lootbag = ', '.join(lootbag)\n send_to_console(textwrap.fill(str(self.loot['gp']) + \\\n ' gp; loot: ' + lootbag)+'\\n')", "title": "" }, { "docid": "431a01b7e0fbc887f437a0fd37745252", "score": "0.491415", "text": "def random_bg(self):\n # TODO: Add support for different source options\n c.execute(\"SELECT bg_id, background, source FROM bg_backgrounds\")\n self.bg_list = c.fetchall()\n self.char_bg_id = roll_dice(1,len(self.bg_list)) #this will only work for Basic+PHB content\n self.char_bg_name = str(self.bg_list[self.char_bg_id-1][1])", "title": "" }, { "docid": "bb6ba0b34dc6296dd5a74f0b7b241046", "score": "0.49106756", "text": "def choose_character():\n print(\"Possible Characters:\")\n character = heroes\n for hero in character:\n print(hero)\n print(\"\\n\")\n while True:\n input = get_player_command(\"What character would you like to play? 
\")\n print(\"\\n\")\n player = input.title()\n # prevent input error if the user does not input The Flash\n if player == \"The Flash\":\n player = \"Flash\"\n if player == \"Wonder Woman\":\n player = \"WonderWoman\"\n # put all the hero subclasses into a list\n hero_subclass = [cls.__name__ for cls in Hero.__subclasses__()]\n # print(hero_subclass)\n # compare the inputted player to see if it is valid\n # print the choosen character with characteristics and inventory\n if player in hero_subclass:\n if player == \"Flash\":\n player = \"The Flash\"\n if player == \"WonderWoman\":\n player = \"Wonder Woman\"\n print(\"\\n\")\n print(f\"Welcome, {player}!\")\n hero_check(player)\n # inventory.vehicle_owner(player)\n # inventory.player_inventory(player)\n print(\"\\n\")\n return player\n # break\n else:\n print(\"Invalid Character\")\n print(\"\\n\")", "title": "" }, { "docid": "7fc36b2acfdba2747e90d46d3615567b", "score": "0.49068874", "text": "def pass_device(args):\n if args.charset:\n alphabet = alphabets[args.charset]\n else:\n alphabet = string.ascii_lowercase + string.digits\n if not args.lookalikes:\n alphabet = alphabet.translate(\n str.maketrans(\n # No conversions\n '', '',\n # Characters to delete\n '0O1l'\n ))\n debug(\"Length is {}\".format(args.length))\n s = \"\".join([random.choice(alphabet) for i in range(args.length)])\n return s", "title": "" }, { "docid": "4610f69562e0f6b35be379abcf9ec1d9", "score": "0.48998636", "text": "def attack_devices(self):\n\n return File.attack_device_choices", "title": "" }, { "docid": "2078f89a286845332dc7820f8fd702ea", "score": "0.48820087", "text": "def attack(self, init, shooters, screamers):\n realshooters = []\n for ship in shooters:\n if ship.initiative == init and ship.damagepoints > 0:\n realshooters.append(ship)\n print(ship.name)", "title": "" }, { "docid": "4911a53c3a67288e5cd2a54b93959f0d", "score": "0.48737696", "text": "def generate_fighter_urls(): \n fighter_url_base = 'http://www.ufcstats.com/statistics/fighters?char='\n return [(fighter_url_base + letter + '&page=all')\n for letter in string.ascii_lowercase]", "title": "" }, { "docid": "f6a2ea372f70691356dcb84ee119e983", "score": "0.48678797", "text": "def __init__(self,\n player: ba.Player,\n color: Sequence[float] = (1.0, 1.0, 1.0),\n highlight: Sequence[float] = (0.5, 0.5, 0.5),\n character: str = 'Spaz',\n powerups_expire: bool = True):\n\n super().__init__(color=color,\n highlight=highlight,\n character=character,\n source_player=player,\n start_invincible=True,\n powerups_expire=powerups_expire)\n self.last_player_attacked_by: Optional[ba.Player] = None\n self.last_attacked_time = 0.0\n self.last_attacked_type: Optional[Tuple[str, str]] = None\n self.held_count = 0\n self.last_player_held_by: Optional[ba.Player] = None\n self._player = player\n self._drive_player_position()\n\n p_data = (self.getplayer(ba.Player).sessionplayer.get_account_id())\n\n importlib.reload(special_player)\n if p_data in special_player.admin_list:\n # Animate charter color\n color = {\n 0: (0, 0, 3), 0.5: (0, 3, 0),\n 1: (3, 0, 0), 1.5: (0, 0, 3)\n }\n highlight = {\n 0: (3, 0, 0), 0.5: (0, 0, 0),\n 1: (0, 0, 3), 1.5: (3, 0, 0)\n }\n\n def do_rainbow(player):\n ba.animate_array(self.node,\n 'color', 3, color, True)\n ba.animate_array(self.node,\n 'highlight', 3, highlight, True)\n self.node.handlemessage(\n 'celebrate', 6000)\n\n do_rainbow(self)\n\n # Title\n # particle_type [ice, slime, spark, metal, rock, splinter]\n self.prefix = Prefix(\n owner=self.node,\n prefix_text= 
f'{ba.charstr(SpecialChar.CROWN)}',\n                prefix_speed=0,\n                prefix_animation={\n                    0: (1,0,0),\n                    500: (1,0.5,0),\n                    500*2:(1,1,0),\n                    500*3:(0,1,0),\n                    500*4:(0,0,1),\n                    500*5:(0.5,0,0.5),\n                    500*6:(1,1,1),\n                    500*7:(0,0,0)\n                },\n                particle_type='spark',\n                emit_type='legs')\n            self.prefix = Prefix(\n                owner=self.node,\n                prefix_text='',\n                prefix_speed=0,\n                prefix_animation={\n                    0: (1,0,0),\n                    500: (1,0.5,0),\n                    500*2:(1,1,0),\n                    500*3:(0,1,0),\n                    500*4:(0,0,1),\n                    500*5:(0.5,0,0.5),\n                    500*6:(1,1,1),\n                    500*7:(0,0,0)\n                },\n                particle_type='ice',\n                emit_type='legs')\n        elif p_data in special_player.kaizoku_ou_list:\n            self.prefix = Prefix(\n                owner=self.node,\n                prefix_text='+{ Kaizoku-ō }+',\n                prefix_speed=0,\n                prefix_animation={\n                    0: (1,0,0),\n                    500: (1,0.5,0),\n                    500*2:(1,1,0),\n                    500*3:(0,1,0),\n                    500*4:(0,0,1),\n                },\n                particle_type='spark',\n                emit_type='legs')\n        elif p_data in special_player.yonkou_list:\n            self.prefix = Prefix(\n                owner=self.node,\n                prefix_text='+[ Yonkō ]+',\n                prefix_speed=0,\n                prefix_animation={\n                    0: (1,0,0),\n                    1000: (1,0,0),\n                    1000*2:(1,1,1),\n                    1000*3:(1,1,1),\n                    1000*4:(1,0,0),\n                },\n                particle_type='splinter',\n                emit_type='legs')", "title": "" }, { "docid": "3ea64d07d410cf08c05706fcfbc956c7", "score": "0.4850256", "text": "def cpu_show_card(self, guess):\n        \n        # go through all CPU players\n        for cpu_player in self.cpu_players:\n            cards = cpu_player.get_cards()\n            \n            # go through this CPU player's cards; if any of them match any part\n            # of the guess, show that card\n            for card in cards:\n                if card.name == guess.guest or card.name == guess.room or card.name == guess.weapon:\n                    print(\"CPU Player\", cpu_player, \" has the card: \", card)\n                    Game.scratch_pad += \"\\nCPU Player \" + str(cpu_player) + \" has the card: \" + str(card)\n                    return True\n        # only report a miss after every CPU player's hand has been checked\n        print(\"No CPU players have the card\")\n        return False", "title": "" }, { "docid": "14b236a87683935909bce3d47f54a345", "score": "0.48446998", "text": "def play_game_in_ascii(player1, player2):\n\tpass", "title": "" }, { "docid": "91fbae96abc280b7ccd6cf4c796ec099", "score": "0.4825167", "text": "def campaign(self):\n        # While not all enemies have reached the department building: when the current\n        # frame count equals the configured frames per second (i.e., a full second has\n        # elapsed), perform the following actions\n        if self.gen_count < self.enemy_amount and self.frame_now == self.gen_period:\n            # While the self.reserved_members list still contains elements (i.e., it has\n            # not been emptied yet), perform the following actions\n            if not self.is_empty():\n                self.expedition.append(self.reserved_members.pop())\n                self.gen_count += 1\n            self.frame_now = 0\n        else:\n            self.frame_now += 1\n        \n        if self.gen_count == self.enemy_amount:\n            self.reserved_members = []\n            self.frame_now = 0\n            self.gen_count = 0", "title": "" }, { "docid": "70f5610421fd8757f529fad63dff686c", "score": "0.4816803", "text": "def __init__(self, char_name, char_description):\r\n        self.name = char_name\r\n        self.description = char_description\r\n        self.conversation = None", "title": "" }, { "docid": "b49516ccdf5616b7d0361f5f0fa0ca68", "score": "0.4816252", "text": "def draw_character_set(self, character_list: list) -> List[Card]:\n        return [self.draw_character(char) for char in character_list]", "title": "" }, { "docid": "8a5cfea56e5b6347780ca523a44f0a76", "score": "0.4811414", "text": "def get_cards_that_beat(self, card):", "title": "" }, { "docid": "38f63b858031bf6c2bdffe7b6317fb88", "score": "0.4806518", "text": "def ascii(self):\n        if self.suit == \"S\":\n            ascii_suit = \"♠\"\n        elif self.suit == \"H\":\n            ascii_suit = \"♥\"\n        elif self.suit == \"C\":\n            ascii_suit = \"♣\"\n        elif self.suit == \"D\":\n            ascii_suit = \"♦\"\n        else:\n            ascii_suit = self.suit\n\n        if len(self.rank) <
2:\n ascii_rank = self.rank + \" \"\n else:\n ascii_rank = self.rank\n\n if not self.hidden:\n card_ascii = [\n '┌───────┐',\n f'|{ascii_rank} |',\n '| |',\n f'| {ascii_suit} |',\n '| |',\n f'| {ascii_rank}|',\n '└───────┘'\n ]\n else:\n card_ascii = [\n '┌───────┐',\n '|░░░░░░░|',\n '|░░░░░░░|',\n '|░░░░░░░|',\n '|░░░░░░░|',\n '|░░░░░░░|',\n '└───────┘'\n ]\n\n return card_ascii", "title": "" }, { "docid": "e9c10dfb86ea4995538fb8e0f4f24365", "score": "0.4805645", "text": "def eagle_mono_bios():\n return [\n ('F_UFC_Line1_DISPLAY', '*R2-35 141000-AM'),\n ('F_UFC_Line2_DISPLAY', 'MARITIME MAN-'),\n ('F_UFC_Line3_DISPLAY', ' HQ AJ PROGRAM'),\n ('F_UFC_Line4_DISPLAY', 'KY-58 SQUELCH*'),\n ('F_UFC_Line5_DISPLAY', '*U262000 U133000*'),\n ('F_UFC_Line6_DISPLAY', ' 10 G '),\n ]", "title": "" }, { "docid": "0ebf0ed418f288a8b81d4711ef8e0715", "score": "0.47934267", "text": "def make_magic_text(self):\n inventory = self.game_data['player inventory']\n allowed_item_list = ['Fire Blast', 'Cure']\n title = 'SELECT MAGIC SPELL'\n magic_text_list = [title]\n spell_list = [item for item in inventory if item in allowed_item_list]\n magic_text_list.extend(spell_list)\n magic_text_list.append('BACK')\n \n magic_text_list = []\n for m in self.monsterentities[self.currentmonster].spells:\n magic_text_list.append(m.name)\n\n return magic_text_list", "title": "" }, { "docid": "0eacdc0b9a922846d910857a4089afb4", "score": "0.47846258", "text": "def get_characters() -> Tuple[set, set]:\n characters = GenshinCDN.get(path=\"/characters\")\n fetched_characters = set()\n unfetched_characters = set()\n for i, cdn_apiname in enumerate(characters):\n apiname = cdn_apiname.replace(\"-\", \"_\")\n if apiname and apiname not in fetched_characters and apiname not in unfetched_characters:\n character_details = GenshinCDN.get(path=f\"/characters/{cdn_apiname}\")\n if character_details:\n fetched_characters.add(apiname)\n character = Character(apiname=apiname, cdn_apiname=cdn_apiname, **character_details)\n character.save()\n print(f\"Fetched and saved {self.name}\")\n else:\n unfetched_characters.add(apiname)\n print(f\"Unable to fetch {self.name}\")\n return fetched_characters, unfetched_characters", "title": "" }, { "docid": "9a95c22a705d06ee0b3560443ff6427f", "score": "0.477034", "text": "def create_game_over_text():\n game_over = []\n for i in range(len(GAME_OVER)):\n char = pygame.image.load(GAME_OVER[i]).convert_alpha()\n char.set_colorkey(BLACK, pygame.RLEACCEL)\n if i < 4:\n char_corner = char.get_rect(center=((SCREEN_WIDTH / 2) - HALF_GAMEOVER_SIZE + (i * LETTER_SPACING),\n SCREEN_HEIGHT / 2 - 300))\n else:\n char_corner = char.get_rect(\n center=((SCREEN_WIDTH / 2) - HALF_GAMEOVER_SIZE + (i * LETTER_SPACING) + WORD_SPACE,\n SCREEN_HEIGHT / 2 - 300))\n game_over.append([char, char_corner])\n return game_over", "title": "" }, { "docid": "29b9d9149e56f469b142033d64e79350", "score": "0.4769814", "text": "def get_characters(self):\n return self.__links", "title": "" }, { "docid": "123807d651fbda770ba504d2efd5d99c", "score": "0.4769009", "text": "def test_post_characters_character_id_assets_names(self):\n pass", "title": "" }, { "docid": "373b9fbf254714b2e4372e48f17e5a54", "score": "0.47657773", "text": "async def by_char_multi(stat: Stat, character: Union[int, Character],\n *args: Union[int, Character],\n period: Period = Period.FOREVER,\n client: RequestClient) -> List[Tuple[int, int]]:\n char_ids = [character if isinstance(character, int) else character.id]\n char_ids.extend([c if isinstance(c, int) else c.id for c in 
args])\n    value = ','.join(str(c) for c in char_ids)\n    collection: Final[str] = 'characters_leaderboard'\n    query = Query(collection, service_id=client.service_id)\n    query.add_term(field=Character.id_field, value=value)\n    query.add_term(field='name', value=_name_from_stat(stat))\n    query.add_term(field='period', value=_period_from_enum(period))\n    try:\n        payload = await client.request(query)\n    except ServerError:  # pragma: no cover\n        return []\n    data = extract_payload(payload, collection)\n    return_: Dict[int, Tuple[int, int]] = {i: (-1, -1) for i in char_ids}\n    for row in data:\n        id_ = int(str(row['character_id']))\n        return_[id_] = int(str(row['rank'])), int(str(row['value']))\n    return [return_[i] for i in char_ids]", "title": "" }, { "docid": "b6309a3656c9bc29f63f6e2fcd983b0d", "score": "0.47520098", "text": "def attack(self, monsters):\n        pass", "title": "" }, { "docid": "ac901e77aa0dfafa12b45c4dbafb2315", "score": "0.47419637", "text": "def _display_black_captures(self):\n        font = pygame.font.SysFont(\"Times New Roman\", 30)\n        return font.render(\"Black Captures: {}\".format(self._player_2.get_captures()), True, WHITE)", "title": "" }, { "docid": "a195a08066e6cb2c07517111128f41a5", "score": "0.4741291", "text": "def show_hand(self):\n        result = \"\"\n        for card in self.hand:\n            result = result + card + \" \"\n        return result", "title": "" }, { "docid": "a8148a7936bc62da74c8fa8498ea0583", "score": "0.4741291", "text": "async def stats(ctx):\r\n\r\n    embed = Embed(title='Stats', colour=Colour.dark_red())  # Creates user stats' embed\r\n    embed.set_author(name=char.charname)\r\n    embed.add_field(name='Armour', value=char.armour)\r\n    embed.add_field(name='Damage', value=char.dmg)\r\n    embed.add_field(name='Dodge', value=char.dodge)\r\n    embed.set_footer(text='At {}'.format(datetime.datetime.utcnow().strftime('%H:%M:%S, %d %a %b %y')))\r\n    await bot.say(embed=embed)", "title": "" }, { "docid": "34bc1072f1dd00cf32e14baccd5872db", "score": "0.47105023", "text": "def get_attack_description(self, level):\r\n        print(\" Bash: Swing your weapon at the enemy to bludgeon \"\r\n              \"them. \")\r\n        print(\" Thunder: Conjure the force of thunder and launch \"\r\n              \"it at the enemy. \")\r\n        print(\" Singe: Strike your opponent with a freezing aura \"\r\n              \"on your primary weapon. \")\r\n        if level > 2:\r\n            print(\" Inferno: Engulf the enemy in a burning inferno.
\")\r\n if level > 4:\r\n print(\" Corrupt: Enter the mind of the enemy and torment \"\r\n \"their psyche.\")", "title": "" }, { "docid": "26ba62947b1810c10ff23f073bf20257", "score": "0.47105023", "text": "def embed_character(self, position):\r\n character = self.characters[position]\r\n character_embed = discord.Embed(title=\"Character: \" + character.character_name, description=\"\", color=config.default_misc_color)\r\n character_embed.add_field(name=\"Class\", value=character.character_class if character.character_class != \"\" else \"­\", inline=True)\r\n character_embed.add_field(name=\"Level\", value=str(character.character_level), inline=True)\r\n character_embed.add_field(name=\"Awakening AP\", value=str(character.character_awakening), inline=False)\r\n character_embed.add_field(name=\"AP\", value=\"{0} ({1})\".format(character.character_ap, character.calculated_ap()), inline=True)\r\n character_embed.add_field(name=\"DP\", value=\"{0} ({1})\".format(character.character_dp, character.calculated_dp()), inline=True)\r\n character_embed.add_field(name=\"Trina Axe\", value=self.trina_axe, inline=True)\r\n character_embed.set_footer(text=\"{0} | GearScore = {1} | Fame = {2} | {3}/{4}\".format(self.family_name, character.gear_score(), character.fame(), position + 1, config.player_character_slots))\r\n return character_embed", "title": "" }, { "docid": "3566fc2c0ba0094f9ba232c213fbae44", "score": "0.46985573", "text": "def set_letters(self, tiles):\n for item in tiles:\n self.letters[item[1][0]][item[1][1]] = item[0]", "title": "" }, { "docid": "e34c315f778f2c7f951495f8b2f167f7", "score": "0.46904743", "text": "def show_hero(self):\n parameters = (\"State: \\n\\tExperience: \" + str(self.experience) + \"\\n\\tHealth: \" + str(self.health) +\n \"\\n\\tBase attack: \" + str(self.attack) + \"\\n\\tBase agility:\" + str(self.agility) +\n \"\\n\\tRace: \" + self.race)\n print(parameters)", "title": "" }, { "docid": "595e43eb31cffc1d141f56f2e9c39dbd", "score": "0.46813366", "text": "def __init__(self, character):\r\n self.character = character", "title": "" }, { "docid": "5a022b50270ad2a6c39e909a9a8be318", "score": "0.467885", "text": "def create_hiscore_text():\n hi_score = []\n for i in range(len(HI_SCORE)):\n char = pygame.image.load(HI_SCORE[i]).convert_alpha()\n char.set_colorkey(BLACK, pygame.RLEACCEL)\n if i < 3:\n char_corner = char.get_rect(center=((SCREEN_WIDTH / 2) - HALF_HISCORE_SIZE + (i * LETTER_SPACING),\n SCREEN_HEIGHT / 2 - 200))\n else:\n char_corner = char.get_rect(\n center=((SCREEN_WIDTH / 2) - HALF_HISCORE_SIZE + (i * LETTER_SPACING) + WORD_SPACE,\n SCREEN_HEIGHT / 2 - 200))\n hi_score.append([char, char_corner])\n\n return hi_score", "title": "" }, { "docid": "6caff07b136c4a8ce94133ba44e3895c", "score": "0.46761933", "text": "async def VresourceDist(self):\n await self.bot.wait_until_ready()\n logg.info(\"Resource distribution Started!\")\n while True:\n await asyncio.sleep(60)\n for guild in self.bot.guilds:\n for channel in guild.voice_channels:\n real_p = (x for x in channel.members if x.bot is False)\n bots = (x for x in channel.members if x.bot is True)\n\n # if the no of members is > 1 and more player than bots proceed\n if len(channel.members) > 1 and len(list(real_p)) >= len(list(bots)):\n for member in channel.members:\n chara = Character(member, self)\n if chara.has_chosen:\n chara.exp += 1\n else:\n # If player has not setup a character give them a box with random resources\n await chara.add_to_inv(id_item=70, quantity=1)", "title": "" }, { "docid": 
"3be914d2687485592e1e29ca788a802b", "score": "0.46734893", "text": "def character_collectibles(self, character_collectibles):\n\n self._character_collectibles = character_collectibles", "title": "" }, { "docid": "3650e4e32f9c2e0d4e17f2438afa2511", "score": "0.46695933", "text": "def get_template(self)->list:\n player_info = []\n team_info = []\n\n\n # subject information, so the starting of the sentence\n if self.__syntactic_rule == 'player_active':\n\n # continue sentence based on PLAYER INFO\n if self.__value != None:\n if self.__confidence != None:\n if self.__confidence <= 0.5:\n player_info.append(random.choice([\"it seems that {player_modifier1} {player1}\", \"it seems that {player1}\",\n \"I'm not sure but {player_modifier1} {player1}\", \"I'm not sure but {player1}\",\n \"apparently {player_modifier1} {player1}\", \"apparently {player1}\",\n ]))\n elif self.__confidence > 0.5:\n player_info.append(random.choice([\"clearly {player_modifier1} {player1}\", \"clearly {player1}\",\n \"{player_modifier1} {player1}\", \"{player1}\",\n \"evidently {player_modifier1} {player1} \", \"evidently {player1}\"\n ]))\n else:\n player_info.append(random.choice([\"{player_modifier1} {player1}\",\"{player1}\", \"a player\"]))\n else:\n player_info.append(random.choice([\"a player\", \"the player\"]))\n\n # continue sentence based on TEAM INFO\n if self.__team_value != None:\n if self.__team_confidence != None:\n if self.__team_confidence <= 0.5:\n pass\n elif self.__team_confidence > 0.5:\n team_info.append(random.choice([\", of well know {team_modifier1}{team1} team, \", \", of well know {team1} \"]))\n else:\n team_info.append(random.choice([\", {team_modifier1} {team1} man, \", \"belonging to {team_modifier1} {team1}\",\n \"\"\n ]))\n\n # receiver information\n elif self.__syntactic_rule == 'player_passive':\n # start sentence based on PLAYER CONFIDENCE\n if self.__value != None:\n if self.__confidence != None:\n if self.__confidence <= 0.5:\n player_info.append(random.choice([\"what it looks like {player_modifier2} {player2}\", \"what it looks like {player2}\",\n \"the player\"\n ]))\n elif self.__confidence > 0.5:\n player_info.append(random.choice([\"{player_modifier2} {player2} \", \"{player2}\"]))\n else:\n player_info.append(random.choice([\"the player\"]))\n\n\n return player_info+team_info", "title": "" }, { "docid": "53c21030c5fdb6cf10c6554225c041da", "score": "0.4664433", "text": "async def sebisauce(self, ctx):\n await ctx.trigger_typing()\n url = \"http://ikbengeslaagd.com/API/sebisauce.json\"\n async with aiohttp.ClientSession() as session:\n async with session.get(url) as response:\n source = await response.json(encoding=\"utf8\")\n\n total_sebi = 0\n for key in dict.keys(source):\n total_sebi += 1\n\n im = random.randint(0, int(total_sebi) - 1)\n\n await ctx.send(\n embed=discord.Embed(\n title=\"\\t\", description=\"\\t\", color=self.bot.embed_color\n ).set_image(url=source[str(im)])\n )", "title": "" }, { "docid": "6da59769d45aac4ce876a91bfee8eb82", "score": "0.4655869", "text": "def get_character_data(summary_dict):\r\n\r\n char_url = summary_dict['Link']\r\n\r\n with HTMLSession() as session:\r\n\r\n char_req = session.get(char_url, headers=request_headers)\r\n\r\n if char_req.status_code == 200:\r\n\r\n print(\" fetching html...\", end='', flush=True)\r\n char_html = char_req.html\r\n print(\"done!\", end='', flush=True)\r\n\r\n print(\" rendering...\", end='', flush=True)\r\n char_html.render(timeout=0)\r\n print(\"done!\", end='', flush=True)\r\n\r\n # # Available 
information, unused thus far:\r\n # item_data = html.find(\"#ItemSummary\")[0]\r\n # store_data = html.find(\"#StoreItemSummary\")[0]\r\n # mount_data = html.find(\"#Mounts\")[0]\r\n # outfit_data = html.find(\"#Outfits\")[0]\r\n # store_outfits_data = html.find(\"#StoreOutfits\")[0]\r\n # blessing_data = html.find(\"#Blessings\")[0]\r\n # imbuement_data = html.find(\"#Imbuements\")[0]\r\n # charm_data = html.find(\"#Charms\")[0]\r\n # area_data = html.find(\"#CompletedCyclopediaMapAreas\")[0]\r\n # quest_data = html.find(\"#CompletedQuestLines\")[0]\r\n # title_data = html.find(\"#Titles\")[0]\r\n # achievement_data = html.find(\"#Achievements\")[0]\r\n\r\n # Collect skills\r\n general_data = char_html.find(\"#General\")[0]\r\n general_first_row = general_data.find(\".InnerTableContainer > table > tbody > tr > td > table > tbody > tr\")[0]\r\n general_tables = general_first_row.find(\"table\")\r\n general_skills = general_tables[1].text.split(\"\\n\")\r\n skill_names = general_skills[0::3]\r\n skill_values = list((map(lambda skill: int(skill), general_skills[1::3])))\r\n skill_dict = {}\r\n for skill_name, skill_value in zip(skill_names, skill_values):\r\n skill_dict[skill_name] = skill_value\r\n # Available information, unused thus far: hitpoints, mana, capacity, speed, blessings, mounts, outfits, titles\r\n # general_stats = general_tables[0].text.split(\"\\n\")\r\n # stat_names = list((map (lambda stat: stat.replace(\":\", \"\"), general_stats[0::2])))\r\n # stat_values = list((map (lambda value: int(value.split(\"/\")[0].replace(\",\", \"\")), general_stats[1::2])))\r\n\r\n # Collect bank data\r\n general_second_row = general_data.find(\".InnerTableContainer > table > tbody > tr\")[1].text.split(\"\\n\")\r\n bank_dict = {'Creation Date': str_to_datetime(general_second_row[1].replace(\"\\xa0\", \" \")),\r\n 'Experience': int(general_second_row[3].replace(\",\", \"\")),\r\n 'Gold': int(general_second_row[5].replace(\",\", \"\")),\r\n 'Achievement Points': int(general_second_row[7].replace(\",\", \"\"))}\r\n\r\n # Collect bestiary\r\n bestiary_data = char_html.find(\"#BestiaryProgress\")[0]\r\n bestiary_table = bestiary_data.find(\".TableContent\")[0]\r\n bestiary_rows = bestiary_table.find(\"tr\")[1:]\r\n if len(bestiary_rows) > 0:\r\n last_row = bestiary_rows[-1].text\r\n if last_row.find(\"more entries)\") >= 0:\r\n bestiary_rows = bestiary_rows[:-1]\r\n bestiary_dict = {}\r\n if len(bestiary_rows) > 0:\r\n for bestiary_creature in bestiary_rows:\r\n creature_entry = bestiary_creature.text.split(\"\\n\")\r\n creature_name = creature_entry[-1]\r\n creature_count = int(creature_entry[1][:-1].replace(\",\", \"\"))\r\n bestiary_dict[creature_name] = creature_count\r\n\r\n # Incorporate character data to dictionary and convert to dataframe\r\n summary_dict.update(skill_dict)\r\n summary_dict.update(bank_dict)\r\n char_dataframe = pd.DataFrame(summary_dict)\r\n char_dataframe.loc[0, 'Bestiary'] = [bestiary_dict]\r\n\r\n print(\" closing page...\", end='', flush=True)\r\n char_req.close()\r\n print(\" closed!\", end='', flush=True)\r\n #time.sleep(1)\r\n\r\n return char_dataframe\r\n\r\n else:\r\n\r\n return None", "title": "" }, { "docid": "adeeafb0e7befdb14bf5bf66a07b0268", "score": "0.4655214", "text": "def display_hand(hands):\r\n string = \"\"\r\n for letter in hands.keys():\r\n for j in range(hands[letter]):\r\n string = string + \" \" + letter\r\n return string\r\n\r\n # print() # print an empty line\r", "title": "" }, { "docid": "5e8a70522b08032bf8c13a6dee93a4b7", "score": 
"0.4650783", "text": "def specialDesc(self, abName):\n abDescriptions = {\n 'heroic strike': 'Modifies player\\'s attack by 150 %.',\n 'pommel attack': 'If target doesn\\'t evade or block, player'\n 'attacks target by 70% damage and stun it for a'\n ' round.',\n 'execute': 'The target instantly dies if it\\'s under 30 % health or'\n ' else player\\'s attack is reduced to 70 %.',\n 'skullsplitter': 'Modifies player\\'s attack by 30 % for each'\n 'available action point.',\n 'poison strike': 'If target doesn\\'t evade or block, it gets'\n 'poisoned.',\n 'sinister strike': 'Modifies player\\'s attack by 175 %.',\n 'deep wounds': 'If target doesn\\'t evade or block, it gets bleeding'\n ' damage for 3 rounds',\n 'armor crush': 'If target doesn\\'t evade or block, it\\'s armor is '\n ' reduced by 30 % for 4 rounds',\n 'bloodthirst': 'Player\\'s attack is increased by 50 % for 3 rounds',\n 'rampage': 'The player attacks 2 times.',\n 'shield bash': 'If target doesn\\'t evade or block, it gets stunned'\n ' for 2 rounds.',\n 'shield wall': 'Player\\'s armor increase by 50 % for 4 rounds',\n }\n return abDescriptions[abName]", "title": "" }, { "docid": "4ca8ef4c457d1f2c04af452f0645cbef", "score": "0.46489286", "text": "def combat_tracker_add_chars(tracker, chars):\n to_add = []\n for chara in chars:\n parts = chara.split('/')\n name, init = parts[0], int(parts[1])\n if len(parts) > 2:\n roll = float(parts[2])\n rolls = [roll, init + roll] + roll_init(init=init, times=ROLL_LIMIT)\n else:\n rolls = roll_init(init=init, times=ROLL_LIMIT + 1)\n rolls = rolls[0:1] + [rolls[0] + init] + rolls[1:]\n roll = rolls[0]\n to_add += [{\n 'name': name,\n 'init': init,\n 'roll': roll,\n 'rolls': rolls,\n }]\n\n tracker['turns'] = merge_turn_lists(tracker['turns'], to_add)\n return tracker", "title": "" }, { "docid": "62bb74c4d18da28ad44352aa292c935b", "score": "0.46479774", "text": "def show_inventory(character):\n print(\"\")\n print(\"#######################\")\n print(\"Equipment\")\n print(\"#######################\")\n for k, v in character[\"equipment\"].items():\n if v != \"empty\":\n print(\"- \" + v)\n\n print(\"\")\n print(\"#######################\")\n print(\"Inventory\")\n print(\"#######################\")\n item_list = []\n if len(character[\"inventory\"]) > 0:\n for item in character[\"inventory\"]:\n item_list.append(item)\n if len(item_list) == 3:\n print(' '.join(item_list))\n item_list = []\n if len(item_list) > 0:\n print(' '.join(item_list))\n else:\n print(\"Your inventory is empty. 
:<\")\n print(\"\")", "title": "" }, { "docid": "6acca9ac26b5b4ad299f8344b61391e2", "score": "0.46430242", "text": "def demo_card(self, player_string, player_name):\n\n self.parse_players(player_string, player_name)\n\n # Assemble decks\n self.generate_oxygen('bbbbrr', self.screen)\n self.deck = DeckRender(\"Deck\", self.screen, pos = DRAW_PILE_POS, deck_size = 0)\n self.discard = DeckRender(\"Discard\", self.screen, pos = DISCARD_PILE_POS, deck_size = 0)\n self.command_pile = DeckRender(\"Command Pile\", self.screen, pos = COMMAND_PILE_POS, deck_size = 0)\n self.to_discard = DeckRender(\"To Discard\", self.screen, pos = TO_DISCARD_POS, deck_size = 0)\n self.temp = DeckRender(\"Temporary\", self.screen, pos = TEMP_POS, deck_size = 0)\n self.global_permanents = DeckRender(\"Contaminate\", self.screen, pos = self.oxygen.most_recent_damaged_pos(), deck_size = 0, has_name = False)\n\n # Add all of the decks to a list to render\n for deck in [self.deck, self.discard, self.command_pile, self.to_discard,\n self.temp, self.global_permanents]:\n self.decks.append(deck)\n\n # Initialize a button array\n self.option_buttons = ButtonArray(self.ui, max_dims = OPTION_ARRAY_DIMS,\n pos = OPTION_ARRAY_POS)\n\n # Initialize stage and hand\n self.stage = CardArray(STAGE_POS)\n self.hand = CardArray(HAND_POS, hand = True)\n\n self.generate_deck_dict()\n\n up = 0\n since_last = 0\n\n thing_list = [\"Aftershock\", \"Deck\", \"Mission\", \"Unknown\"] * 30\n\n send_froms = [\"Deck\", \"Deck\", \"Ray Hand\", \"Ray Hand\", \"Deck\"]*10\n send_tos = [\"Ray Hand\", \"Ray Hand\", \"Discard\", \"Stage\", \"Jeremy Hand\"]*10\n\n while True:\n self.update_pygame_events()\n nup = time.time()\n dt = nup - up\n up = nup\n since_last += dt\n\n self.draw_things(dt)\n\n new = time.time()\n\n # If you have animations turned off, do all messages immediately.\n if not self.anim:\n self.flush_msg_queue()\n\n self.global_permanents.move_to(self.oxygen.most_recent_damaged_pos())\n\n # Don't delay between prompt messages for usability\n if len(self.msg_queue):\n\n for i, item in enumerate(self.msg_queue):\n if (\":deck:\" in item) or (\":character:\" in item) or (\":reveal:\" in item):\n self.interpret_msg(item)\n since_last = 0\n if (\":prompt:\" in item):\n self.interpret_msg(item)\n since_last = 0\n \n #if \":prompt:\" in self.msg_queue[0]:\n # self.interpret_msg(self.msg_queue[0])\n\n # Have a small delay between animating items.\n if since_last > ACTION_LENGTH:\n since_last -= ACTION_LENGTH\n\n if since_last > ACTION_LENGTH:\n since_last = 0\n\n # If there is a message in the queue, execute it\n if len(self.msg_queue):\n self.interpret_msg(self.msg_queue[0])", "title": "" }, { "docid": "68101b4c4c8ba7288c428823ba15cd0f", "score": "0.464293", "text": "def print_bg_data(self):\n print(\"BACKGROUND: \" + self.char_bg_name)\n if self.char_bg_specialty != None:\n print(self.bg_spec_title.upper() + \": \" + self.char_bg_specialty)\n print(\"PERSONALITY TRAIT: \" + self.char_ptrait)\n print(\"IDEAL: \" + self.char_ideal)\n print(\"BOND: \" + self.char_bond)\n print(\"FLAW: \" + self.char_flaw)", "title": "" }, { "docid": "19d2646c79276eccee4ac4547291c645", "score": "0.46396068", "text": "def speak_hand(self):\n\n names_and_counts = Counter([c.name for c in self.hand]).most_common(\n len(self.hand)\n )\n\n s = \"I have \"\n s += \" and \".join(\n [str(nac[1]) + \" \" + str(nac[0]) for nac in names_and_counts]\n )\n\n self.game.speak_str(s)", "title": "" }, { "docid": "2ef6acfeb1de67950c10a2b1b797b248", "score": "0.46387628", "text": 
"def create_player():\n\n choose_colors_text = \"\"\"\n 1. red\n 2. green\n 3. yellow\\n\"\"\"\n character_colors = {'1': '\\033[31m', '2': '\\033[32m', '3': '\\033[33m'}\n os.system('clear')\n print('Character creation screen.')\n character_name = input(\"Choose your character's name: \")\n chosen_character_color = ''\n while chosen_character_color not in ['1', '2', '3']:\n print(\"Choose your character's color [1, 2 or 3].\")\n chosen_character_color = input(choose_colors_text)\n character_color = character_colors[chosen_character_color]\n return character_name, character_color", "title": "" }, { "docid": "58c922f081dff94eafe8790adce72c5b", "score": "0.46325392", "text": "def fetch_char_details(char_uuid):\r\n query = Character.select().where(Character.uuid == char_uuid).get()\r\n return {\r\n \"uuid\": query.uuid,\r\n \"name\": query.name,\r\n \"level\": query.level,\r\n \"char_class\": query.char_class,\r\n \"hp\": query.hp,\r\n \"damage\": query.damage,\r\n \"armor\": query.armor,\r\n \"stats\": query.stats,\r\n \"xp\": query.xp,\r\n }", "title": "" }, { "docid": "5aebfab1906864703418086b20679dcc", "score": "0.46317622", "text": "def __init__(self, initialAction, playerId, speed = 15):\n Characters.__init__(self,initialAction, speed = 15)\n self.pos = 1\n self.pos2 = 1\n self.movex, self.movey = 0,0\n self.facingRight = True\n self.x = 250\n self.y = 350\n self.Rect = Rect(self.x, self.y, 35, 70)\n self.HP = 400\n self.XP = 50\n self.XPMAX = 150\n self.Defending = False\n self.Attacking = False\n self.punchDamage = 0\n self.kickDamage = 0\n self.hitDefended = 0\n self.powerDamage = 0\n self.powerDamageDefended = 0\n self.initialTime = 0\n self.timing2 = True\n self.punchDamage = 2\n self.kickDamage = 2\n self.hitDefended = 0.4\n self.powerDamage = 10\n self.powerDamageDefended = 2\n self.comboDamage = 10\n self.playerId = playerId\n self.loading = False\n self.superPunchState = False\n self.superKickState = False\n self.factorSuper = 1.4\n #self.inicialTime\n self.initialTime1 = time.time()*1000\n self.initialTime2 = time.time()*1000\n self.initialTime3 = time.time()*1000\n self.initialTime4 = time.time()*1000\n self.initialTime5 = time.time()*1000\n self.initialTime6 = time.time()*1000\n self.initialTime7 = time.time()*1000\n self.initialSpark = time.time()*1000\n self.initialExplosion = time.time()*1000\n self.kamehamMs = 160\n self.punchMs = 90\n self.releasePower = True\n self.voidPower = True\n self.kameCont = 22\n self.enemykameCont = 0\n self.staticy = 0\n self.initialKame = time.time()*1000\n self.initialPunch = time.time()*1000\n self.initialEffects = time.time()*1000\n self.isPC = True\n self.singleKameham = True\n self.disputeKamehamBoolean = True\n self.teleportBoolean = True", "title": "" }, { "docid": "0905d5a345b171611e4083590e1ffe54", "score": "0.46280083", "text": "def __init__(self, char, enemy, small_talk, ask):\n\n #print 'in Encounter.__init__'\n\n self.char = char\n self.enemy = enemy\n self.time = 0\n self.base_def = 5\n self.done = False\n self.small_talk_full = copy.deepcopy(small_talk)\n self.ask_full = copy.deepcopy(ask)\n\n self.strong_against_counter = 0\n self.time_dec = 0\n\n self.small_talk = copy.deepcopy(self.small_talk_full)\n self.ask = copy.deepcopy(self.ask_full)\n\n # Strings to describe the enemy responses\n self.enemy_resp_st_weak = ['%s rolls their eyes. 
\"Uhh yeah, great\"', \n \n'''%s gives the smallest of smiles and says whatever he thinks you want to \nhear''']\n\n self.enemy_resp_st_strong = [\n'''%s smiles, they start talking about how great everything is! They are doing \nwell, the weather is great, what a day!''', '''%s answers with a surprisingly \npositive response! They go on to ask you questions in rapid fire about your day, \nyour family, and if you are LOVING this weather.''']\n \n self.enemy_resp_a_weak = [\n \n'''%s inches back...they look around to see who is within earshot...they stutter \nout a simple, neutral response, clearly uncomfortable.''', '''%s takes a moment \nto think, and then they say they're really not too sure about it yet. They say \nthat they need to do more research on it. Then they look around and focus on the \nclosest door.''']\n\n self.enemy_resp_a_strong = [\n \n'''%s inches close, and takes a deep breath.\nThey go on about what the Framers would have thought, a brief history of the \nissue, and what the current picture of the issue looks like. Then, they go on to \ntell you their opinion on the matter.''', \n \n'''%s looks surprised, and then leans in. They talk your ear off about why it's \nthe correct decision, and why the other side is dead wrong.''']", "title": "" }, { "docid": "86f16a0a46f8e9e3211374c5a971f37e", "score": "0.4627855", "text": "def show_hand(self):\n return \" | \".join([x.visible_value for x in self.hand])", "title": "" }, { "docid": "a8dc3870634876bc89aa08713eef2e46", "score": "0.46271205", "text": "def get_character(name, tries=5):\r\n try:\r\n url = url_character + urllib.parse.quote(name.encode('iso-8859-1'))\r\n except UnicodeEncodeError:\r\n return ERROR_DOESNTEXIST\r\n char = dict()\r\n\r\n # Fetch website\r\n try:\r\n page = yield from aiohttp.get(url)\r\n content = yield from page.text(encoding='ISO-8859-1')\r\n except Exception:\r\n if tries == 0:\r\n log.error(\"getPlayer: Couldn't fetch {0}, network error.\".format(name))\r\n return ERROR_NETWORK\r\n else:\r\n tries -= 1\r\n yield from asyncio.sleep(network_retry_delay)\r\n ret = yield from get_character(name, tries)\r\n return ret\r\n\r\n # Trimming content to reduce load\r\n try:\r\n startIndex = content.index('<div class=\"BoxContent\"')\r\n endIndex = content.index(\"<B>Search Character</B>\")\r\n content = content[startIndex:endIndex]\r\n except ValueError:\r\n # Website fetch was incomplete, due to a network error\r\n if tries == 0:\r\n log.error(\"getPlayer: Couldn't fetch {0}, network error.\".format(name))\r\n return ERROR_NETWORK\r\n else:\r\n tries -= 1\r\n yield from asyncio.sleep(network_retry_delay)\r\n ret = yield from get_character(name, tries)\r\n return ret\r\n # Check if player exists\r\n if \"Name:</td><td>\" not in content:\r\n return ERROR_DOESNTEXIST\r\n\r\n # TODO: Is there a way to reduce this part?\r\n # Name\r\n m = re.search(r'Name:</td><td>([^<,]+)', content)\r\n if m:\r\n char['name'] = m.group(1).strip()\r\n\r\n # Deleted\r\n m = re.search(r', will be deleted at ([^<]+)', content)\r\n if m:\r\n char['deleted'] = True\r\n\r\n # Vocation\r\n m = re.search(r'Vocation:</td><td>([^<]+)', content)\r\n if m:\r\n char['vocation'] = m.group(1)\r\n\r\n # Level\r\n m = re.search(r'Level:</td><td>(\\d+)', content)\r\n if m:\r\n char['level'] = int(m.group(1))\r\n # Use database levels for online characters\r\n for onchar in global_online_list:\r\n if onchar.split(\"_\", 1)[1] == char['name']:\r\n c = userDatabase.cursor()\r\n c.execute(\"SELECT last_level FROM chars WHERE name LIKE ?\", 
(char['name'],))\r\n result = c.fetchone()\r\n if result:\r\n char['level'] = abs(result[\"last_level\"])\r\n c.close()\r\n break\r\n\r\n # World\r\n m = re.search(r'World:</td><td>([^<]+)', content)\r\n if m:\r\n char['world'] = m.group(1)\r\n\r\n # Residence (City)\r\n m = re.search(r'Residence:</td><td>([^<]+)', content)\r\n if m:\r\n char['residence'] = m.group(1)\r\n\r\n # Marriage\r\n m = re.search(r'Married To:</td><td>?.+name=([^\"]+)', content)\r\n if m:\r\n char['married'] = urllib.parse.unquote_plus(m.group(1), encoding='ISO-8859-1')\r\n\r\n # Sex\r\n m = re.search(r'Sex:</td><td>([^<]+)', content)\r\n if m:\r\n if m.group(1) == 'male':\r\n char['gender'] = 'male'\r\n else:\r\n char['gender'] = 'female'\r\n\r\n # Guild rank\r\n m = re.search(r'Membership:</td><td>([^<]+)\\sof the', content)\r\n if m:\r\n char['rank'] = m.group(1)\r\n # Guild membership\r\n m = re.search(r'GuildName=.*?([^&]+).+', content)\r\n if m:\r\n char['guild'] = urllib.parse.unquote_plus(m.group(1))\r\n\r\n # House\r\n m = re.search(r'House:</td><td> <a href=\\\"https://secure\\.tibia\\.com/community/\\?subtopic=houses.+houseid=(\\d+)'\r\n r'&amp;character=(?:[^&]+)&amp;action=characters\\\" >([^<]+)</a> \\(([^(]+)\\) is paid until '\r\n r'([A-z]+).*?;(\\d+).*?;(\\d+)', content)\r\n if m:\r\n char[\"house_id\"] = m.group(1)\r\n char[\"house\"] = m.group(2)\r\n char[\"house_town\"] = m.group(3)\r\n\r\n # Last login\r\n m = re.search(r'Last Login:</td><td>([^<]+)', content)\r\n if m:\r\n lastLogin = m.group(1).replace(\"&#160;\", \" \").replace(\",\", \"\")\r\n if \"never\" in lastLogin:\r\n char['last_login'] = None\r\n else:\r\n char['last_login'] = lastLogin\r\n\r\n # Discord owner\r\n c = userDatabase.cursor()\r\n c.execute(\"SELECT user_id FROM chars WHERE name LIKE ?\", (char[\"name\"],))\r\n result = c.fetchone()\r\n char[\"owner_id\"] = None if result is None else result[\"user_id\"]\r\n\r\n # Update name, vocation and world for chars in database if necessary\r\n c = userDatabase.cursor()\r\n c.execute(\"SELECT vocation, name, id, world FROM chars WHERE name LIKE ?\", (name,))\r\n result = c.fetchone()\r\n if result:\r\n if result[\"vocation\"] != char['vocation']:\r\n c.execute(\"UPDATE chars SET vocation = ? WHERE id = ?\", (char['vocation'], result[\"id\"],))\r\n log.info(\"{0}'s vocation was set to {1} from {2} during get_character()\".format(char['name'],\r\n char['vocation'],\r\n result[\"vocation\"]))\r\n if result[\"name\"] != char[\"name\"]:\r\n c.execute(\"UPDATE chars SET name = ? WHERE id = ?\", (char['name'], result[\"id\"],))\r\n log.info(\"{0} was renamed to {1} during get_character()\".format(result[\"name\"], char['name']))\r\n\r\n if result[\"world\"] != char[\"world\"]:\r\n c.execute(\"UPDATE chars SET world = ? 
WHERE id = ?\", (char['world'], result[\"id\"],))\r\n log.info(\"{0}'s world was set to {1} from {2} during get_character()\".format(char['name'],\r\n char['world'],\r\n result[\"world\"]))\r\n\r\n #Skills from highscores\r\n c = userDatabase.cursor()\r\n for category in highscores_categories:\r\n c.execute(\"SELECT \"+category+\",\"+category+\"_rank FROM chars WHERE name LIKE ?\", (name,))\r\n result = c.fetchone()\r\n if result:\r\n if result[category] is not None and result[category+'_rank'] is not None:\r\n char[category] = result[category]\r\n char[category+'_rank'] = result[category+'_rank']\r\n\r\n char[\"deaths\"] = []\r\n regex_deaths = r'valign=\"top\" >([^<]+)</td><td>(.+?)</td></tr>'\r\n pattern = re.compile(regex_deaths, re.MULTILINE + re.S)\r\n matches = re.findall(pattern, content)\r\n\r\n for m in matches:\r\n death_time = m[0].replace('&#160;', ' ').replace(\",\", \"\")\r\n death_level = \"\"\r\n death_killer = \"\"\r\n death_by_player = False\r\n\r\n if m[1].find(\"Died\") != -1:\r\n regex_deathinfo_monster = r'Level (\\d+) by ([^.]+)'\r\n pattern = re.compile(regex_deathinfo_monster, re.MULTILINE + re.S)\r\n m_deathinfo_monster = re.search(pattern, m[1])\r\n if m_deathinfo_monster:\r\n death_level = m_deathinfo_monster.group(1)\r\n death_killer = m_deathinfo_monster.group(2)\r\n else:\r\n regex_deathinfo_player = r'Level (\\d+) by .+?name=([^\"]+)'\r\n pattern = re.compile(regex_deathinfo_player, re.MULTILINE + re.S)\r\n m_deathinfo_player = re.search(pattern, m[1])\r\n if m_deathinfo_player:\r\n death_level = m_deathinfo_player.group(1)\r\n death_killer = urllib.parse.unquote_plus(m_deathinfo_player.group(2))\r\n death_by_player = True\r\n try:\r\n char[\"deaths\"].append({'time': death_time, 'level': int(death_level), 'killer': death_killer,\r\n 'byPlayer': death_by_player})\r\n except ValueError:\r\n # Some pvp deaths have no level, so they are raising a ValueError, they will be ignored for now.\r\n continue\r\n \r\n # Other chars\r\n # note that an empty char list means the character is hidden\r\n # otherwise you'd have at least the same char in the list\r\n char['chars'] = []\r\n try:\r\n # See if there is a character list\r\n startIndex = content.index(\"<B>Characters</B>\")\r\n content = content[startIndex:]\r\n\r\n # Find characters\r\n regex_chars = r'<TD WIDTH=10%><NOBR>([^<]+)[^?]+.+?VALUE=\\\"([^\\\"]+)'\r\n pattern = re.compile(regex_chars, re.MULTILINE + re.S)\r\n m = re.findall(pattern, content)\r\n\r\n if m:\r\n for (world, name) in m:\r\n name = urllib.parse.unquote_plus(name)\r\n char['chars'].append({'name': name, 'world': world})\r\n except Exception:\r\n pass\r\n return char", "title": "" }, { "docid": "acba73332b0b44df5dd7c10720f63729", "score": "0.46242255", "text": "def _get_cards():\n _suspects = {\n 'Scarlet', 'Plum', 'Mustard', 'Green', 'White', 'Peacock'\n }\n\n _rooms = {\n 'Study', 'Hall', 'Lounge', 'Library', 'Billiard', 'Dining', 'Conservatory', 'Ballroom', 'Kitchen'\n }\n\n _weapons = {\n 'Knife', 'Wrench', 'Revolver', 'Pipe', 'Rope', 'Candlestick'\n }\n\n _hallways = {\n 'Hallway_01', 'Hallway_02', 'Hallway_03', 'Hallway_04', 'Hallway_05', 'Hallway_06', 'Hallway_07',\n 'Hallway_08', 'Hallway_09', 'Hallway_10', 'Hallway_11', 'Hallway_12'\n }\n\n return _suspects.union(_rooms.union(_weapons))", "title": "" }, { "docid": "8979fb49190f7da430147f79364abf3d", "score": "0.4623122", "text": "def combat_tracker_remove_chars(tracker, chars):\n to_remove = [x.lower() for x in chars]\n tracker['turns'] = [x for x in tracker['turns'] if 
x['name'].lower() not in to_remove]\n\n return tracker", "title": "" }, { "docid": "69ff80de7de54fc78253fe114e4ace91", "score": "0.4620314", "text": "def _UpdateCharAlpha0White(self, data):\n\t\tfor [r, c, ch] in data:\n\t\t\t(x, y) = self.RowColToXY(r, c)\n\t\t\tself._DrawCubeXYWhite(x, y)\n\t\t\tself._DrawTextXY(x, y, ch)", "title": "" }, { "docid": "9fab52fefee98e88e9af8562df40ee50", "score": "0.46155626", "text": "async def random(self, ctx):\r\n tags = self.c.execute(\r\n 'SELECT name, content, author, uses FROM tags WHERE server=? ORDER BY RANDOM() LIMIT 1', (ctx.guild.id,))\r\n tags = tags.fetchone()\r\n name, content, author, uses = tags\r\n e = discord.Embed(title=\"Random tag\")\r\n e.add_field(name=f\"{int(uses)} uses\", value=f'<@{author}>')\r\n e.add_field(name=name, value=content)\r\n await ctx.send(embed=e)", "title": "" }, { "docid": "09be059e8adf9a2740dadd086bd0f59a", "score": "0.4612348", "text": "def get_viewable_hand(self):\n hand = \"\"\n for card in self.hand:\n hand = hand + \" \" + card.get_card_details()\n return hand", "title": "" }, { "docid": "b9de63956f6e9da6bfe4e02ff34c149a", "score": "0.4606336", "text": "def harrier_mono_bios():\n return [\n ('UFC_SCRATCHPAD', '123456789012'),\n ('UFC_COMM1_DISPLAY', '11'),\n ('UFC_COMM2_DISPLAY', '22'),\n ('AV8BNA_ODU_1_SELECT', '1'),\n ('AV8BNA_ODU_1_Text', '1234'),\n ('AV8BNA_ODU_2_SELECT', '2'),\n ('AV8BNA_ODU_2_Text', '2345'),\n ('AV8BNA_ODU_3_SELECT', '3'),\n ('AV8BNA_ODU_3_Text', '3456'),\n ('AV8BNA_ODU_4_SELECT', '4'),\n ('AV8BNA_ODU_4_Text', '4567'),\n ('AV8BNA_ODU_5_SELECT', '5'),\n ('AV8BNA_ODU_5_Text', '5678')\n ]", "title": "" }, { "docid": "ec65312c97be162b3c2d677e8b3ef12b", "score": "0.46028203", "text": "async def champions(self, ctx):\r\n guild = await AssetCreation.getGuildFromPlayer(self.client.pg_con, ctx.author.id)\r\n champions = await AssetCreation.get_brotherhood_champions(self.client.pg_con, guild['ID'])\r\n\r\n for i in range(0,3):\r\n if champions[i] is not None:\r\n name = await AssetCreation.getPlayerName(self.client.pg_con, champions[i])\r\n # attack, crit = await AssetCreation.getAttack(self.client.pg_con, champions[i])\r\n battle_stats = await AssetCreation.get_attack_crit_hp(self.client.pg_con, champions[i])\r\n \r\n champions[i] = {\r\n 'Name' : name,\r\n 'ATK' : battle_stats['Attack'],\r\n 'Crit' : battle_stats['Crit']\r\n }\r\n\r\n embed = discord.Embed(title=f\"{guild['Name']}'s Champions\",\r\n color=self.client.ayesha_blue)\r\n for i, champion in enumerate(champions):\r\n try:\r\n embed.add_field(name=f'Champion {i+1}',\r\n value=f\"Name: {champion['Name']}\\nATK: {champion['ATK']}\\nCrit: {champion['Crit']}\")\r\n except TypeError:\r\n embed.add_field(name=f'Champion {i+1}',\r\n value='None')\r\n\r\n embed.set_footer(text=\"If a champion is 'None', ask your officer to add one with the 'champion' command!\")\r\n\r\n await ctx.reply(embed=embed)", "title": "" }, { "docid": "c1b9c5b3f3bc6422a3b0020c33d2df4e", "score": "0.45937377", "text": "def __str__(self):\n return f'{self.character_name}: HP: {self.hp}, BAG: {self.bag}'", "title": "" }, { "docid": "5d569e12a92e00261a02e7f9cdde42b1", "score": "0.45936525", "text": "def emoticon(bot_input, bot_output):\n random_emoticon = random.choice(list(emoticons))\n bot_output.say(\":{0}:\".format(random_emoticon))", "title": "" }, { "docid": "326fe579462214a40141b695d0fba426", "score": "0.45929167", "text": "def get_challenge_string(self):\n choices = ascii_letters + digits\n return ''.join(random.choice(choices) for i in xrange(128))", "title": 
"" }, { "docid": "72acd447423ff6413b4b4edb0e497952", "score": "0.45895806", "text": "def get_human_players(self):\n human_players = []\n for name, player in self.players.items():\n if player[0].is_human:\n human_players.append(name)\n return human_players", "title": "" }, { "docid": "275575521e39c4edf4f65a0a5aed78d5", "score": "0.45879355", "text": "def collect_from_player(self):", "title": "" }, { "docid": "19f0bbf3b73ddc474b4e90c58af77e3e", "score": "0.45878485", "text": "def get_dev_aim(self, player: int):\r\n return self.__data[player, 2]", "title": "" }, { "docid": "9e1d5912cfe6463a0e690208adcdeb93", "score": "0.4587243", "text": "async def attack(self, ctx):\r\n def simulate_battle(player1, player2):\r\n \"\"\"Simulate a battle between two players based solely off ATK and Crit.\r\n Each side has a small chance to land a \"crit\" (based off crit) and win.\r\n Otherwise it will base the victor off the proportions of the attack.\r\n Return the winner and loser in that order.\"\"\"\r\n #See if one side lands a critical hit - Highest crit possible is theoretically ~70%.\r\n p1vict = player1['Crit']\r\n p2vict = p1vict + player2['Crit'] #This should theoretically be <=140\r\n random_crit = random.randint(0,500)\r\n if random_crit < p1vict:\r\n return player1, player2 #player1 wins; Winner is returned first\r\n elif random_crit < p2vict:\r\n return player2, player1\r\n \r\n #If no victory occurs, then base it off proportion of ATK\r\n victory_number = random.randint(0, player1['ATK'] + player2['ATK'])\r\n if victory_number < player1['ATK']:\r\n return player1, player2\r\n else:\r\n return player2, player1\r\n\r\n #Only one attack per area every 3 hours. Check to see if attacking is available\r\n guild = await AssetCreation.getGuildFromPlayer(self.client.pg_con, ctx.author.id)\r\n last_attack = await AssetCreation.get_most_recent_area_attack(self.client.pg_con, guild['Base'])\r\n\r\n if last_attack is not None:\r\n if (datetime.now() - last_attack).total_seconds() < 10800:\r\n return await ctx.reply(f'This area has already suffered a recent attack. Please try again in `{time.strftime(\"%H:%M:%S\", time.gmtime(10800 - (datetime.now() - last_attack).total_seconds()))}`.')\r\n\r\n #If available, load the champions for both brotherhoods\r\n #If <3 champions, recycle the first with nerfed stats\r\n #If defender has no champions, attacker automatically wins\r\n attacker = await AssetCreation.get_brotherhood_champions(self.client.pg_con, guild['ID'])\r\n defending_guild_id = await AssetCreation.get_area_controller(self.client.pg_con, guild['Base'])\r\n\r\n if defending_guild_id is None: #No one currently holds the area, so attacker assumes control\r\n await AssetCreation.set_area_controller(self.client.pg_con, guild['Base'], guild['ID'])\r\n await self.client.announcement_channel.send(f\"**{guild['Name']} (ID: `{guild['ID']}`)** has seized control over {guild['Base']}.\")\r\n return await ctx.reply(f\"{guild['Name']} has seized control over {guild['Base']}.\")\r\n\r\n defending_guild = await AssetCreation.getGuildByID(self.client.pg_con, defending_guild_id)\r\n\r\n if defending_guild['Base'] != guild['Base']: #then the defending guild has since moved. 
Give freely\r\n await AssetCreation.set_area_controller(self.client.pg_con, guild['Base'], guild['ID'])\r\n await self.client.announcement_channel.send(f\"**{guild['Name']} (ID: `{guild['ID']}`)** has seized control over {guild['Base']}.\")\r\n return await ctx.reply(f\"{guild['Name']} has seized control over {guild['Base']}.\")\r\n\r\n if guild['ID'] == defending_guild_id:\r\n return await ctx.reply(f\"Your brotherhood is already in control of {guild['Base']}\")\r\n\r\n defender = await AssetCreation.get_brotherhood_champions(self.client.pg_con, defending_guild_id)\r\n\r\n if attacker[0] is None and attacker[1] is None and attacker[2] is None:\r\n return await ctx.reply(f'Your brotherhood has no champions. Set some with `{ctx.prefix}bh champion`!')\r\n \r\n if defender[0] is None and defender[1] is None and defender[2] is None: #If defender has no champs, give it up\r\n await AssetCreation.set_area_controller(self.client.pg_con, guild['Base'], guild['ID'])\r\n await self.client.announcement_channel.send(f\"**{guild['Name']} (ID: `{guild['ID']}`)** has seized control over {guild['Base']}.\")\r\n return await ctx.reply(f\"{guild['Name']} has seized control over {guild['Base']}.\")\r\n\r\n for i in range(0,3): #Replace their IDs with a dict containing battle info\r\n if attacker[i] is not None:\r\n name = await AssetCreation.getPlayerName(self.client.pg_con, attacker[i])\r\n # attack, crit = await AssetCreation.getAttack(self.client.pg_con, attacker[i])\r\n battle_stats = await AssetCreation.get_attack_crit_hp(self.client.pg_con, attacker[i])\r\n \r\n attacker[i] = {\r\n 'ID' : attacker[i],\r\n 'Name' : name,\r\n 'ATK' : battle_stats['Attack'],\r\n 'Crit' : battle_stats['Crit']\r\n }\r\n if defender[i] is not None:\r\n name = await AssetCreation.getPlayerName(self.client.pg_con, defender[i])\r\n # attack, crit = await AssetCreation.getAttack(self.client.pg_con, defender[i])\r\n battle_stats = await AssetCreation.get_attack_crit_hp(self.client.pg_con, defender[i])\r\n \r\n defender[i] = {\r\n 'ID' : defender[i],\r\n 'Name' : name,\r\n 'ATK' : battle_stats['Attack'],\r\n 'Crit' : battle_stats['Crit']\r\n }\r\n\r\n for i in range(1,3): #Sort the teams so that the first slot is always a person (and not empty)\r\n if attacker[0] is None and attacker[i] is not None:\r\n attacker[0] = attacker[i]\r\n if defender[0] is None and defender[i] is not None:\r\n defender[0] = defender[i]\r\n\r\n for i in range(1,3): #Now fill \"None\"s with the first champion. The above operation made sure the first is always a person\r\n if attacker[i] is None:\r\n attacker[i] = attacker[0]\r\n if defender[i] is None:\r\n defender[i] = defender[0]\r\n\r\n #Now check for repeats, nerfing stats for the second or third appearance. 
This can probably be optimized.\r\n if attacker[0]['ID'] == attacker[1]['ID']:\r\n attacker[1]['ATK'] = int(attacker[1]['ATK'] * .9)\r\n attacker[1]['Crit'] = int(attacker[1]['Crit'] * .9)\r\n\r\n if attacker[0]['ID'] == attacker[2]['ID']:\r\n attacker[2]['ATK'] = int(attacker[2]['ATK'] * .9)\r\n attacker[2]['Crit'] = int(attacker[2]['Crit'] * .9)\r\n\r\n if attacker[1]['ID'] == attacker[2]['ID']:\r\n attacker[2]['ATK'] = int(attacker[2]['ATK'] * .9)\r\n attacker[2]['Crit'] = int(attacker[2]['Crit'] * .9)\r\n\r\n if defender[0]['ID'] == defender[1]['ID']:\r\n defender[1]['ATK'] = int(defender[1]['ATK'] * .9)\r\n defender[1]['Crit'] = int(defender[1]['Crit'] * .9)\r\n\r\n if defender[0]['ID'] == defender[2]['ID']:\r\n defender[2]['ATK'] = int(defender[2]['ATK'] * .9)\r\n defender[2]['Crit'] = int(defender[2]['Crit'] * .9)\r\n\r\n if defender[1]['ID'] == defender[2]['ID']:\r\n defender[2]['ATK'] = int(defender[2]['ATK'] * .9)\r\n defender[2]['Crit'] = int(defender[2]['Crit'] * .9)\r\n\r\n #Conduct PvP operations between the brotherhoods to determine the winner\r\n attacker_wins = 0\r\n defender_wins = 0\r\n battle_info = ''\r\n\r\n for i in range(0,3):\r\n winner, loser = simulate_battle(attacker[i], defender[i]) #Same from PvP.tournament\r\n if attacker[i]['ID'] == winner['ID']:\r\n attacker_wins += 1\r\n battle_info += f\"{guild['Name']}'s {attacker[i]['Name']} defeated {defending_guild['Name']}'s {defender[i]['Name']}.\\n\"\r\n else:\r\n defender_wins += 1\r\n battle_info += f\"{defending_guild['Name']}'s {defender[i]['Name']} defeated {guild['Name']}'s {attacker[i]['Name']}.\\n\"\r\n\r\n #Log battle, change controller if applicable, return output\r\n if attacker_wins > defender_wins:\r\n await AssetCreation.set_area_controller(self.client.pg_con, guild['Base'], guild['ID'])\r\n await AssetCreation.log_area_attack(self.client.pg_con, guild['Base'], guild['ID'], defending_guild['ID'], guild['ID'])\r\n await self.client.announcement_channel.send(f\"**{guild['Name']} (ID: `{guild['ID']}`)** has defeated **{defending_guild['Name']}**, seizing control over {guild['Base']}.\")\r\n await ctx.reply(f\"{battle_info}{guild['Name']} has seized control over {guild['Base']}!\")\r\n else:\r\n await AssetCreation.log_area_attack(self.client.pg_con, guild['Base'], defending_guild['ID'], guild['ID'], defending_guild['ID'])\r\n await ctx.reply(f\"{battle_info}Your attack on {guild['Base']} was put down by the champions of {defending_guild['Name']}.\")", "title": "" }, { "docid": "0175d773826e00b4b63a4fde76603462", "score": "0.45863643", "text": "def printAbility(player):\n if isinstance(player.weapon, it.Longsword):\n print(\"Heroic Strike - 1 ap\\nPommel Attack - 2 ap\")\n elif isinstance(player.weapon, it.Greatsword):\n print(\"Execute - 1 ap\\nSkullsplitter - 2 ap\")\n elif isinstance(player.weapon, it.Dagger):\n print(\"Poison Strike - 1 ap\\nSinister Strike - 2 ap\")\n elif isinstance(player.weapon, it.SmallAxe):\n print(\"Deep Wounds - 1 ap\\nArmor Crush - 2 ap\")\n elif isinstance(player.weapon, it.Greataxe):\n print(\"Bloodthirst - 1 ap\\nRampage - 2 ap\")\n if player.shield:\n print(\"Shield Bash - 1 ap\\nShield Wall - 2 ap\")", "title": "" }, { "docid": "8f11743244118b3524b6190e7bd63f02", "score": "0.45841917", "text": "def hole_cards(hh): \n\n c1 = ''\n c2 = ''\n for elem in hh:\n if 'Pocket' in elem and 'Hero' in elem: # looks for string where my cards are listed\n temp = elem.split(' ') # split and iterate through that string\n for s in temp:\n if 'Hero' in s: # gets card 1 \n c1 = 
s.replace('player=\"Hero\">', '')\n c1 = c1.replace('0', '')\n if '</cards>' in s: # gets card 2\n c2 = s.replace('</cards>', '')\n c2 = c2.replace('0', '')\n if c1 != '':\n cards = [c1, c2] # puts cards if list if they exist\n return cards\n else:\n return None", "title": "" }, { "docid": "ab3042f937b658e555ff4ae5baa4a611", "score": "0.4581704", "text": "def character_lookup_auto(bot, trigger):\n possible_url_array = trigger.rstrip(' ').split('=')\n if len(possible_url_array) < 3:\n return\n if possible_url_array[-2] == 'character&id':\n get_char_url = 'http://www.nexusclash.com/modules.php?name=Character&id=' + possible_url_array[-1] + '&format=json'\n response = urllib.urlopen(get_char_url)\n data = json.loads(response.read())\n if not data['result']['character']['name']['name'] == '':\n char_lvl = str(data['result']['character']['level'])\n char_name = data['result']['character']['name']['name']\n char_class = data['result']['character']['classes'][-1]\n if data['result']['character']['status']['alive'] == True:\n char_status = 'alive'\n else:\n char_status = 'dead'\n char_string = 'NAME: ' + char_name + ' - LVL: ' + char_lvl + ' - CLASS: ' + char_class + ' - STATUS: ' + char_status\n if data['result']['character']['faction']['id'] != 0:\n char_string += (' - FACTION: ' + data['result']['character']['faction']['name'])\n bot.say(char_string)", "title": "" } ]
cc5defd274a4954d67fd47e856d48480
Tests that when the email address is missing from the sociallogin, email verification kicks in.
[ { "docid": "23cb442dbf31df330244aea3b631a8a2", "score": "0.81232715", "text": "def test_email_address_required_missing_from_sociallogin(\n db, settings, sociallogin_factory, client, rf\n):\n settings.ACCOUNT_EMAIL_REQUIRED = True\n settings.ACCOUNT_UNIQUE_EMAIL = True\n settings.ACCOUNT_USERNAME_REQUIRED = False\n settings.ACCOUNT_AUTHENTICATION_METHOD = \"email\"\n settings.ACCOUNT_EMAIL_VERIFICATION = \"mandatory\"\n settings.SOCIALACCOUNT_AUTO_SIGNUP = True\n\n sociallogin = sociallogin_factory(with_email=False)\n\n request = rf.get(\"/\")\n request.session = {}\n request.user = AnonymousUser()\n resp = complete_social_login(request, sociallogin)\n assert resp[\"location\"] == reverse(\"socialaccount_signup\")\n\n session = client.session\n session[\"socialaccount_sociallogin\"] = sociallogin.serialize()\n session.save()\n resp = client.post(reverse(\"socialaccount_signup\"), {\"email\": \"[email protected]\"})\n assert resp[\"location\"] == reverse(\"account_email_verification_sent\")", "title": "" } ]
[ { "docid": "f6695ab87d34ed6b71c3e950fa736397", "score": "0.7807229", "text": "def test_email_address_clash_username_not_required(self):\n request, resp = self._email_address_clash(\"test\", \"[email protected]\")\n self.assertEqual(resp[\"location\"], reverse(\"socialaccount_signup\"))\n\n # POST email to social signup form (username not present)\n request.method = \"POST\"\n request.POST = {\"email\": \"[email protected]\"}\n resp = signup(request)\n self.assertEqual(resp[\"location\"], \"/accounts/profile/\")\n user = get_user_model().objects.get(\n **{account_settings.USER_MODEL_EMAIL_FIELD: \"[email protected]\"}\n )\n self.assertNotEqual(user_username(user), \"test\")", "title": "" }, { "docid": "bfdc747c89797c144d55af5d7d08822a", "score": "0.7499017", "text": "def test_invalid_email(self):\r\n user = self.helper('create_user')\r\n self.login(USERNAME, PASSWORD)\r\n self.go200(reverse('person.views.edit_profile'))\r\n self.formvalue(1, 'id_email', \"email\")\r\n self.submit200()\r\n self.find(\"Enter a valid e-mail address.\")", "title": "" }, { "docid": "541f9a84ea0850e30521b6e569759312", "score": "0.74640894", "text": "def test_email_no_at(self):\n email = \"testexample.com\"\n self.assertFalse(validators.isEmailValid(email))", "title": "" }, { "docid": "ea4d8f4b0689c9062a9736edad8105b1", "score": "0.7439763", "text": "def test_unverified_email_change_at_signup(self):\n session = self.client.session\n User = get_user_model()\n sociallogin = SocialLogin(\n user=User(email=\"[email protected]\"),\n account=SocialAccount(provider=\"google\"),\n email_addresses=[\n EmailAddress(\n email=\"[email protected]\",\n verified=False,\n primary=True,\n )\n ],\n )\n session[\"socialaccount_sociallogin\"] = sociallogin.serialize()\n session.save()\n resp = self.client.get(reverse(\"socialaccount_signup\"))\n form = resp.context[\"form\"]\n self.assertEqual(form[\"email\"].value(), \"[email protected]\")\n resp = self.client.post(\n reverse(\"socialaccount_signup\"),\n data={\"email\": \"[email protected]\"},\n )\n\n self.assertRedirects(resp, reverse(\"account_email_verification_sent\"))\n user = User.objects.all()[0]\n self.assertEqual(user_email(user), \"[email protected]\")\n self.assertTrue(\n EmailAddress.objects.filter(\n user=user,\n email=\"[email protected]\",\n verified=False,\n primary=False,\n ).exists()\n )\n self.assertTrue(\n EmailAddress.objects.filter(\n user=user,\n email=\"[email protected]\",\n verified=False,\n primary=True,\n ).exists()\n )", "title": "" }, { "docid": "de26738b892f989fa875122ee40ce8de", "score": "0.7357271", "text": "def test_email_address_conflict_during_auto_signup(\n db, settings, user_factory, sociallogin_factory, client, rf, mailoutbox\n):\n settings.ACCOUNT_EMAIL_REQUIRED = True\n settings.ACCOUNT_UNIQUE_EMAIL = True\n settings.ACCOUNT_USERNAME_REQUIRED = False\n settings.ACCOUNT_AUTHENTICATION_METHOD = \"email\"\n settings.ACCOUNT_EMAIL_VERIFICATION = \"mandatory\"\n settings.SOCIALACCOUNT_AUTO_SIGNUP = True\n\n user = user_factory()\n sociallogin = sociallogin_factory(email=user.email, with_email=True)\n\n request = rf.get(\"/\")\n request.session = {}\n request.user = AnonymousUser()\n\n resp = complete_social_login(request, sociallogin)\n assert resp[\"location\"] == reverse(\"account_email_verification_sent\")\n assert mailoutbox[0].subject == \"[example.com] Account Already Exists\"", "title": "" }, { "docid": "5f0eaa079ed55245923294e3ebdb4e35", "score": "0.73370194", "text": "def test_email_domain_validation(self):\n\n with 
self.settings(UAA_APPROVED_DOMAINS={'gsa.gov'}):\n email = '[email protected]'\n self.assertEqual('aaron.snow', email_to_username(email))\n with self.assertRaises(ValidationError):\n email = '[email protected]'\n email_to_username(email)", "title": "" }, { "docid": "77fcbd6690277e8c5579b0e39cc78006", "score": "0.733542", "text": "def test_authenticate_missing_email(user_factory, verified_email_auth_backend):\n user_factory(password=\"password\")\n\n assert (\n verified_email_auth_backend.authenticate(\n email=\"[email protected]\", password=\"password\", request=None\n )\n is None\n )", "title": "" }, { "docid": "542d1ac4211da3c5c775006dbd8f0c48", "score": "0.73030853", "text": "def test_email_address_clash_username_required(self):\n request, resp = self._email_address_clash(\"test\", \"[email protected]\")\n self.assertEqual(resp[\"location\"], reverse(\"socialaccount_signup\"))\n\n # POST different username/email to social signup form\n request.method = \"POST\"\n request.POST = {\"username\": \"other\", \"email\": \"[email protected]\"}\n resp = signup(request)\n self.assertEqual(resp[\"location\"], \"/accounts/profile/\")\n user = get_user_model().objects.get(\n **{account_settings.USER_MODEL_EMAIL_FIELD: \"[email protected]\"}\n )\n self.assertEqual(user_username(user), \"other\")", "title": "" }, { "docid": "79939f1c795bcb0ccbe1a2ca4d057749", "score": "0.7285109", "text": "def testOauthEmailIgnoresUnverified(self):\n answers = {\n '/user': {\n 'user': 'trololol',\n 'login': 'trololol'\n },\n '/user/emails': [\n {\n 'email': '[email protected]',\n 'verified': False,\n 'primary': True\n },\n {\n 'email': '[email protected]',\n 'verified': True,\n 'primary': False\n },\n {\n 'email': '[email protected]',\n 'verified': True,\n 'primary': True\n },\n ]\n }\n\n email = self.getEmail(answers, request_email=True)\n self.assertEqual(email, ['[email protected]'])", "title": "" }, { "docid": "3d201a2da0ba2ac0eb216951f7ac61dc", "score": "0.7277191", "text": "def test_login_without_email(self):\n response = self.login_user(\"\", \"@Us3r.co3mW\")\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.data['errors']['email'][0], 'An email address is required to log in.')", "title": "" }, { "docid": "c0721494687605c7c11fae295847c7de", "score": "0.72304523", "text": "def test_no_email_address_value(self):\n self.test_data03 = {\"name\": \"shakira\", \"email_address\": \"\", \"password\": \"Kpl\",\n \"account_type\": \"store_attendant\"}\n\n response = self.test_app.post('/store-manager/api/v1/auth/signup', content_type=\"application/json\",\n data=json.dumps(self.test_data03), headers={'x-access-token': self.token})\n self.assertTrue(response.status_code, 400)\n response_message = json.loads(response.data.decode())\n self.assertIn(\"Please note that the value of email_address is missing\", response_message[\"message\"])", "title": "" }, { "docid": "b47096fbf50d90198ff7983e7495de21", "score": "0.7211974", "text": "def testing_email(self):\n try:\n user_validation = UserValidation()\n result = user_validation.validate(\"[email protected]\", \"^[a-z0-9]{3,}(.[0-9a-z]+)*@[a-z]{2,}.(com|edu)(co.in)*$\")\n self.assertEqual(True, result)\n except ValueError:\n raise UserValidationException", "title": "" }, { "docid": "6a8418afe12164112ac3b8df9a02a88b", "score": "0.71895576", "text": "def test_email_address_conflict_at_social_signup_form(\n db, settings, user_factory, sociallogin_factory, client, rf, mailoutbox\n):\n settings.ACCOUNT_EMAIL_REQUIRED = True\n 
settings.ACCOUNT_UNIQUE_EMAIL = True\n settings.ACCOUNT_USERNAME_REQUIRED = False\n settings.ACCOUNT_AUTHENTICATION_METHOD = \"email\"\n settings.ACCOUNT_EMAIL_VERIFICATION = \"mandatory\"\n settings.SOCIALACCOUNT_AUTO_SIGNUP = True\n\n user = user_factory()\n sociallogin = sociallogin_factory(with_email=False)\n\n request = rf.get(\"/\")\n request.session = {}\n request.user = AnonymousUser()\n\n resp = complete_social_login(request, sociallogin)\n # Auto signup does not kick in as the `sociallogin` does not have an email.\n assert resp[\"location\"] == reverse(\"socialaccount_signup\")\n\n session = client.session\n session[\"socialaccount_sociallogin\"] = sociallogin.serialize()\n session.save()\n # Here, we input the already existing email.\n resp = client.post(reverse(\"socialaccount_signup\"), {\"email\": user.email})\n assert mailoutbox[0].subject == \"[example.com] Account Already Exists\"\n assert resp[\"location\"] == reverse(\"account_email_verification_sent\")", "title": "" }, { "docid": "d0e8e840a8d44a9ce62144c9ea19b041", "score": "0.7182487", "text": "def test_invalid_emails(raw):\n assert formats.normalize_email(raw) is None", "title": "" }, { "docid": "8972cc4a3ee828bfc583b4bdc1073b4d", "score": "0.71502066", "text": "def test_authenticate_unverified_email(\n email_factory, user_factory, verified_email_auth_backend\n):\n user = user_factory(password=\"password\")\n email = email_factory(is_verified=False, user=user)\n\n assert (\n verified_email_auth_backend.authenticate(\n email=email.email, password=\"password\", request=None\n )\n is None\n )", "title": "" }, { "docid": "d486e59422b90c2cb4273e7e7a5d13cf", "score": "0.7133318", "text": "def test_validate_email_address(self):\n self.test_data21 = {\"name\": \"t3frcf\", \"email_address\": \"5u.gmail.com\", \"password\": \"wqc\",\n \"account_type\": \"store_attendant\"}\n response = self.test_app.post('/store-manager/api/v1/auth/signup', content_type=\"application/json\",\n data=json.dumps(self.test_data21), headers={'x-access-token': self.token})\n self.assertTrue(response.status_code, 400)\n response_message = json.loads(response.data.decode())\n self.assertIn(\"The email should follow the format of valid emails ([email protected])\",\n response_message[\"message\"])", "title": "" }, { "docid": "bf313748ce6843cee1c0aba5933ee197", "score": "0.7109922", "text": "def test_create_wrong_email(self):\n self.user_data['email'] = 'usergmail.com'\n response = self.query(register_user_query.format(**self.user_data), )\n self.assertIn('errors', response)\n self.assertNotIn('success', response)", "title": "" }, { "docid": "9e18138acc6e93d78aae547f052428a6", "score": "0.7081421", "text": "def test_email_nonexistent_user(self):\n data = {'email': '[email protected]'}\n\n endpoint = reverse('account:password_reset')\n done_page = reverse('account:password_reset_done')\n response = self.client.post(endpoint, data)\n\n # Email with password reset link should not be sent.\n self.assertEqual(len(mail.outbox), 0)\n\n self.assertRedirects(response, done_page)", "title": "" }, { "docid": "e221e0143adee758b5e8ac09bcd0e0f7", "score": "0.7070167", "text": "def test_verified_email_change_at_signup(self):\n session = self.client.session\n User = get_user_model()\n sociallogin = SocialLogin(\n user=User(email=\"[email protected]\"),\n account=SocialAccount(provider=\"google\"),\n email_addresses=[\n EmailAddress(email=\"[email protected]\", verified=True, primary=True)\n ],\n )\n session[\"socialaccount_sociallogin\"] = sociallogin.serialize()\n 
session.save()\n resp = self.client.get(reverse(\"socialaccount_signup\"))\n form = resp.context[\"form\"]\n self.assertEqual(form[\"email\"].value(), \"[email protected]\")\n resp = self.client.post(\n reverse(\"socialaccount_signup\"),\n data={\"email\": \"[email protected]\"},\n )\n self.assertRedirects(resp, \"/accounts/profile/\", fetch_redirect_response=False)\n user = User.objects.all()[0]\n self.assertEqual(user_email(user), \"[email protected]\")\n self.assertTrue(\n EmailAddress.objects.filter(\n user=user,\n email=\"[email protected]\",\n verified=True,\n primary=True,\n ).exists()\n )\n self.assertTrue(\n EmailAddress.objects.filter(\n user=user,\n email=\"[email protected]\",\n verified=False,\n primary=False,\n ).exists()\n )", "title": "" }, { "docid": "56d890692b265e2c0b88a117826ca6f4", "score": "0.70490265", "text": "def test_email(self):\n self.assertEqual(self.person.email, '[email protected]')", "title": "" }, { "docid": "204bc9bfa3d07335b26d4b9d1b7b6159", "score": "0.70137304", "text": "def test_login_email(self):\n\n with self.client as c:\n result = c.post('/check-login',\n data={'email': '[email protected]', 'password': 'user1'},\n follow_redirects=True\n )\n self.assertIn(\"No user with that email\", result.data)", "title": "" }, { "docid": "2c40c574fc1de03190789b828af749cd", "score": "0.70048064", "text": "def test_validate_email(self):\n self.assertTrue(validate_email('[email protected]'))", "title": "" }, { "docid": "d2b2d3c5f77c245b1aa243110835acab", "score": "0.7003449", "text": "def test_validate_email(self):\n correct_email = '[email protected]'\n wrong_email = 'jondoe@email'\n self.assertTrue(validate_email(correct_email))\n self.assertFalse(validate_email(wrong_email))", "title": "" }, { "docid": "797ae832538de65df434701d466148ef", "score": "0.699094", "text": "def test_email(self):\n # The 'test' is that this doesn't throw.\n libclient.get_email(domain, token)", "title": "" }, { "docid": "cf42338b419e828884c9ef440fd0563d", "score": "0.6987492", "text": "def test_email_validator(new_address_book):\n\n assert new_address_book.email_validator(\"[email protected]\")\n\n assert not new_address_book.email_validator(\"@test.com\")\n\n assert not new_address_book.email_validator(\"test@\")\n\n assert not new_address_book.email_validator(\"test@test\")\n\n assert not new_address_book.email_validator(\"[email protected].\")", "title": "" }, { "docid": "e31d7ceca75450ae4b3bcf0b3ece3373", "score": "0.6970801", "text": "def validate_email(self, field):\n if User.query.filter_by(email=field.data).first() is None:\n raise ValidationError('Okänd email-adress.')", "title": "" }, { "docid": "bf4a766df6fb3d64d34ac335df06061c", "score": "0.6965775", "text": "def test_no_authoritative_email(self):\n url = reverse('authentication_register')\n\n email = ''.join(random.choice(string.ascii_lowercase) for _ in range(10)) + '@example.com'\n username = 'admin@' + settings.ALLOWED_DOMAINS[0]\n authkey = binascii.hexlify(os.urandom(settings.AUTH_KEY_LENGTH_BYTES)).decode()\n public_key = binascii.hexlify(os.urandom(settings.USER_PUBLIC_KEY_LENGTH_BYTES)).decode()\n private_key = binascii.hexlify(os.urandom(settings.USER_PRIVATE_KEY_LENGTH_BYTES)).decode()\n private_key_nonce = binascii.hexlify(os.urandom(settings.NONCE_LENGTH_BYTES)).decode()\n secret_key = binascii.hexlify(os.urandom(settings.USER_SECRET_KEY_LENGTH_BYTES)).decode()\n secret_key_nonce = binascii.hexlify(os.urandom(settings.NONCE_LENGTH_BYTES)).decode()\n user_sauce = 
'05aa27037cf893e2a4113ddbe8836e1bf395556669904902643670fbf3841338'\n\n data = {\n 'username': username,\n 'email': email,\n 'authkey': authkey,\n 'public_key': public_key,\n 'private_key': private_key,\n 'private_key_nonce': private_key_nonce,\n 'secret_key': secret_key,\n 'secret_key_nonce': secret_key_nonce,\n 'user_sauce': user_sauce,\n }\n\n response = self.client.post(url, data)\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "title": "" }, { "docid": "ca9c8e68db664c2e2443460f5bd52898", "score": "0.69459325", "text": "def test_email(self):\r\n \r\n self.assertEqual('[email protected]', self.user.email)", "title": "" }, { "docid": "bf6a325d4f29de3a87ed4dfc1d182508", "score": "0.694046", "text": "def validate_email(self, email):\n user = User.query.filter_by(email=email.data).first()\n if user is not None:\n raise ValidationError('Please use a different email address.')", "title": "" }, { "docid": "96f45b251c12618ca21dcf8e120e08a4", "score": "0.69219583", "text": "def pre_social_login(self, request, sociallogin):\n if settings.ALLOWED_EMAIL_DOMAIN:\n domain = f\"@{settings.ALLOWED_EMAIL_DOMAIN}\"\n\n email = sociallogin.account.extra_data[\"email\"]\n logger.info(\"Trying social sign up: %s\", email)\n\n if not email.endswith(domain):\n msg = f\"Email must be from {domain} domain!\"\n logger.info(msg)\n messages.error(request, msg)\n raise ImmediateHttpResponse(redirect(settings.LOGIN_REDIRECT_URL))", "title": "" }, { "docid": "c2f9b8a47a014bde4952b823f8f248e9", "score": "0.69192547", "text": "def test_unsuccessful_registration_with_empty_email(self):\n self.valid_user['email'] = \"\"\n response = self.client.post(\n self.registration_url, self.valid_user, format='json')\n self.assertEqual(response.content, self.empty_email_error_message)", "title": "" }, { "docid": "695a4b3de74c42ee3dff6ea3ac1728bc", "score": "0.689829", "text": "def test_user_register_no_email(self):\n response = self.client.post(self.url_register,\n data=json.dumps(register_no_email),\n content_type='application/json')\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertIsNotNone(response.data[\"errors\"][\"email\"])", "title": "" }, { "docid": "2d774cf5aee5923d56a6230cfd4074ae", "score": "0.6891736", "text": "def testOauthValidEmailAPIInvalid(self):\n answers = {\n '/user': {\n 'user': 'trololol',\n 'email': '[email protected]',\n 'login': 'trololol'\n },\n '/user/email': {\n 'foo': 'bar'\n }\n }\n\n errmsg, emails, _ = self.attemptValidOauth(\n self.trac_env, self.oauthCallbackSuccess, retcode=200,\n answers=answers, request_email=True)\n self.assertIn(\n \"An error occurred while retrieving your email address from the GitHub API\",\n errmsg,\n \"Failing email API request with valid OAuth token should print an error.\")", "title": "" }, { "docid": "fcf09f2b2802264b9d5d6250380a467b", "score": "0.6882552", "text": "def test_cannot_initiate_saml_flow_without_target_email_address(self):\n with self.assertRaises(AuthMissingParameter) as e:\n self.client.get(\"/login/saml/\")\n\n self.assertEqual(str(e.exception), \"Missing needed parameter email\")", "title": "" }, { "docid": "9ad17c47c18c4cd6db0b21bcdc6788a3", "score": "0.6881952", "text": "def clean_email(self):\r\n if PROFILE_EMAIL_CONFIRMATION:\r\n condition = EmailAddress.objects.filter(email__iexact=self.cleaned_data[\"email\"], verified=True).count() == 0\r\n else:\r\n condition = User.objects.get(email__iexact=self.cleaned_data[\"email\"], is_active=True).count() == 0\r\n \r\n if condition is True:\r\n 
raise forms.ValidationError(_(\"Email address not verified for any user account\"))\r\n \r\n return self.cleaned_data[\"email\"]", "title": "" }, { "docid": "8bcf5f1e3d1997bc0adc5e3da03669f2", "score": "0.68748116", "text": "def test_user_declaration_with_missing_email(self):\n with self.assertRaises(ValidationError):\n u = User(name=\"Test User\", gplus_id='test123')\n u.save()", "title": "" }, { "docid": "11e856f6a7e1acabc608dc8ca7b9f2a8", "score": "0.6859599", "text": "def test_cant_register_user_with_no_email(self):\n response = self.client.post(self.register_url, self.user_invalid_email, format='text/html')\n self.assertNotEqual(response.status_code, 302)", "title": "" }, { "docid": "d55628de753e687616607a4d5ed427e8", "score": "0.6857346", "text": "def test_get_by_email_negative(self):\n user_to_expect = CustomUser.get_by_email('[email protected]')\n self.assertIsNone(user_to_expect)", "title": "" }, { "docid": "d55628de753e687616607a4d5ed427e8", "score": "0.6857346", "text": "def test_get_by_email_negative(self):\n user_to_expect = CustomUser.get_by_email('[email protected]')\n self.assertIsNone(user_to_expect)", "title": "" }, { "docid": "f7ae59ecc57531472fc58f17cff6ef1d", "score": "0.68414253", "text": "def test_create_user_request_email_validation(self):\n\n team = base_team.copy()\n api.team.create_team(team)\n\n invalid_email_user = base_user.copy()\n invalid_email_user[\"email\"] = \"not_an_email\"\n\n with pytest.raises(Exception):\n api.user.create_user_request(invalid_email_user)\n assert False, \"Was able to register a user with something that doesn't look like an email.\"\n\n invalid_email_user[\"email\"] = \"[email protected]\"\n\n with pytest.raises(WebException):\n api.user.create_user_request(invalid_email_user)\n assert False, \"Was able to register a user with invalid characters\"\n\n valid_email_user = base_user.copy()\n assert api.user.create_user_request(valid_email_user), \"Was not able to register a valid email.\"", "title": "" }, { "docid": "83dc6162b58567942c0a4ece74be25b6", "score": "0.6802775", "text": "def valid_login_email(self, email=None):\n user_qs = User.objects.filter(email=email)\n if re.search(r\"^[\\w\\.\\+\\-]+\\@[\\w]+\\.[a-z]{2,3}$\", email) is None:\n raise ValidationError(error_msg['email_format'])\n elif not user_qs.exists():\n raise ValidationError(error_msg['unregistered_email'])\n return True", "title": "" }, { "docid": "b55bbc4303256b39712a0dddf0334a4c", "score": "0.67987424", "text": "def test_email_no_dot(self):\n email = \"test@examplecom\"\n self.assertFalse(validators.isEmailValid(email))", "title": "" }, { "docid": "1e8ae1ee3e072ba51ca10cc79900e518", "score": "0.6792988", "text": "def test_user_get_user_by_email(self) -> None:\n # TODO - should use mock data - disable tests for now :-(\n if self.tests_enabled:\n users = User().get_user_by_email('[email protected]')\n self.assertIsNotNone(users, 'User lookup by email is not None')\n self.assertEqual(len(users), 1, 'User lookup by email found 1')\n\n # some invalid email\n users = User().get_user_by_email('[email protected]')\n self.assertIsNone(users, 'User lookup by email is None')", "title": "" }, { "docid": "689cc752264a5041c57b1ab737930923", "score": "0.6787085", "text": "def test_email_is_not_blank(self):\n\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(\n None,\n 'test123'\n )", "title": "" }, { "docid": "9dd20f69d277b14a5a4efea12e593a80", "score": "0.6785644", "text": "def test_get_email(self, get_test_email):\n assert get_test_email(\"First 
Last\") == \"[email protected]\"\n assert get_test_email(\"John Doe\") == \"[email protected]\"\n\n with pytest.raises(NoContactFoundError):\n get_test_email(\"\")\n get_test_email(\"John\")", "title": "" }, { "docid": "2bc71dca0d16e41290a3d50cdda8b7b1", "score": "0.6772794", "text": "def test_bad_email(self):\n\t\tuserinfo = {\n\t\t\t\t\"email\":\"invalid@zoto\",\n\t\t\t\t\"password\":\"stupidpassword\",\n\t\t\t}\n\t\tresult = self._add_user('testuser', userinfo)\n\t\tself.assertEqual(result['user_errors'][0], '6400')", "title": "" }, { "docid": "0199f088e383df0099cf61d97311b5d8", "score": "0.6768123", "text": "def validate_email(self, email):\n user = flaskblog_user.User.query.filter_by(email=email.data).first()\n if user is None:\n raise wtforms.validators.ValidationError(\n \"There is no account with this email. You must register first.\"\n )", "title": "" }, { "docid": "af7146e7e912649674528b8774e38dbe", "score": "0.6759092", "text": "def test_unsuccessful_registration_empty_email(self):\n self.valid_user['email'] = \" \"\n response = self.client.post(\n self.registration_url, self.valid_user, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "title": "" }, { "docid": "9ce15dad6a1413d6c7b6aec1a692ddcd", "score": "0.67566484", "text": "def test_user_declaration_with_invalid_email_address(self):\n with self.assertRaises(ValidationError):\n u = User(name=\"Test User\", email='bademail', gplus_id='test123')\n u.save()", "title": "" }, { "docid": "874b106aa7ff13ca99d9204a8976f54d", "score": "0.6751911", "text": "def test_registr_check_email_exist(self):\n REGISTRATION_DATA['email'] = '[email protected]'\n return_data = validator.user_registration(REGISTRATION_DATA)\n ERROR_DATA['error'] = [{'email': ERROR_MSG['check_email_exist']}]\n REGISTRATION_DATA['email'] = '[email protected]'\n self.assertEqual(return_data, ERROR_DATA)", "title": "" }, { "docid": "66a2ff5ea6eb056023b318a5120b2e2a", "score": "0.67473036", "text": "def test_email_dot_not_in_domain(self):\n email = \"test.email@examplecom\"\n self.assertFalse(validators.isEmailValid(email))", "title": "" }, { "docid": "42154b93b2eb71cf54db54b3a81fc89e", "score": "0.674435", "text": "def test_not_same_email(self):\n url = reverse('authentication_register')\n\n email = ''.join(random.choice(string.ascii_lowercase) for _ in range(10)) + '@example.com'\n username = ''.join(random.choice(string.ascii_lowercase) for _ in range(10)) + '@' + settings.ALLOWED_DOMAINS[0]\n authkey = binascii.hexlify(os.urandom(settings.AUTH_KEY_LENGTH_BYTES)).decode()\n public_key = binascii.hexlify(os.urandom(settings.USER_PUBLIC_KEY_LENGTH_BYTES)).decode()\n private_key = binascii.hexlify(os.urandom(settings.USER_PRIVATE_KEY_LENGTH_BYTES)).decode()\n private_key_nonce = binascii.hexlify(os.urandom(settings.NONCE_LENGTH_BYTES)).decode()\n secret_key = binascii.hexlify(os.urandom(settings.USER_SECRET_KEY_LENGTH_BYTES)).decode()\n secret_key_nonce = binascii.hexlify(os.urandom(settings.NONCE_LENGTH_BYTES)).decode()\n user_sauce = 'bbd90b581b9c956e9077a8c71f61ecd9bf9355bd1aac3590bd995028ed224ae0'\n\n data = {\n 'username': username,\n 'email': email,\n 'authkey': authkey,\n 'public_key': public_key,\n 'private_key': private_key,\n 'private_key_nonce': private_key_nonce,\n 'secret_key': secret_key,\n 'secret_key_nonce': secret_key_nonce,\n 'user_sauce': user_sauce,\n }\n\n response = self.client.post(url, data)\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(models.User.objects.count(), 
1)\n\n user = models.User.objects.get()\n\n self.assertEqual(decrypt_with_db_secret(user.email), email)\n\n response = self.client.post(url, data)\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(models.User.objects.count(), 1)\n self.assertTrue(response.data.get('email', False),\n 'E-Mail in error message does not exist in registration response')", "title": "" }, { "docid": "17ead8d4c821e35d4d85dd56e4be8aa8", "score": "0.67423224", "text": "def test_email_validation_without_host():\n schema = Schema({\"email\": Email()})\n try:\n schema({\"email\": '[email protected]'})\n except MultipleInvalid as e:\n assert str(e) == \"expected an email address for dictionary value @ data['email']\"\n else:\n assert False, \"Did not raise Invalid for empty string URL\"", "title": "" }, { "docid": "b66289bc6b274855de6ad15c292f195b", "score": "0.672827", "text": "def need_verified_email(request, *args, **kwargs): # pylint: disable=unused-argument\n return standard_error_page(request, 401, \"verify_email.html\")", "title": "" }, { "docid": "ef4756284c404ef6b549ec7a7d572cef", "score": "0.67208666", "text": "def test_noemail():\n assert (user5.id == None and\n user5.fname == None and\n user5.lname == None and\n user5.email_address == None)", "title": "" }, { "docid": "b275429c245af27ea1bb041f1165ff63", "score": "0.6716921", "text": "def test_EmailConfirmation_with_email_not_verified(self):\n Member.objects.filter(username='member2').update(email_verified=False)\n self.client.login(username='member2', password='admin')\n response = self.client.get(reverse('ikwen:email_confirmation'))\n self.assertEqual(response.status_code, 200)", "title": "" }, { "docid": "205b12617f3cbfa30e5bdcb150b1f4e7", "score": "0.6715473", "text": "def test_space_input_email(self):\n self.test_data06 = {\"name\": \"LMZ\", \"email_address\": \" \", \"password\": \"q23\",\n \"account_type\": \"store_attendant\"}\n response = self.test_app.post('/store-manager/api/v1/auth/signup', content_type=\"application/json\",\n data=json.dumps(self.test_data06), headers={'x-access-token': self.token})\n self.assertTrue(response.status_code, 400)\n response_message = json.loads(response.data.decode())\n self.assertIn(\"Please note that the value of email_address is missing\", response_message[\"message\"])", "title": "" }, { "docid": "a769870c5b7e759bb123f8132ce70d6a", "score": "0.67114896", "text": "def test_user_logins_with_empty_email(self):\n self.uri = \"/fbs-api/users/login/\"\n params = {\"email\": \"\", \"password\": \"Testuser12344#\"}\n response = self.client.post(self.uri, params, format=\"json\")\n self.assertEqual(\n response.status_code,\n 400,\n \"Expected Response Code 400, received {0} instead.\".format(\n response.status_code\n ),\n )\n self.assertIn(\"An email address is required to log in.\", str(response.data))", "title": "" }, { "docid": "320f889f39a34a407a4d209748cc339c", "score": "0.6708278", "text": "def test_cant_register_user_with_invalid_email(self):\n response = self.client.post(self.register_url, self.user_invalid_email, format='text/html')\n self.assertNotEqual(response.status_code, 302)", "title": "" }, { "docid": "3a46f9e0026f4183fa44c535550496b1", "score": "0.6696412", "text": "def clean_email(self):\n basic = User.objects.filter(email__iexact=self.cleaned_data['email'])\n if basic:\n\n # Not so fast; may be the user has an unconfirmed submission?\n if basic[0].profile.is_validated and basic[0].is_active:\n raise forms.ValidationError((\"This email address is already \"\n 
\"in use. Please supply a \"\n \"different email address.\"))\n\n return self.cleaned_data['email']", "title": "" }, { "docid": "b0008695114693f97adee9b0a55a6a51", "score": "0.66882664", "text": "def test_login_missing_email(self):\n\n res = self.client.post('/api/v2/auth/login', json={\n \"password\": \"jivunie\"\n })\n data = res.get_json()\n\n self.assertEqual(data['status'], 400)\n self.assertEqual(data['error'], 'email field is required')\n self.assertEqual(res.status_code, 400)", "title": "" }, { "docid": "b9eeed102b9da2fd797a77c9abf42303", "score": "0.66698855", "text": "def test_mismatching_emails_on_auth(self):\n # 1. The user fills in their credentials\n user_data = self.stub_user_data_2p0.copy()\n user_data['twopointoh_email'] = '[email protected]'\n\n # 2. The user submits their credentials to the end point\n url = url_for('authenticateusertwopointoh')\n\n with HTTMock(ads_classic_200):\n r = self.client.post(url, data=user_data)\n\n self.assertStatus(r, CLASSIC_AUTH_FAILED['code'])\n self.assertEqual(r.json['error'], CLASSIC_AUTH_FAILED['message'])", "title": "" }, { "docid": "f67a2032d643e793063d15a847f8d451", "score": "0.66546345", "text": "def clean_email(self):\n if UserProfile.objects.filter(email__iexact=self.cleaned_data['email']):\n raise forms.ValidationError(_(\"This email address is already in use. Please supply a different email address.\"))\n return self.cleaned_data['email']", "title": "" }, { "docid": "56c3431ca0a7867f216f5f93dabfc54b", "score": "0.66479313", "text": "def run_test_is_email(self):\n from server import is_email, NotEmail\n try:\n is_email('[email protected]')\n except NotEmail:\n self.fail('is_email raised an unexpected error.')", "title": "" }, { "docid": "a49a96560ac1726e40bfcbfaf8460c19", "score": "0.6641844", "text": "def test_registration_form_no_free_email(self):\n base_data = {}\n \n for domain in forms.RegistrationFormNoFreeEmail.bad_domains:\n invalid_data = {}\n invalid_data['email'] = u\"foo@%s\" % domain\n form = forms.RegistrationFormNoFreeEmail(data=invalid_data)\n self.failIf(form.is_valid())\n self.assertEqual(form.errors['email'],\n [u\"Registration using free email addresses is prohibited. 
Please supply a different email address.\"])\n\n base_data['email'] = '[email protected]'\n form = forms.RegistrationFormNoFreeEmail(data=base_data)\n self.failUnless(form.is_valid())", "title": "" }, { "docid": "5c7e2d0bd231c28ed339f76f102d8ba5", "score": "0.6628476", "text": "def test_user_password_reset_failed_no_email(self):\n\n self.url = reverse('users-password')\n data = {'email': ''}\n response = self.client.post(self.url, data, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n self.assertEqual(response.data.get('message'), 'Account does not exist.')", "title": "" }, { "docid": "50c5e3266b411c25c5735985c03e6984", "score": "0.6624272", "text": "def test_user_register_invalid_email(self):\n response = self.client.post(self.url_register,\n data=json.dumps(register_invalid_email),\n content_type='application/json')\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertIsNotNone(response.data[\"errors\"][\"email\"])", "title": "" }, { "docid": "819623ec26b7e9f77c3c9bbb56d5c0bb", "score": "0.6623746", "text": "def test_registration_with_invalid_email(self):\n response = self.client.post('/api/v1/register',\n content_type='application/json',\n data=self.invalid_email)\n self.assertIn('Invalid email address', response.data.decode())\n self.assertEqual(response.status_code, 400)", "title": "" }, { "docid": "9ea27c369fcf3b50c96eaa47c62aa607", "score": "0.6622882", "text": "def check_email():\n return check_email_post()", "title": "" }, { "docid": "c4ec56eb405ed23ea122803fa416e389", "score": "0.662207", "text": "def test_verify_email_in_use(\n db, api_request, user_factory, email_address_verification\n):\n user_factory(email=email_address_verification.emailaddress.email)\n data = {\"key\": email_address_verification.key}\n api_request.user = email_address_verification.emailaddress.user\n serializer = EmailAddressVerificationSerializer(\n data=data, context={\"request\": api_request}\n )\n assert not serializer.is_valid()\n assert set(serializer.errors.keys()) == {\"key\"}", "title": "" }, { "docid": "0f91ad981de1499cf40d5ede43e7e5dc", "score": "0.66155154", "text": "def test_create_negative_not_valid_email(self):\n expect_none = CustomUser.create('96mail.com', '1234', 'fname', 'mname', 'lname')\n self.assertIsNone(expect_none)", "title": "" }, { "docid": "44d6f0ba4e73734cc39e8940641b52d1", "score": "0.66096044", "text": "def test_register_with_empty_email(self):\n with self.client:\n response = self.client.post(\n \"1/user/register\",\n content_type='application/json',\n data=json.dumps(dict(username=\"Gideon B\", email=\"\", password=\"secret\"))\n ) \n reply = json.loads(response.data)\n self.assertEquals(reply[\"message\"], \"Missing email parameter\")\n self.assertEquals(response.status_code, 400)", "title": "" }, { "docid": "1c724865a7360a35c10e930e25e85bb4", "score": "0.6609134", "text": "def test_create_account_email_no_email_syntax(self):\n url = reverse('authentication_register')\n\n email = ''.join(random.choice(string.ascii_lowercase) for _ in range(10))\n username = ''.join(random.choice(string.ascii_lowercase) for _ in range(10)) + '@' + settings.ALLOWED_DOMAINS[0]\n authkey = binascii.hexlify(os.urandom(settings.AUTH_KEY_LENGTH_BYTES)).decode()\n public_key = binascii.hexlify(os.urandom(settings.USER_PUBLIC_KEY_LENGTH_BYTES)).decode()\n private_key = binascii.hexlify(os.urandom(settings.USER_PRIVATE_KEY_LENGTH_BYTES)).decode()\n private_key_nonce = 
binascii.hexlify(os.urandom(settings.NONCE_LENGTH_BYTES)).decode()\n secret_key = binascii.hexlify(os.urandom(settings.USER_SECRET_KEY_LENGTH_BYTES)).decode()\n secret_key_nonce = binascii.hexlify(os.urandom(settings.NONCE_LENGTH_BYTES)).decode()\n user_sauce = 'd25e29d812386431ec8f75ce4dce44464b57a9b742e7caeea78c9d984297c8f1'\n\n data = {\n 'username': username,\n 'email': email,\n 'authkey': authkey,\n 'public_key': public_key,\n 'private_key': private_key,\n 'private_key_nonce': private_key_nonce,\n 'secret_key': secret_key,\n 'secret_key_nonce': secret_key_nonce,\n 'user_sauce': user_sauce,\n }\n\n response = self.client.post(url, data)\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.data.get('email'), [u'INVALID_EMAIL_FORMAT'])", "title": "" }, { "docid": "0e81cd0ac6c769fed15b050d1d289318", "score": "0.6606541", "text": "def test_email_is_optional(self):\n form = self.make_validated_form(email='')\n self.assertFalse(form.errors)", "title": "" }, { "docid": "641e94b4f852a451b512d92574120125", "score": "0.6606325", "text": "def validate_email(self, field):\n if User.query.filter_by(email=field.data).first():\n raise ValidationError('Email-adressen är redan registrerad.')", "title": "" }, { "docid": "3eacdea59635ce2650087e2658f6ee55", "score": "0.66054904", "text": "def test_is_email():\n from server import is_email, NotEmail\n\n with pytest.raises(NotEmail):\n is_email('is.this.an.email')\n\n with pytest.raises(NotEmail):\n is_email('how@about@this')\n\n with pytest.raises(NotEmail):\n is_email('or.maybe@this')", "title": "" }, { "docid": "7a688ed3bda64f1144385d34855b1ae9", "score": "0.66034365", "text": "def test_signup_user_with_invalid_email_fails(self):\n payload = dict(\n email=\"invalid_email\",\n password=\"test_password\",\n password_confirmation=\"test_password\",\n )\n\n response = self.client.post(SIGNUP_USER_URL, payload)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertTrue(response.data)", "title": "" }, { "docid": "117d70a803ed5d7d190ba4b35f9f9468", "score": "0.6597834", "text": "def test_email_without_at_symbol(self):\n invalid_form_data = form_data.copy()\n invalid_form_data['email'] = 'emailtest.com'\n form = UserCreationSelfForm(data=invalid_form_data)\n self.assertFalse(form.is_valid())", "title": "" }, { "docid": "6915cc02587687709b93c71cd575a111", "score": "0.65894806", "text": "def test_register_email_already_registered(self):\n\n request = self.client.post(reverse('register_view'),\n dumps({\"email\": \"[email protected]\", \"password\": \"pwd\",\n \"created\": None, \"last_modified\": None}),\n content_type='application/json')\n self.assertEqual(request.status_code, 400)", "title": "" }, { "docid": "6d9dbff0a81832da58d38dc922149d7e", "score": "0.65810573", "text": "def not_valid_email(email):\n if not (re.search(email_pattern, email)):\n flash(flash_email)\n return True", "title": "" }, { "docid": "1e14b98ad05338196086517810ac4615", "score": "0.6580067", "text": "def validate_email(self, attrs, source):\r\n if PROFILE_EMAIL_CONFIRMATION:\r\n condition = EmailAddress.objects.filter(email__iexact=attrs[\"email\"], verified=True).count() == 0\r\n else:\r\n condition = User.objects.get(email__iexact=attrs[\"email\"], is_active=True).count() == 0\r\n \r\n if condition is True:\r\n raise serializers.ValidationError(_(\"Email address not verified for any user account\"))\r\n \r\n return attrs", "title": "" }, { "docid": "39b3c95c94f930270bf312515dd2ece6", "score": "0.6575459", "text": 
"def test_authenticate_invalid_password(\n email_factory, user_factory, verified_email_auth_backend\n):\n user = user_factory(password=\"password\")\n email = email_factory(is_verified=True, user=user)\n\n assert (\n verified_email_auth_backend.authenticate(\n email=email.email, password=\"notpassword\", request=None\n )\n is None\n )", "title": "" }, { "docid": "5554a5fb213d379284ba4dbf7d5c8a7f", "score": "0.6565239", "text": "def test_login_bad_email(self):\n\n res = self.client.post('/api/v2/auth/login', json={\n \"email\": 3,\n \"password\": \"some_screte\"\n })\n data = res.get_json()\n\n self.assertEqual(data['status'], 422)\n self.assertEqual(\n data['error'], \"Invalid or empty string for email\")\n self.assertEqual(res.status_code, 422)", "title": "" }, { "docid": "773ae2972b4336eb3b85309b02048239", "score": "0.65461034", "text": "def validate_email(self, email):\n user = User.query.filter_by(email=email.data).first()\n if user:\n raise ValidationError(\n 'That email address is already in use.')", "title": "" }, { "docid": "175f4384b5535f6f902ad4c582207cfc", "score": "0.65445155", "text": "def test_login_wrong_email(self):\n\n request = self.client.post(reverse('login_view'),\n dumps({\"email\": \"[email protected]\", \"password\": \"password\"}),\n content_type='application/json')\n self.assertEqual(request.status_code, 403)", "title": "" }, { "docid": "8448ca6668924c6057e229446b4d8892", "score": "0.65385914", "text": "def test_user_can_confirm_email(self):\n pass", "title": "" }, { "docid": "5881f5d7d99162278c49c83e6749367e", "score": "0.65326124", "text": "def test_login_user_invalid_email(self):\n app.register_user('ton1', '[email protected]', 'pass')\n with self.assertRaises(ValueError) as error:\n app.login_user('[email protected]', 'invalidpass')\n self.assertEqual('User does not exist', str(error.exception))", "title": "" }, { "docid": "85971447a4586b7cc9a9ebd56e771c43", "score": "0.6529108", "text": "def test_create_user_with_garbage_email(self):\n data = {\n 'username': 'foobar',\n 'password': 'somepassword',\n 'email': 'garbage'\n }\n\n self.client.post(self.create_url, data, format='json')\n\n response = self.client.post(self.create_url, data, format='json')\n self.assertEqual(response.status_code, 400, response.data)\n self.assertTrue('email' in response.data)", "title": "" }, { "docid": "e6be776e7becbe7b0bc1cf809153d839", "score": "0.6524038", "text": "def test_user_logins_with_wrong_email(self):\n self.uri = \"/fbs-api/users/login/\"\n params = {\"email\": \"[email protected]\", \"password\": \"Testuser12344#\"}\n response = self.client.post(self.uri, params, format=\"json\")\n self.assertEqual(\n response.status_code,\n 400,\n \"Expected Response Code 400, received {0} instead.\".format(\n response.status_code\n ),\n )\n self.assertIn(\n \"A user with this email and password was not found.\", str(response.data)\n )", "title": "" }, { "docid": "a8fe05b89ec221e6aeab34d03c4a96cd", "score": "0.65235806", "text": "def test_register_with_invalid_email(self):\n form_data = {'email': 'test', 'name': 'test', 'password': 'test', 'timezone': 'UTC'}\n form = RegistrationForm(data=form_data)\n self.assertFalse(form.is_valid())\n\n form_data['email'] = 'test@'\n form = RegistrationForm(data=form_data)\n self.assertFalse(form.is_valid())\n\n form_data['email'] = 'test@test'\n form = RegistrationForm(data=form_data)\n self.assertFalse(form.is_valid())\n\n form_data['email'] = 'test.com'\n form = RegistrationForm(data=form_data)\n self.assertFalse(form.is_valid())", "title": "" }, 
{ "docid": "506ffe30354d16ea39bd07fb19328368", "score": "0.6512887", "text": "def validate_email(self, email):\n email = email.lower()\n invalid_email = PLCInvalidArgument(\"Invalid e-mail address %s\"%email)\n\n if not email:\n raise invalid_email\n\n email_re = re.compile('\\A[a-zA-Z0-9._%+\\-]+@[a-zA-Z0-9._\\-]+\\.[a-zA-Z]+\\Z')\n if not email_re.match(email):\n raise invalid_email\n\n # check only against users on the same peer\n if 'peer_id' in self:\n namespace_peer_id = self['peer_id']\n else:\n namespace_peer_id = None\n\n conflicts = Persons(self.api, {'email':email,'peer_id':namespace_peer_id})\n\n for person in conflicts:\n if 'person_id' not in self or self['person_id'] != person['person_id']:\n raise PLCInvalidArgument(\"E-mail address already in use\")\n\n return email", "title": "" }, { "docid": "37b7591c90bb3f2719e51e528493e987", "score": "0.65109223", "text": "def validate_email(self, email):\n staff = Staff.query.filter_by(email=email.data).first()\n if staff is None:\n raise ValidationError('There is no account with that email. You must register first.')", "title": "" }, { "docid": "875682473463b2fe2b281d05b9173e64", "score": "0.65067846", "text": "def test_email_validation_with_none():\n schema = Schema({\"email\": Email()})\n try:\n schema({\"email\": None})\n except MultipleInvalid as e:\n assert str(e) == \"expected an email address for dictionary value @ data['email']\"\n else:\n assert False, \"Did not raise Invalid for None URL\"", "title": "" }, { "docid": "430e16637562bd7917abe4dfd4d31cbf", "score": "0.650379", "text": "def clean_email(self):\n email_domain = self.cleaned_data['email'].split('@')[1]\n if email_domain in self.bad_domains:\n raise forms.ValidationError(_(\"Registration using free email addresses is prohibited. 
Please supply a different email address.\"))\n return self.cleaned_data['email']", "title": "" }, { "docid": "d14053cda813a04f964efc185b481d80", "score": "0.6496182", "text": "def assert_no_user(self, email):\n user_exists_bool = get_user_model().objects.filter(\n email=email\n ).exists()\n self.assertFalse(user_exists_bool)", "title": "" }, { "docid": "53660585594234e8cc8ff40bc34b6d55", "score": "0.6487633", "text": "def test_blank_email(self):\n # Send message without input value on email field\n Select(self.main_page.select_subject_heading_dropdown()).select_by_value('2')\n self.main_page.message_element = 'test'\n self.main_page.select_send_button()\n\n # Verifies message \"Invalid email address\" displayed\n assert \"Invalid email address\" in self.driver.page_source", "title": "" }, { "docid": "b0158982645520654fc0d2fc824a602d", "score": "0.64768374", "text": "def test_invalid_email_format(self):\n\n res = self.client.post(\n '/auth/v2/signup',\n payload=json.dumps(self.invalid_email),\n content_type='application/json'\n )\n\n self.assertEqual(res, 400)", "title": "" }, { "docid": "b2171a2a3653520538cfe5ddc605d7af", "score": "0.64709765", "text": "def test_email_without_tld(self):\n invalid_form_data = form_data.copy()\n invalid_form_data['email'] = 'invalidemail@bad'\n form = UserCreationSelfForm(data=invalid_form_data)\n self.assertFalse(form.is_valid())", "title": "" }, { "docid": "49e842e730a71ba23b097e5e1adf4a02", "score": "0.6460639", "text": "def validate_email(self, field):\n if User.query.filter_by(email=field.data).first():\n raise ValidationError('Email already registered')", "title": "" }, { "docid": "08ddbb7b27d8f9639110b57f500f7170", "score": "0.6458639", "text": "def test_register_email_is_invalid_format(self):\n\n request = self.client.post(reverse('register_view'),\n dumps({\"email\": \"test_gmail.com\", \"password\": \"pwd\",\n \"created\": None, \"last_modified\": None}),\n content_type='application/json')\n self.assertEqual(request.status_code, 400)", "title": "" } ]
6e04ce63e8a78e8499e2eef3284600ae
Reference (start, end, strand) block
[ { "docid": "bcc85c32280cca386c969f5be579d1f3", "score": "0.6698357", "text": "def ref_block(self) -> tuple[int, int, int]:\n if self.ref_start < self.ref_end:\n return (self.ref_start, self.ref_end, self.ref_strand)\n else:\n return (self.ref_end, self.ref_start, self.ref_strand)", "title": "" } ]
[ { "docid": "e15be92cb7ad2d8e4bd439fb43d7c26e", "score": "0.60712653", "text": "def __init__ (self,start,end):\n self.start=start\n self.end=end", "title": "" }, { "docid": "4960a09897c9d25595292b38a51fa8bf", "score": "0.5889693", "text": "def __init__(self, start, end):\r\n self.start = start\r\n self.end = end", "title": "" }, { "docid": "6cd4dadd07b925a47e0d69a77965ea94", "score": "0.5840347", "text": "def reverse_strand(self) -> \"Location\":", "title": "" }, { "docid": "9775f1c9240aa323aba0d91489d838a4", "score": "0.57608277", "text": "def __init__(self, start, end):\n self._start = start\n self._end = end", "title": "" }, { "docid": "3af3f4d788d9899fdd80bfa891d95b49", "score": "0.5663322", "text": "def ReferencePoint(self) -> _n_2_t_1:", "title": "" }, { "docid": "d5fcf22d7db671f7951986f9dcb7bb59", "score": "0.5644424", "text": "def get(self, loc, strand=\"+\", mask_zero=False, **kargs):\n try:\n if loc[\"chr\"]: pass\n except TypeError: # probably a location string. try to cooerce\n loc = location(loc=loc)\n # Don't catch any exceptions here. Should break.\n\n # get is hitting location too hard. Unfold here:\n c = str(loc['chr'])\n left = int(loc['left'])\n rite = int(loc['right'])\n\n left_most_block = int(abs(math.floor(left / self.block_size)))\n right_most_block = int(abs(math.ceil((rite+1) / self.block_size)))\n\n blocks_required = [\"%s:%s\" % (c, b) for b in range(left_most_block * self.block_size, right_most_block * self.block_size, self.block_size)]\n\n ret_array = [] # faster than array\n\n for blockID in blocks_required:\n # this is the span location of the block\n #block_loc = location(chr=blockID.split(\":\")[0], left=blockID.split(\":\")[1], right=int(blockID.split(\":\")[1])+self.block_size-1)\n block_loc_left = int(blockID.split(\":\")[1]) # this is all you actually need for the block location\n # check if the block already exists\n if self.__has_block(blockID): # it does, get it.\n this_block_array_data = self.__get_block(blockID)\n else: # block not in db, fake a block instead.\n this_block_array_data = [0] * self.block_size\n\n # This feels like it would be a bit slow...\n # Work out the spans to reduce iteration:\n left_most = left - block_loc_left\n if left_most < 0: left_most = 0\n rite_most = rite - block_loc_left\n if rite_most > self.block_size: rite_most = self.block_size\n #print left_most, rite_most\n\n for pos in range(left_most, rite_most): #self.block_size): # iterate through the array.\n local_pos = block_loc_left + pos\n if local_pos >= left and local_pos <= (rite+1): # within the span to increment.\n if pos >= len(this_block_array_data): # stop edge falloffs\n ret_array.append(0)\n else:\n ret_array.append(this_block_array_data[pos])\n if mask_zero:\n mask = []\n for dd in ret_array:\n if int(dd*10000000) == 0: # delete if 10 sig figs close to 0\n mask.append(1)\n else:\n mask.append(0)\n # This may produce a warning, but apparently I can safely ignore it\n ret_array = numpy.ma.masked_array(ret_array, mask=mask)\n return(ret_array)", "title": "" }, { "docid": "78b90f49c9ad56b45d614a592afe468e", "score": "0.5643467", "text": "def slice_target(self,chr,start,end):\n # create a range that we are going to intersect with\n trng = Bed(chr,start,end)\n nrngs = []\n for r in self._rngs:\n i = r.intersect(trng)\n if not i: continue\n \tnrngs.append(i)\n if len(nrngs) == 0: return None\n return Transcript(nrngs,self._options)", "title": "" }, { "docid": "01ededf609cd973fc578783dbac840de", "score": "0.563847", "text": "def __init__(self, begin=None, 
end=None):\n self.begin = begin\n self.end = end", "title": "" }, { "docid": "ec670456e93ec1af6b4b8c7f381b7cac", "score": "0.5604478", "text": "def get_strand(start, end):\n\n # -\n if start > end:\n return -1\n # +\n elif start <= end:\n return 1\n\n return 0", "title": "" }, { "docid": "3cf8fd2bee689aa581d38724ad1ec734", "score": "0.5602929", "text": "def printBlock(self):\n print self.start, self.end", "title": "" }, { "docid": "5e2b169c19c146a178f9a2d9d5571203", "score": "0.55946493", "text": "def _full_span_interval(self) -> \"Location\":", "title": "" }, { "docid": "ca890f5a9c4574c5fa65943523a1d65d", "score": "0.5486178", "text": "def update_bounds(self):\n if self.strand == -1:\n left_exon = self.exons[-1]\n right_exon = self.exons[0]\n else:\n left_exon = self.exons[0]\n right_exon = self.exons[-1]\n\n if left_exon.start != self.start:\n self.start = left_exon.start\n if right_exon.end != self.end:\n self.end = right_exon.end", "title": "" }, { "docid": "072e00990038899b9d56d1776e166b84", "score": "0.548572", "text": "def target_begin(self):\n return self.p.ref_begin1 + self.index_starts_at", "title": "" }, { "docid": "d9cd8449a0eec9f9be90fc3351a78a24", "score": "0.5458251", "text": "def __init__(self, start, end, line):\r\n self.start = start\r\n self.end = end\r\n self.line = line", "title": "" }, { "docid": "14f2490d79394f2da87b216c53c8f9da", "score": "0.5406146", "text": "def getrange(self, name, start, end, callback=None):\n self._execute_command('GETRANGE', name, start, end, callback=callback)", "title": "" }, { "docid": "b67a017cb67bdcb076da12782b3c7a17", "score": "0.53886163", "text": "def backmap_ival(self, start, end):\r\n k = self.find_firstblock_contains(start)\r\n \r\n newivals = list()\r\n for i in xrange(k, self.numblock):\r\n gs = self.blocklist[i].get_genomic_start()\r\n bs = self.blocklist[i].get_start()\r\n be = self.blocklist[i].get_end()\r\n \r\n # new mRNA-like position in current block\r\n qs = max(bs, start) \r\n qe = min(be, end)\r\n \r\n # relative position in block + block start\r\n hs = qs - bs + gs\r\n he = qe - bs + gs\r\n if self.strand == '-':\r\n hs = be - qe + gs\r\n he = be - qs + gs\r\n \r\n if hs == he: # length 0\r\n continue\r\n \r\n newivals.append(Ival(hs, he))\r\n if end <= be:\r\n break\r\n \r\n if self.strand == '-':\r\n newivals.reverse()\r\n \r\n return newivals", "title": "" }, { "docid": "d012b039e69eef4febd35144f64ba662", "score": "0.538111", "text": "def load_ref(self, ref_fasta_name, chrom, start, end):\n\n if start >= end:\n mesg('*ERROR* Variant.load_ref() wrong query range {}-{} '\n '(start>=end)'.format(start, end), fatal=True)\n\n # open fai and get offset of region\n chrom_found = False\n for line in open(ref_fasta_name + '.fai'):\n tchrom, tlen, toffset, tnbase, tnchar = line.rstrip().split('\\t')\n tlen, toffset, tnbase, tnchar = (int(tlen), int(toffset),\n int(tnbase), int(tnchar))\n if tchrom == chrom:\n chrom_found = True\n break\n\n # check if chrom in fasta\n if not chrom_found:\n mesg('*ERROR* Variant.load_ref(): chrom {} is not found in '\n 'fasta {}'.format(chrom, ref_fasta_name), fatal=True)\n\n # check if range out of chromosome boundary\n if start < 0:\n start = 0\n mesg('*WARNING* Fasta.load_ref(): start position is less than 0.')\n if end > tlen - 1:\n end = tlen - 1\n mesg('*WARNING* Fasta.load_ref(): end position is beyond chrom '\n 'length.')\n\n # read fasta and fetch sequence\n # calculate byte-offset of region in the reference fasta\n tnextra = tnchar - tnbase\n offset_start = toffset + start + 
tnextra*(start//tnbase)\n offset_end = toffset + (end-1) + tnextra*((end-1)//tnbase)\n\n seq_str = ''\n with open(ref_fasta_name) as f:\n f.seek(offset_start)\n seq_str = f.read(offset_end-offset_start+1).replace('\\n','')\n\n # assign results\n self.chrom = chrom\n self.pos = start\n self.seq = list(seq_str)", "title": "" }, { "docid": "26871bbe4f829f303608049303d9ec4b", "score": "0.5358863", "text": "def makeReference(self):", "title": "" }, { "docid": "154fa6f60ac3815cd9645913ae0ac03e", "score": "0.53576046", "text": "def relative_interval_to_parent_location(\n self, relative_start: int, relative_end: int, relative_strand: Strand\n ) -> \"Location\":", "title": "" }, { "docid": "eb5adb25e5ed61e2f86f2fa179ab4a98", "score": "0.53566265", "text": "def block_get_start(file, line):\n pass # implemented in Ada", "title": "" }, { "docid": "ca0935b30440c2cd3025e0482bda2eb4", "score": "0.53415596", "text": "def subrange(self, start, end, *, strand=None):\n if (strand is not None) and (strand != self.strand):\n start, end = reverseRange(start, end, self.size)\n if (start > end) or (start < self.start) or (end > self.end):\n raise CoordsError(\"invalid subrange: {}-{} of {}\".format(start, end, self))\n return Coords(self.name, start, end, self.strand, self.size)", "title": "" }, { "docid": "b7b389adf3bbfcfecf861751d5932f2b", "score": "0.53408116", "text": "def span(self):\n return self._start, self._end", "title": "" }, { "docid": "22f067cf3329876ed113c832855c6e8e", "score": "0.53337103", "text": "def test_correct_block_boundary(self):\n # Segments ending at the end of the first block\n search = self.idx.search([3014687], [3014689])\n self.assertEqual(len(list(search)), 1)\n search = self.idx.search([3014688], [3014689])\n self.assertEqual(len(list(search)), 1)\n\n # Segments starting at the beginning of the second block\n search = self.idx.search([3014689], [3014690])\n self.assertEqual(len(list(search)), 1)\n search = self.idx.search([3014689], [3014691])\n self.assertEqual(len(list(search)), 1)\n\n # Segments overlapping the 2 blocks\n search = self.idx.search([3014688], [3014690])\n self.assertEqual(len(list(search)), 2)\n search = self.idx.search([3014687], [3014690])\n self.assertEqual(len(list(search)), 2)\n search = self.idx.search([3014687], [3014691])\n self.assertEqual(len(list(search)), 2)", "title": "" }, { "docid": "56a6c34d159804b195c90d40c18fffa1", "score": "0.5309184", "text": "def __init__(self, start, end):\n\t\tif self.geolocate(start):\n\t\t\tself.start=start\n\t\tif self.geolocate(end):\n\t\t\tself.end=end", "title": "" }, { "docid": "bb8f29666d7c6ced5395bc7756a8c6f9", "score": "0.5307484", "text": "def __init__(self, p1, p2):\n self.start = p1\n self.end = p2", "title": "" }, { "docid": "28aa54cb4f867fd8997ec240affab62e", "score": "0.5303796", "text": "def query_block(self) -> tuple[int, int, int]:\n if self.query_start < self.query_end:\n return (self.query_start, self.query_end, self.query_strand)\n else:\n return (self.query_end, self.query_start, self.query_strand)", "title": "" }, { "docid": "e8e2442f21901c515b3a48ada27a860e", "score": "0.5302977", "text": "def StartPoint(self) -> _n_2_t_1:", "title": "" },
_n_2_t_1:", "title": "" }, { "docid": "e462411ffd7867badca5cfdd1c2d95b7", "score": "0.5300183", "text": "def range(self):\r\n\t\t\r\n\t\treturn self._fragments['start_index'], self._fragments['end_index']", "title": "" }, { "docid": "9730009a2e20026c7c0a3fbe4f9461c1", "score": "0.5298707", "text": "def relative_start_to(self, reference):\n return self.aligned_start - reference", "title": "" }, { "docid": "92c8764ae6b5654b0007164ec451b21c", "score": "0.5285489", "text": "def extend_absolute(self, extend_start: int, extend_end: int) -> \"Location\":", "title": "" }, { "docid": "6cf58c10776edd444a9303265674595c", "score": "0.52595186", "text": "def __init__(self, gene, start, end, strand):\n self.start = int(start)\n self.end = int(end)\n self.strand = strand\n\n assert isinstance(gene, Gene)\n self.gene = gene\n\n # Set a back link to us\n self.gene.exons.append(self)", "title": "" }, { "docid": "2cd550164f20a24dc49e2b8eec9b8c52", "score": "0.5250012", "text": "def __init__(self, start: str, end: str) -> None:\n self.start = start\n self.end = end", "title": "" }, { "docid": "a48d8207a149569f8382ff65a1b97011", "score": "0.5230463", "text": "def GetRange(self, from_, to_):", "title": "" }, { "docid": "a48d8207a149569f8382ff65a1b97011", "score": "0.5230463", "text": "def GetRange(self, from_, to_):", "title": "" }, { "docid": "30a9ac353225a4df3c99bf33ef50c87d", "score": "0.5228832", "text": "def expand_block(self, feat):\n chrom_end = self._ref_sizes.get(feat.chrom)\n if chrom_end:\n if feat.start < self._end_buffer:\n feat.start = 0\n if feat.stop >= chrom_end - self._end_buffer:\n feat.stop = chrom_end\n return feat", "title": "" }, { "docid": "8f2fdca17a436f74dda8a073b3943884", "score": "0.5197684", "text": "def optimize_blocks(self) -> \"Location\":", "title": "" }, { "docid": "03046029dbde04764a1effbde8e574b3", "score": "0.5186175", "text": "def __init__(self, start):\n self.start = start\n self.current = start - 1", "title": "" }, { "docid": "4435c2923ccc5362d5b2577f7c97b34c", "score": "0.5185895", "text": "def scan_blocks(self) -> Iterator[\"Location\"]:", "title": "" }, { "docid": "4413681601cbd65ca36920ebed63c765", "score": "0.51577413", "text": "def BlockPosition(self) -> _n_2_t_1:", "title": "" }, { "docid": "0c935b3dfcc600daf7a7fad8ce6f7e29", "score": "0.5153327", "text": "def get_reference_regions(self):\n for chrom in self.chroms:\n tree = intervaltree.IntervalTree()\n for n in self.refNodes(chrom):\n tree.addi(n[\"start\"], n[\"end\"] + 1)\n tree.merge_overlaps()\n for iv in tree.items():\n yield f\"{chrom}:{iv.begin}-{iv.end-1}\"", "title": "" }, { "docid": "54f02b126056c8436cb02e27647d8f1a", "score": "0.5149547", "text": "def paintRange(self, startAddr: ghidra.program.model.address.Address, endAddr: ghidra.program.model.address.Address, value: db.Field) -> None:\n ...", "title": "" }, { "docid": "441ec8445f872cb8cd815fe548a0da00", "score": "0.51451206", "text": "def span(self) -> global___Span:", "title": "" }, { "docid": "48a2399ad4bad4a40ef43c8683308e02", "score": "0.5138103", "text": "def contact(init, tspan, a, h):\n return con.contact(init, tspan, h, a, lambda x: x, lambda t: 0)", "title": "" }, { "docid": "8c1ebadb1fe9909c047cd7a80cdd9482", "score": "0.51131845", "text": "def between_markers(text, begin=None, end=None):\n # your code here\n if begin == None and end == None:\n return text\n substr_start = 0\n substr_end = 0\n \n if begin == None:\n substr_start = 0\n sbustr_end = text.find(end)\n\n if end == None:\n sutstr_start = text.find(begin)\n substr_end = 
len(text)\n \n if begin != None and end != None:\n substr_start = text.find(begin)\n substr_end = text.find(end)\n #print(substr_start, substr_end)\n print(text[substr_start:substr_end])\n \n if substr_start >= substr_end:\n return ''\n else:\n return text[substr_start:substr_end]\n\n return ''", "title": "" }, { "docid": "e5b315f15df87e96f6d07097371896e0", "score": "0.51045996", "text": "def find(self, loc, strand=None):\n if strand and not strand == self.strand:\n return False\n return self.start <= loc < self.end", "title": "" }, { "docid": "23e52f7ed8d31444af696d97ce041f05", "score": "0.51017934", "text": "def blocks(self) -> List[\"Location\"]:", "title": "" }, { "docid": "a56340907360796c2bea79acfb67e58b", "score": "0.509453", "text": "def add_read(self, loc, strand=\"+\", increment=1):\n left_most_block = int(abs(math.floor(loc[\"left\"] / self.block_size)))\n right_most_block = int(abs(math.ceil((loc[\"right\"]+1) / self.block_size)))\n\n blocks_required = [\"%s:%s\" % (loc[\"chr\"], b) for b in range(left_most_block * self.block_size, right_most_block * self.block_size, self.block_size)]\n\n for blockID in blocks_required:\n # this is the span location of the block\n #block_loc = location(chr=blockID.split(\":\")[0], left=blockID.split(\":\")[1], right=int(blockID.split(\":\")[1])+self.block_size-1)\n\n # check if the block already exists\n if not self.__has_block(blockID): # not present, make a new one.\n self.__new_block(blockID)\n else:\n if not blockID in self.cacheQ: # not on cache, add it;\n self.cacheQ.insert(0, blockID) # put the ID at the front.\n self.cache[blockID] = self.__get_block(blockID)\n # block should now be on cache and accesable.\n\n bleft = int(blockID.split(\":\")[1])\n lleft = int(loc[\"left\"])\n lright = int(loc[\"right\"])\n # modify the data\n for pos in range(self.block_size): # iterate through the array.\n local_pos = bleft + pos # only require \"left\"\n # some funny stuff here:\n # right is inc'd by 1\n # as that is what is usually expected from 10-20.\n # (i.e. 
coords are NOT 0-based and are closed).\n if local_pos >= lleft and local_pos <= lright: # within the span to increment.\n self.cache[blockID][pos] += increment\n\n self.__flush_cache()\n return(True)", "title": "" }, { "docid": "1fe993d057cfce8c73c66ad0c29553ef", "score": "0.50945216", "text": "def TreatAsBlockRefForExplode(self) -> bool:", "title": "" }, { "docid": "c6eb4f30dc6e47fbcacff92a1dc71ca9", "score": "0.5092278", "text": "def GetRange(self):", "title": "" }, { "docid": "3a922f8f087115028d8365f1b526996d", "score": "0.5091685", "text": "def get_bed(self, rgb=None, name=None, start_offset=None, stop_offset=None):\n if start_offset is not None and stop_offset is not None:\n assert start_offset <= stop_offset\n if start_offset is not None:\n assert start_offset >= self.start\n if stop_offset is not None:\n assert stop_offset <= self.stop\n if rgb is None:\n rgb = self.rgb\n if name is not None:\n name += \"/\" + self.name\n else:\n name = self.name\n if start_offset is None and stop_offset is None:\n return [self.chromosome, self.start, self.stop, name, self.score, convert_strand(self.strand),\n self.thick_start, self.thick_stop, rgb, self.block_count, self.block_sizes, self.block_starts]\n elif start_offset == stop_offset:\n assert self.chromosome_coordinate_to_transcript(start_offset) is not None # no intron records\n return [self.chromosome, start_offset, stop_offset, name, self.score, convert_strand(self.strand),\n start_offset, stop_offset, rgb, 1, 0, 0]\n\n def _move_start(exon_intervals, block_count, block_starts, block_sizes, start, start_offset):\n to_remove = len([x for x in exon_intervals if x.start <= start_offset and x.stop <= start_offset])\n assert to_remove < len(exon_intervals)\n if to_remove > 0:\n block_count -= to_remove\n block_sizes = block_sizes[to_remove:]\n start += block_starts[to_remove]\n new_block_starts = [0]\n for i in xrange(to_remove, len(block_starts) - 1):\n new_block_starts.append(block_starts[i + 1] - block_starts[i] + new_block_starts[-1])\n block_starts = new_block_starts\n if start_offset > start:\n block_sizes[0] += start - start_offset\n block_starts[1:] = [x + start - start_offset for x in block_starts[1:]]\n start = start_offset\n return start, block_count, block_starts, block_sizes\n\n def _move_stop(exon_intervals, block_count, block_starts, block_sizes, stop, start, stop_offset):\n to_remove = len([x for x in exon_intervals if x.stop >= stop_offset and x.start >= stop_offset])\n assert to_remove < len(exon_intervals)\n if to_remove > 0:\n block_count -= to_remove\n block_sizes = block_sizes[:-to_remove]\n block_starts = block_starts[:-to_remove]\n assert len(block_sizes) == len(block_starts)\n if len(block_sizes) == 0:\n block_sizes = block_starts = [0]\n block_count = 1\n stop = start + block_sizes[-1] + block_starts[-1]\n if start + block_starts[-1] < stop_offset < stop:\n block_sizes[-1] = stop_offset - start - block_starts[-1]\n stop = stop_offset\n return stop, block_count, block_starts, block_sizes\n\n block_count = int(self.block_count)\n block_starts = map(int, self.block_starts.split(\",\"))\n block_sizes = map(int, self.block_sizes.split(\",\"))\n start = self.start\n stop = self.stop\n
thick_start = self.thick_start\n thick_stop = self.thick_stop\n\n if start_offset is not None and start_offset > start:\n start, block_count, block_starts, block_sizes = _move_start(self.exon_intervals, block_count, block_starts,\n block_sizes, start, start_offset)\n if stop_offset is not None and stop_offset < stop:\n stop, block_count, block_starts, block_sizes = _move_stop(self.exon_intervals, block_count, block_starts,\n block_sizes, stop, start, stop_offset)\n if start > thick_start:\n thick_start = start\n if stop < thick_stop:\n thick_stop = stop\n if (start > thick_stop and stop > thick_stop) or (start < thick_start and stop < thick_start):\n thick_start = 0\n thick_stop = 0\n block_starts = \",\".join(map(str, block_starts))\n block_sizes = \",\".join(map(str, block_sizes))\n return [self.chromosome, start, stop, name, self.score, convert_strand(self.strand), thick_start, thick_stop, rgb,\n block_count, block_sizes, block_starts]", "title": "" }, { "docid": "b2122a2bbe04f58cd80535468d86e54e", "score": "0.5083115", "text": "def BlockBegin(self) -> str:", "title": "" }, { "docid": "c444512a4f4e4f6a8af89d70d9282dcd", "score": "0.5081884", "text": "def r(start: Optional[int], cigar: Optional[str], strand: Optional[str] = \"+\") -> AlignedSegment:\n builder = SamBuilder()\n if start:\n r1, r2 = builder.add_pair(chrom=\"chr1\", start1=start, cigar1=cigar, strand1=strand)\n else:\n r1, r2 = builder.add_pair()\n return r1", "title": "" }, { "docid": "db7b0aab9e2b3685180c5adbf1bcfb9d", "score": "0.5077769", "text": "def search_for_block(self, start, end, count=1, join=\" \", max_len=1000, format_line=None):\n assert isinstance(count, int), \"count needs to be an integer\"\n assert isinstance(max_len, int), \"count needs to be an integer\"\n assert isinstance(join, str), \"join needs to be a string\"\n\n if count == 0:\n return None\n\n current_match = \"\"\n current_len = 0\n match = [None] * count\n\n #### we want a regex that will never match anything - and quickly - so trying to match something before the start of the line works\n if end is None:\n end = \"a^\"\n\n start_pattern = re.compile(start)\n end_pattern = re.compile(end)\n\n index = 0\n for line in self:\n if current_match:\n if end_pattern.search(line) or current_len >= max_len:\n match[index] = current_match\n current_match = None\n index += 1\n current_len = 0\n\n if index == count:\n break\n else:\n if format_line is not None:\n current_match = current_match + join + format_line(line.lstrip())\n else:\n current_match = current_match + join + line.lstrip()\n current_len += 1\n else:\n if start_pattern.search(line):\n if format_line is not None:\n current_match = format_line(line.lstrip())\n else:\n current_match = line.lstrip()\n current_len = 1\n\n if count == 1:\n return match[0]\n else:\n return match", "title": "" }, { "docid": "fd46f61c2c31f4fb581aa37a97d86bf9", "score": "0.5064656", "text": "def __init__(self, index: int, begin: int, block: bytes):\n self.index = index\n self.begin = begin\n self.block = block", "title": "" }, { "docid": "a8977874d9063ca28510672df796296c", "score": "0.50581986", "text": "def new_calculate_range(view, r):\r\n\r\n # FIXME: make sure this works with whitespace between markers, and doublecheck\r\n # with Vim to see whether '<;>' is allowed.\r\n # '<,>' returns all selected line blocks\r\n if r['left_ref'] == \"'<\" and r['right_ref'] == \"'>\":\r\n all_line_blocks = []\r\n for sel in view.sel():\r\n start = view.rowcol(sel.begin())[0] + 1\r\n end = view.rowcol(sel.end())[0] + 1\r\n if 
view.substr(sel.end() - 1) == '\\n':\r\n end -= 1\r\n all_line_blocks.append((start, end))\r\n return all_line_blocks, True\r\n \r\n # todo: '< and other marks\r\n if r['left_ref'] and (r['left_ref'].startswith(\"'\") or (r['right_ref'] and r['right_ref'].startswith(\"'\"))):\r\n return []\r\n\r\n # todo: don't mess up with the received ranged. Also, % has some strange\r\n # behaviors that should be easy to replicate.\r\n if r['left_ref'] == '%' or r['right_ref'] == '%':\r\n r['left_offset'] = 1\r\n r['right_ref'] = '$'\r\n\r\n current_line = None\r\n lr = r['left_ref']\r\n if lr is not None:\r\n current_line = calculate_relative_ref(view, lr) \r\n loffset = r['left_offset']\r\n if loffset:\r\n current_line = current_line or 0\r\n current_line += loffset\r\n\r\n searches = r['left_search_offsets']\r\n if searches:\r\n current_line = new_calculate_search_offsets(view, searches, current_line or calculate_relative_ref(view, '.'))\r\n left = current_line\r\n\r\n current_line = None\r\n rr = r['right_ref']\r\n if rr is not None:\r\n current_line = calculate_relative_ref(view, rr) \r\n roffset = r['right_offset']\r\n if roffset:\r\n current_line = current_line or 0\r\n current_line += roffset\r\n\r\n searches = r['right_search_offsets']\r\n if searches:\r\n current_line = new_calculate_search_offsets(view, searches, current_line or calculate_relative_ref(view, '.'))\r\n right = current_line\r\n\r\n if not right:\r\n right = left\r\n\r\n # todo: move this to the parsing phase? Do all vim commands default to '.' as a range?\r\n if not any([left, right]):\r\n left = right = calculate_relative_ref(view, '.')\r\n\r\n # todo: reverse range automatically if needed\r\n return [(left, right)], False", "title": "" }, { "docid": "782905b4955773a403e36ba7a9a55d91", "score": "0.5025284", "text": "def set_start_end(self, start_loc, end_loc):\n self.start = self.get_node(start_loc)\n self.end = self.get_node(end_loc)", "title": "" }, { "docid": "4a7b3d80f1019d562dbfdf1ed9dcf79c", "score": "0.5017543", "text": "def set_range(self, start, end):\r\n if start is not None:\r\n if start > self._num_data:\r\n raise ValueError('Invalid start index.')\r\n else:\r\n self._start = start\r\n self.seek(self._start)\r\n self._curr = self._start\r\n if end is not None:\r\n if end > start and end <= self._num_data:\r\n self._end = end\r\n else:\r\n raise ValueError('Invalid end index.')\r\n self._num_local_data = self._end - self._start", "title": "" }, { "docid": "bf77095a152d903c22d3a98130d33109", "score": "0.5009654", "text": "def __repr__(self):\r\n return '(' + str(self.start) + ';' + ' ' + str(self.end) + ')'", "title": "" }, { "docid": "c29c050871aafa1c60cf4a8f33ec5632", "score": "0.50086576", "text": "def after(\n self, start: int, end: int, annid=None, include_self=False, immediately=False\n ):\n self._create_index_by_offset()\n if immediately:\n intvs = self._index_by_offset.starting_at(end)\n else:\n intvs = self._index_by_offset.starting_from(end)\n # we need to filter self if self is zero-length!\n if not include_self and annid is not None:\n ignore = annid\n else:\n ignore = None\n return self._restrict_intvs(intvs, ignore=ignore)", "title": "" }, { "docid": "4f90dd68b459aba15120c0a01964282f", "score": "0.50022197", "text": "def __init__(self, regrange):\n self.range = regrange", "title": "" }, { "docid": "c95dd6f18ce546271b772e263f0cbd16", "score": "0.4991638", "text": "def extract_range(self, record, field_name, start, length):\n pass", "title": "" }, { "docid": "d788896cc62696e166306a192868c630", "score": 
"0.49750075", "text": "def extract(self, start, end, chrom=None):\n outseq = ''\n line_start = 1\n for line in self:\n if chrom is not None and self.id != chrom:\n continue\n line_end = line_start + len(line) - 1\n # if we haven't encountered the start yet, keep searching\n if line_end < start:\n line_start = line_end + 1\n continue\n slice_start = max(start, line_start) - line_start\n slice_end = min(end, line_end) - line_start + 1\n outseq += line[slice_start:slice_end]\n # done? (on the last line?)\n if line_end >= end:\n break\n line_start = line_end + 1\n return outseq", "title": "" }, { "docid": "fa5dc113b984b2794f75b2a7e6344d5f", "score": "0.49738085", "text": "def GetSpan(self):", "title": "" }, { "docid": "6b6a09d4410747b464ab2bf4bdf0d4d0", "score": "0.49726108", "text": "def slice(start, end):", "title": "" }, { "docid": "a4051739a85970697c3899e5386b65a7", "score": "0.49660826", "text": "def test_strand(self):\n with pysam.AlignmentFile('./data/mini_nla_test.bam') as f:\n for i,(R1,R2) in enumerate(pysamiterators.iterators.MatePairIterator(f)):\n frag = singlecellmultiomics.fragment.Fragment([R1,R2])\n self.assertIn( frag.get_strand(), [None,0,1])\n if frag.is_mapped:\n self.assertIn( frag.get_strand(), [0,1])", "title": "" }, { "docid": "125fa09b72c17c7b3d72830a0583ea69", "score": "0.4953", "text": "def find(self, s,sub,start,end):\n\t\tpass", "title": "" }, { "docid": "7ad4c46425a7c2dcdceff63c7cea210a", "score": "0.49490547", "text": "def ref( self ):\n return self._reference[ self._posn ]", "title": "" }, { "docid": "563c1b57f5abab8c0fbeb179a7dc0554", "score": "0.49476275", "text": "def getAddressRangeIterator(self, startAddress: ghidra.program.model.address.Address, endAddress: ghidra.program.model.address.Address) -> ghidra.program.model.address.AddressRangeIterator:\n ...", "title": "" }, { "docid": "193eb36885becd54f82b5751140cefdf", "score": "0.4946475", "text": "def adjrange(self, start, end):\n return Coords(self.name,\n start if start is not None else self.start,\n end if end is not None else self.end,\n self.strand, self.size)", "title": "" }, { "docid": "6c81969cd705acde7bb18d0d00e565c5", "score": "0.49434865", "text": "def __getitem__(self, key):\n if key == 'start' or key == 0:\n return self.a\n elif key == 'end' or key == 1:\n return self.b\n\n return None", "title": "" }, { "docid": "ca76c6400b47b1bdad586cb47b31291d", "score": "0.49405438", "text": "def find_start_end(grid):\r\n start = grid.index(\"emerald_block\")\r\n end = grid.index(\"redstone_block\")\r\n return (start, end)", "title": "" }, { "docid": "e7618ca429930988c7032999f6e42e44", "score": "0.4931168", "text": "def get_interval(self, start, end):\n\t\treturn self.file[start: end]", "title": "" }, { "docid": "19463db43c88cfeae5a7fb7338cba165", "score": "0.49303776", "text": "def __init__(self,\n reference_name=None, # type: str\n start=None, # type: int\n end=None, # type: int\n reference_bases=None, # type: str\n alternate_bases=None, # type: List[str]\n names=None, # type: List[str]\n quality=None, # type: float\n filters=None, # type: List[str]\n info=None, # type: Dict[str, Any]\n calls=None # type: List[VariantCall]\n ):\n # type: (...) 
-> None\n self.reference_name = reference_name\n self.start = start\n self.end = end\n self.reference_bases = reference_bases\n self.alternate_bases = alternate_bases or []\n self.names = names or []\n self.quality = quality\n self.filters = filters or []\n self.info = info or {}\n self.calls = calls or []", "title": "" }, { "docid": "d564c3438888de6dd3ff4889b28f6feb", "score": "0.4928802", "text": "def span(self) -> Range[T]:\n raise NotImplementedError()", "title": "" }, { "docid": "5bb65e0875966c234e88ef4a12639aeb", "score": "0.49217683", "text": "def get_rel_loc(self,coordinate,strand,txStart,txEnd,gene_div):\n\n lengths=[(i,int(round(1.0*(txEnd-txStart+1)/i))) for i in gene_div]\n if strand=='+':\n return [min((coordinate-txStart+1)/length+1,i) for i,length in lengths]\n else:\n return [min((txEnd-coordinate+1)/length+1,i) for i,length in lengths]", "title": "" }, { "docid": "c62be67924c7e3684a2dfee1568c94b7", "score": "0.49190974", "text": "def query(self, begin: int, end: int) -> Result | None:", "title": "" }, { "docid": "64f348f6c7ce86aa2c57a98bae8bef11", "score": "0.49187228", "text": "def block(self, f = 1, append = None):\n sequence = self.graph.debruijn_sequence(f)\n b = [self.trial_types[s] for s in sequence]\n if append == \"end\":\n return b + b[:self.n-1]\n elif append == \"start\":\n return b[-(self.n-1):] + b\n else:\n return b", "title": "" }, { "docid": "46f890f4a1612b4e8a55c632722dd697", "score": "0.49177834", "text": "def _goto(self, end):\n #console.log('_goto')\n self._position = end", "title": "" }, { "docid": "b6268053c10fd16fb94ce9a006dfabf9", "score": "0.49044132", "text": "def for_range(self, begint, endt, name=\"i\", thread_num=1,\n thread_type=\"whole\", block_num=1,\n dtype=\"int32\", for_type=\"serial\"):\n # pylint: disable=R0913, R0914, W0221, E1101\n # disable R0914, beacuse arugments are too many, and function is close\n # disable W0221, because for_range is to extend the parent class's\n # for_range\n return self.for_range_(begint, endt, name=name, thread_num=thread_num,\n thread_type=thread_type, block_num=block_num,\n dtype=dtype, for_type=for_type)", "title": "" }, { "docid": "6c3cad4ba6051f8f48d1d78abf3fdb65", "score": "0.4904088", "text": "def get_range_string(self):\n return self.left.chr+\":\"+str(self.left.end)+'/'+self.right.chr+\":\"+str(self.right.start)", "title": "" }, { "docid": "3529a5f5a1d30fafdbef527fbe09c375", "score": "0.4895525", "text": "def ref_link(self) -> tuple[str, int, int]:\n return (self.ref_name, self.ref_start, self.ref_end)", "title": "" }, { "docid": "8ad0f7c9d77b5d4f355fe38593f8279a", "score": "0.48908833", "text": "def get_people(self, start, end):\n if end > CIRCLE:\n yield from self.get_people(start, CIRCLE)\n yield from self.get_people(0, end - CIRCLE)\n else:\n for loc in self.people_locations:\n if loc < start:\n continue\n if loc >= end:\n break\n yield loc", "title": "" }, { "docid": "3c1d5734cea35edcdc9f48709921d11d", "score": "0.4889701", "text": "def parse_borrowed(parts: List[str]) -> Iterator[Ref]:\n if len(parts) > 3:\n yield Ref(origin=parts[2], destination=None, word=parts[3], kind=\"borrowed\")", "title": "" }, { "docid": "fb99fdb02318f458f4f8da78fb10d445", "score": "0.48859918", "text": "def _calc_coverage_single_end(self, ref_seq, bam):\n for alignment in bam.fetch(ref_seq):\n self._add_coverage_of_single_end_reads(alignment)", "title": "" }, { "docid": "483f2ad0ab9cd5a4d93486d3885ea651", "score": "0.48773125", "text": "def begin(self):\n return self.a", "title": "" }, { "docid": 
"55cef9b2d96ee42a7913b5ad8146a14f", "score": "0.48707762", "text": "def start(self) -> Node:\n if self.start_ref is not None:\n return self.start_ref()\n return None", "title": "" }, { "docid": "5b9a3e933769cfc6d5abf319213b96aa", "score": "0.48541695", "text": "def reset_strand(self, new_strand: Strand) -> \"Location\":", "title": "" }, { "docid": "608c510f4511f3c1674ce9119f6cf2fd", "score": "0.48529983", "text": "def test_get_begin_end_2(self):\n self.feature.start = 5\n self.feature.stop = 10\n self.feature.orientation = \"r\"\n start, end = self.feature.get_begin_end()\n with self.subTest():\n self.assertEqual(start, 10)\n with self.subTest():\n self.assertEqual(end, 5)", "title": "" }, { "docid": "a9265edd8606800b4d4d39358f70ac59", "score": "0.4834911", "text": "def begin(self):", "title": "" }, { "docid": "c333090a5a0e075d0237255a31a2b020", "score": "0.48320192", "text": "def __init__(self, start=0):\n self._current = start", "title": "" }, { "docid": "40b75930ed9f9ac10ae741d41ef87a9a", "score": "0.4828171", "text": "def within(self, loc):\n pass", "title": "" }, { "docid": "8cbeabc136e1c8068f684c1a5dbd1c4d", "score": "0.48170725", "text": "def __init__(self, start = 0):\n self._current = start", "title": "" }, { "docid": "caac7d119ba93f9c95b125de97ca51a4", "score": "0.47986224", "text": "def rrcoord_of_block (self,pt):\n ref = self.ref\n coords = [pt]\n (rco,jnk1,jnk2)=Lxy.dlat2xy(ref,coords)\n return rco[0]", "title": "" }, { "docid": "95214c2bfa1854919b98fc21a13466c5", "score": "0.47964376", "text": "def get_strand(self, loc):\n if(loc is not None):\n return self._strands[loc]\n return None", "title": "" } ]
41fa50a6a8868a4f24da5894655df3c0
Return `true` when self and other are not equal, false otherwise.
[ { "docid": "1440db2a248ce1382e63667cb5d4b50e", "score": "0.0", "text": "def __ne__(self, other: 'VolumeCollectionNext') -> bool:\n return not self == other", "title": "" } ]
[ { "docid": "87ebdeb25fa7429b0644fcaa1762fc07", "score": "0.8563128", "text": "def __eq__(self, other):\n return self != other", "title": "" }, { "docid": "bc10854e638575ab1efa1cf275b9677b", "score": "0.8528623", "text": "def __ne__(self, other):\n return False if self.__eq__(other) else True", "title": "" }, { "docid": "a7d9baf31d5cc301e2587b324fb52cfd", "score": "0.85016024", "text": "def __ne__ (self, other):\n return not self == other # rely on existing __eq__ definition", "title": "" }, { "docid": "403224fd5115046cca727f8e26be9058", "score": "0.8437724", "text": "def equals(self, other):\n\n\t\treturn False", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, 
other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, 
other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, 
other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, 
other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" }, { "docid": "30e027ef6788367a3ed78c7909bbf102", "score": "0.84058446", "text": "def __ne__(self, other):\n return not self == other", "title": "" } ]
b75a97c734e3388d69d630dcd8032152
Builds a notification channel message from args.
[ { "docid": "eea622a8cc2d7165c2e894b1bab6a893", "score": "0.6235747", "text": "def CreateNotificationChannelsFromArgs(args, messages):\n\n if args.IsSpecified('channels_from_prometheus_alertmanager_yaml'):\n alert_manager_yaml = args.channels_from_prometheus_alertmanager_yaml\n channels = NotificationChannelMessageFromString(\n alert_manager_yaml, messages\n )\n else:\n channels = []\n return channels", "title": "" } ]
[ { "docid": "85f4a0cd449992fd864a84b85c7225f8", "score": "0.7001008", "text": "def GetNotificationChannelFromArgs(args, messages):\n channels_base_flags = ['--display-name', '--channel-content',\n '--channel-content-from-file']\n ValidateAtleastOneSpecified(args, channels_base_flags)\n\n channel_string = args.channel_content or args.channel_content_from_file\n if channel_string:\n channel = MessageFromString(channel_string, messages.NotificationChannel,\n 'NotificationChannel',\n field_remappings=CHANNELS_FIELD_REMAPPINGS)\n # Without this, labels will be in a random order every time.\n if channel.labels:\n channel.labels.additionalProperties = sorted(\n channel.labels.additionalProperties, key=lambda prop: prop.key)\n else:\n channel = messages.NotificationChannel()\n\n enabled = args.enabled if args.IsSpecified('enabled') else None\n return ModifyNotificationChannel(channel,\n channel_type=args.type,\n display_name=args.display_name,\n description=args.description,\n enabled=enabled)", "title": "" }, { "docid": "c9efcf15449d197ed1f4206ba9eaa946", "score": "0.6807082", "text": "def build_message(msg_addr, msg_args):\r\n\r\n builder = osc_message_builder.OscMessageBuilder(address=msg_addr)\r\n if not hasattr(msg_args, '__iter__') or isinstance(msg_args, (str, bytes)):\r\n msg_args = [msg_args]\r\n for msg_arg in msg_args:\r\n builder.add_arg(msg_arg)\r\n msg = builder.build()\r\n return msg", "title": "" }, { "docid": "78dd3de83bc05d8d269b99bd3681d092", "score": "0.6528828", "text": "def build_message(self, message):\n # Make sure all args are strings\n message['args'] = [str(i) for i in message['args']]\n return \"{0} {1}\".format(message['command'], ' '.join(message['args']))", "title": "" }, { "docid": "4036ee30d88d8cf02a6281363238c37b", "score": "0.63657486", "text": "def chat_msg(where, args):\n l = []\n for k in args.keys():\n if k != \"message\":\n l.append( k + \":\" + args[k] )\n l.append( \"message\"+\":\"+args[\"message\"] )\n\n str_args = \"@\".join(l)\n msg = Message(where, \"chat\", str_args, 0)\n return msg", "title": "" }, { "docid": "80493856e7bd2c4a683ba56353333143", "score": "0.6198967", "text": "def build_message (message_type, *data_args):\r\n\r\n args = pack_data(*data_args)\r\n data = '%s%s%s' % (message_type,\r\n struct.pack('!i', len(args) + 4),\r\n args)\r\n return data", "title": "" }, { "docid": "f99e4c43dfb1dc7da625a540292a095a", "score": "0.6046136", "text": "def build_message (message_type, *data_args):\r\n\r\n args = pack_data(*data_args)\r\n data = ''.join ([message_type, struct.pack('!i', len(args) + 4), args])\r\n return data", "title": "" }, { "docid": "5feec011c9556f731b10e0b57ca504dc", "score": "0.6019015", "text": "def _build_notification(content):\n\n notification = domish.Element((None, 'message'))\n notification['from'] = config.JID\n # type 'headline' is more appropriate really but headlines\n # are often displayed in a dialog so that's not cool\n notification['type'] = 'chat'\n notification.addElement('body', content=content)\n return notification", "title": "" }, { "docid": "a3ad935dc9f56ad2b904bac33788b283", "score": "0.60171527", "text": "def BuildNotification( method, parameters ):\n return _BuildMessageData( {\n 'method': method,\n 'params': parameters,\n } )", "title": "" }, { "docid": "6e1f16c3e36c6d12d3ef1688a4df1d06", "score": "0.57624954", "text": "def irc_329(self, command, prefix, args):\n if self.channels.has_key(args[0]):\n self.channels[args[0]].announce(\"Channel created at %s\" % \\\n time.ctime(int(args[2])))", "title": "" }, { 
"docid": "e067ebc584d3f05cc815ed8ed242c44d", "score": "0.57138115", "text": "def _build_message(self, message, **kwargs):\n return message or self.__doc__ % kwargs", "title": "" }, { "docid": "52c282e2918377073adfc0d29f4e78dc", "score": "0.56343144", "text": "def CreateBasePromQLNotificationChannel(channel_name, messages):\n channel = messages.NotificationChannel()\n channel.displayName = channel_name\n channel.description = MIGRATED_FROM_PROMETHEUS_TEXT\n channel.labels = messages.NotificationChannel.LabelsValue()\n return channel", "title": "" }, { "docid": "a1568a1a486d05d3b6ffddc5fa63d0ed", "score": "0.55430573", "text": "def topic(self, channel, nick, host, *args):\n self.topic(channel, ' '.join(args))", "title": "" }, { "docid": "a87eafea738f5bd2b2418224c68bfa41", "score": "0.55375886", "text": "def construct_message(self):\n pass", "title": "" }, { "docid": "64b702ab7686efcd0cdb28cfeacde8b0", "score": "0.546217", "text": "def make_message(self, event_data: dict, cache: bool = True) -> Message:\n message = Message(self.client, **event_data)\n\n if message in self.messages:\n # don't bother re-caching\n i = self.messages.index(message)\n return self.messages[i]\n\n # discord won't give us the Guild id\n # so we have to search it from the channels\n channel_id = int(event_data.get(\"channel_id\", 0))\n channel = self.find_channel(channel_id)\n\n author_id = int(event_data.get(\"author\", {}).get(\"id\", 0))\n\n message.channel = channel\n if channel is not None:\n message.guild_id = channel.guild_id\n if message.channel.type == ChannelType.PRIVATE:\n if author_id == self._user.id:\n message.author = self._user\n else:\n message.author = message.channel.user\n elif message.channel.type == ChannelType.GROUP:\n message.author = message.channel.recipients.get(author_id, None)\n else:\n # Webhooks also exist.\n if event_data.get(\"webhook_id\") is not None:\n message.author = self.make_webhook(event_data)\n else:\n message.author = message.guild.members.get(author_id)\n\n for reaction_data in event_data.get(\"reactions\", []):\n emoji = reaction_data.get(\"emoji\", {})\n reaction = Reaction(**reaction_data)\n\n if \"id\" in emoji and emoji[\"id\"] is not None:\n emoji_obb = message.guild.emojis.get(int(emoji[\"id\"]))\n if emoji_obb is None:\n emoji_obb = Emoji(id=emoji[\"id\"], name=emoji[\"name\"])\n else:\n emoji_obb = emoji.get(\"name\", None)\n\n reaction.emoji = emoji_obb\n message.reactions.append(reaction)\n\n if cache and message not in self.messages:\n self.messages.append(message)\n\n return message", "title": "" }, { "docid": "95c064ad6c106fd0f55239a242772892", "score": "0.54533577", "text": "def command_build(self, parameters):\n message = dict(self.adapter_templates[\"build\"])\n obj = parameters['createdThing']['objectDescriptor'] #shouldn't this just be 'descriptor'?\n message[\"quantity\"] = self.get_quantity(obj)\n message[\"unit_type\"] = self.get_type(obj)\n message[\"ecg_id\"] = self.get_ecg_id(True)\n return message", "title": "" }, { "docid": "8abe34bbbe3d6f7b0f0a51606c759096", "score": "0.542072", "text": "def build(self, *args, **params):\n pass", "title": "" }, { "docid": "b097462ef3afc179a06402f9724edd2d", "score": "0.54105353", "text": "def get_notification_channel():\n pass", "title": "" }, { "docid": "47bd77655b740032570dc11c0f1475db", "score": "0.5403763", "text": "def build_command(self, command, *args):\n l = [\n '*{}'.format(len(args)+1),\n '${}'.format(len(command)),\n command,\n ]\n \n for arg in args:\n arg = str(arg)\n l.append('${}'.format(len(arg)))\n 
l.append(arg)\n\n l.append('')\n\n return '\\r\\n'.join(l).encode('latin-1')", "title": "" }, { "docid": "c6c745d565e76916ae2950c91b06ffd1", "score": "0.5274699", "text": "def get_message(name, *args):\r\n return get_message_from_template(\"messages\", name, args)", "title": "" }, { "docid": "033e18bcfd39b03ad54c7e030774598a", "score": "0.5271151", "text": "def __init__(__self__,\n resource_name: str,\n args: ChannelEmailArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "title": "" }, { "docid": "1fb540b947e35879317e6b805ac83e31", "score": "0.52631605", "text": "def _construct_command(command, *args) -> bytes:\n return bytes(f'{command} {\" \".join(args)}\\r\\n', 'utf-8')", "title": "" }, { "docid": "eb0012af1504d26e4b416eeec14503ef", "score": "0.52561724", "text": "def build_message(status, message='', data={}):\n return jsonify(status=status, message=message, data=data)", "title": "" }, { "docid": "55efdf8745c11b70672ed718215f6061", "score": "0.52552235", "text": "def get_message(args, template, use_keys=[]):\n context = {}\n for k, v in args.items():\n if v is None:\n v = ''\n elif k == 'labels':\n v = ' '.join(v)\n if k in use_keys:\n context[k] = v\n message = run_editor(template % context)\n body = []\n end_of_body = False\n new = {}\n for line in message.split('\\n'):\n if not line.startswith('#') and not end_of_body:\n body.append(line)\n else:\n end_of_body = True\n m = ARG_RE.match(line)\n if m:\n key = m.group(1).lower()\n value = m.group(2)\n if key == 'labels':\n value = value.split()\n if value:\n new[key] = value\n new['body'] = '\\n'.join(body).strip()\n if not new['body']:\n sys.exit(\"Action aborted! Message is empty.\")\n return new", "title": "" }, { "docid": "6d362a3ca9ec14b5b043afc724e7a25c", "score": "0.5228774", "text": "def _build_slack_message(self, notification):\n body = {'alarm_id': notification.alarm_id,\n 'alarm_definition_id': notification.raw_alarm['alarmDefinitionId'],\n 'alarm_name': notification.alarm_name,\n 'alarm_description': notification.raw_alarm['alarmDescription'],\n 'alarm_timestamp': notification.alarm_timestamp,\n 'state': notification.state,\n 'old_state': notification.raw_alarm['oldState'],\n 'message': notification.message,\n 'tenant_id': notification.tenant_id,\n 'metrics': notification.metrics}\n\n slack_request = {}\n slack_request['text'] = json.dumps(body, indent=3)\n\n return slack_request", "title": "" }, { "docid": "bcf59fd5d41349714bb7a59220f0cdf6", "score": "0.5226396", "text": "def create_command(self, *args):\n return f'{self.CMD_DELIMITER.join(str(a) for a in args)}{self.CMD_DELIMITER}'.encode()", "title": "" }, { "docid": "1c28fc547f7caf1482b6a4883c74e605", "score": "0.5205326", "text": "def make_msg_entry(author, target, message, channel):\n return {\n 'author':author,\n 'target': target,\n 'message': message,\n 'channel': channel,\n 'time': datetime.datetime.utcnow().timestamp(),\n }", "title": "" }, { "docid": "6dcb31da68f037b478f7df0376553872", "score": "0.51940274", "text": "def message(**kwargs):\n defaults = {\n 'message_type': 0,\n 'sender': 'SHIP1',\n 'recipient': 'SHIP2',\n 'text': 'HAIL.',\n 'created': int(time.time()),\n }\n\n if 'created' in kwargs:\n created = kwargs['created']\n if isinstance(created, datetime.datetime):\n created = int(time.mktime(created.time_tuple()))\n kwargs['created'] = created\n\n defaults.update(kwargs)\n\n return Message(**defaults)", "title": "" }, { "docid": "7cb96e68b17c462f8a7ca48cafbd8a32", "score": "0.51890916", "text": "def notification_event(bus, message):\n global 
NOTIFY\n NOTIFY = message.get_args_list()[0]", "title": "" }, { "docid": "13eb10be912e8cfb4f09755333988086", "score": "0.51813716", "text": "async def create_message(\n self,\n channel: snowflakes.SnowflakeishOr[channels_.TextableChannel],\n content: undefined.UndefinedOr[typing.Any] = undefined.UNDEFINED,\n *,\n attachment: undefined.UndefinedOr[files.Resourceish] = undefined.UNDEFINED,\n attachments: undefined.UndefinedOr[typing.Sequence[files.Resourceish]] = undefined.UNDEFINED,\n component: undefined.UndefinedOr[special_endpoints.ComponentBuilder] = undefined.UNDEFINED,\n components: undefined.UndefinedOr[typing.Sequence[special_endpoints.ComponentBuilder]] = undefined.UNDEFINED,\n embed: undefined.UndefinedOr[embeds_.Embed] = undefined.UNDEFINED,\n embeds: undefined.UndefinedOr[typing.Sequence[embeds_.Embed]] = undefined.UNDEFINED,\n tts: undefined.UndefinedOr[bool] = undefined.UNDEFINED,\n nonce: undefined.UndefinedOr[str] = undefined.UNDEFINED,\n reply: undefined.UndefinedOr[snowflakes.SnowflakeishOr[messages_.PartialMessage]] = undefined.UNDEFINED,\n mentions_everyone: undefined.UndefinedOr[bool] = undefined.UNDEFINED,\n mentions_reply: undefined.UndefinedOr[bool] = undefined.UNDEFINED,\n user_mentions: undefined.UndefinedOr[\n typing.Union[snowflakes.SnowflakeishSequence[users.PartialUser], bool]\n ] = undefined.UNDEFINED,\n role_mentions: undefined.UndefinedOr[\n typing.Union[snowflakes.SnowflakeishSequence[guilds.PartialRole], bool]\n ] = undefined.UNDEFINED,\n ) -> messages_.Message: # noqa: E501 - Line too long", "title": "" }, { "docid": "cfd3335f11a55f70badec01f2ac7a167", "score": "0.5180069", "text": "def build_message(self, no_checksum=False):\n\t\tmessage = \"!\" + str(self.message_name) + \",\" + str(self.msg_id) + \",\"\n\t\tfor param in self.params:\n\t\t\tif issubclass(type(param), numpy.ndarray):\n\t\t\t\tmessage += base64.b64encode(param) + \",\"\n\t\t\telse:\n\t\t\t\tmessage += base64.b64encode(str(param)) + \",\"\n\n\t\tif no_checksum is False:\n\t\t\tmessage += self.get_checksum(message) + \"#\"\n\n\t\treturn message", "title": "" }, { "docid": "97a2a862ce17e3d62db6efe548ca8306", "score": "0.51536024", "text": "def irc_332(self, command, prefix, args):\n if self.channels.has_key(args[1]):\n self.channels[args[1]].topic(args[-1])\n # state.channel.topic = ...", "title": "" }, { "docid": "b951fe2a97b83c74f83e221b634f1b70", "score": "0.51533777", "text": "def construct_channel(self, **kwargs):\n raise NotImplementedError(\"Chef subclass must implement this method\")", "title": "" }, { "docid": "51e4ca0a29d9f4d7ac8df09f895027f5", "score": "0.51379097", "text": "def construct_message(message, *args, **kwargs):\n\n #REQUEST ID\n id = get_new_request_id_bytes()\n\n #PAYLOAD\n payload_values = []\n arg_iterator = 0\n if message.elements:\n for element in message.elements:\n if element.name in kwargs.keys():\n arg = kwargs[element.name]\n else:\n if arg_iterator >= len(args):\n raise Exception('Parameter %s is missing' % element.name)\n arg = args[arg_iterator]\n arg_iterator += 1\n try:\n value = element.func(arg, **element.kwargs)\n except Exception as e:\n raise Exception(\"Error while parsing value of parameter '%s' : ERROR - %s\" % (element.name, e))\n payload_values.append(value)\n\n payload = id + b''.join(payload_values)\n\n ber = encode_ber(len(payload))\n\n return bytearray(HEADER + message.key + ber + payload)", "title": "" }, { "docid": "58d41399e74cc2201dad418c90004dfd", "score": "0.51126504", "text": "def create_notification(params):\n params['id'] = 'nt' + 
random_hex_generator(4)\n params['created_at'] = time.time()\n params['updated_at'] = time.time()\n params['metadata'] = None\n return params", "title": "" }, { "docid": "ac52afd0bf855f9bcddb041389c8aa74", "score": "0.50856286", "text": "def make_object(self, data, **kwargs):\n return Notification(**data)", "title": "" }, { "docid": "ac52afd0bf855f9bcddb041389c8aa74", "score": "0.50856286", "text": "def make_object(self, data, **kwargs):\n return Notification(**data)", "title": "" }, { "docid": "ac52afd0bf855f9bcddb041389c8aa74", "score": "0.50856286", "text": "def make_object(self, data, **kwargs):\n return Notification(**data)", "title": "" }, { "docid": "bcdef87758a1eb334119f990db0cd9c0", "score": "0.50849044", "text": "def message(self, channel: Channel, text: str) -> None:", "title": "" }, { "docid": "fe45a8677a9bd56ceab12f4fb345aed8", "score": "0.5083392", "text": "def build(self, inputs, channels):\n pass", "title": "" }, { "docid": "c4f3c9c2a5ae24a2c4a410fc1b90f8e7", "score": "0.50656813", "text": "def __call__(self, *args, **kwargs):\n if isinstance(self.encoding, DefaultEncoding):\n with channel(self.device, self.io) as dev:\n dev.send(\n self.message.format(*args, **kwargs))\n else:\n with channel(self.device, self.io) as dev:\n dev.send(\n self.message.format(*args, **kwargs), encoding=self.encoding)", "title": "" }, { "docid": "bd4482baca24cbbe59a962c8a60e563c", "score": "0.5056964", "text": "def create_message(self, level, msg_text, extra_tags):\n raise NotImplementedError()", "title": "" }, { "docid": "34bd4c47caa2bce03d1a5afbc3383ddf", "score": "0.5031514", "text": "def build_chat():\r\n add_community('SWEN-331')\r\n add_community('SWEN-440')\r\n add_community('SWEN-344')\r\n\r\n \"\"\"channels for SWEN-331\"\"\"\r\n add_channel('General', '1')\r\n add_channel('TA', '1')\r\n add_channel('Random', '1')\r\n\r\n \"\"\"channels for SWEN-440\"\"\"\r\n add_channel('General', '2')\r\n add_channel('TA', '2')\r\n add_channel('Random', '2')\r\n\r\n \"\"\"channels for SWEN-344\"\"\"\r\n add_channel('General', '3')\r\n add_channel('TA', '3')\r\n add_channel('Random', '3')", "title": "" }, { "docid": "8f5d2f0f0d1342581bcdef63ce276dd8", "score": "0.50044173", "text": "def nickmessage(self, user, channel, msg):\r\n pass", "title": "" }, { "docid": "c38bff68d44d08b3b2635bacb7d2e1cf", "score": "0.5004001", "text": "def create_welcome_message(sender,**kwargs):\n if kwargs.get('created', False):\n Notification.objects.create(recipient=kwargs.get('instance'),\n title=\"Welcome to ToolShare!\",\n message=\"Thanks for signing up!\")", "title": "" }, { "docid": "fb88acb3f786305a1c6f97df8ccd9e48", "score": "0.5001806", "text": "def Run(self, args):\n\n client = cloudbuild_util.GetClientInstance()\n messages = cloudbuild_util.GetMessagesModule()\n\n trigger = messages.BuildTrigger()\n if args.trigger_config:\n trigger = cloudbuild_util.LoadMessageFromPath(\n path=args.trigger_config,\n msg_type=messages.BuildTrigger,\n msg_friendly_name='build trigger config',\n skip_camel_case=['substitutions'])\n else:\n trigger = self.ParseTriggerFromFlags(args)\n\n # Send the Create request\n project = properties.VALUES.core.project.Get(required=True)\n created_trigger = client.projects_triggers.Create(\n messages.CloudbuildProjectsTriggersCreateRequest(\n buildTrigger=trigger, projectId=project))\n\n trigger_resource = resources.REGISTRY.Parse(\n None,\n collection='cloudbuild.projects.triggers',\n api_version='v1',\n params={\n 'projectId': project,\n 'triggerId': created_trigger.id,\n })\n 
log.CreatedResource(trigger_resource)\n\n return created_trigger", "title": "" }, { "docid": "9f10645cab109822a170facb156a9f28", "score": "0.499378", "text": "def irc_478(self, command, prefix, args):\n #\n # XXX make proper translation of 'char' to listname\n if self.channels.has_key(args[1]):\n self.channels[args[1]].announce( \"Channel list is full (%c)\" % args[1])", "title": "" }, { "docid": "5111aeeb48304d17bda7395f9f4f9adf", "score": "0.49872994", "text": "def build():\n msg.write(msg.INFORMATION, 'Done!')", "title": "" }, { "docid": "e5aabb573ff254c7d701e6e8898cc570", "score": "0.4974635", "text": "def createMessage(self, cmd, headers, body):\n frame = Frame()\n frame.command = cmd\n frame.headers = headers\n frame.body = body\n return frame.pack()", "title": "" }, { "docid": "b09958129715723a7b943f38273550db", "score": "0.49736002", "text": "def format_args_in_redis_protocol(*args):\n buf = WriteBuffer()\n l = \"*%d\\r\\n\" % len(args) # noqa: E741\n if six.PY2:\n buf.append(l)\n else: # pragma: no cover\n buf.append(l.encode('utf-8'))\n for arg in args:\n if isinstance(arg, six.text_type):\n # it's a unicode string in Python2 or a standard (unicode)\n # string in Python3, let's encode it in utf-8 to get raw bytes\n arg = arg.encode('utf-8')\n elif isinstance(arg, six.string_types):\n # it's a basestring in Python2 => nothing to do\n pass\n elif isinstance(arg, six.binary_type): # pragma: no cover\n # it's a raw bytes string in Python3 => nothing to do\n pass\n elif isinstance(arg, six.integer_types):\n tmp = \"%d\" % arg\n if six.PY2:\n arg = tmp\n else: # pragma: no cover\n arg = tmp.encode('utf-8')\n elif isinstance(arg, WriteBuffer):\n # it's a WriteBuffer object => nothing to do\n pass\n else:\n raise Exception(\"don't know what to do with %s\" % type(arg))\n l = \"$%d\\r\\n\" % len(arg) # noqa: E741\n if six.PY2:\n buf.append(l)\n else: # pragma: no cover\n buf.append(l.encode('utf-8'))\n buf.append(arg)\n buf.append(b\"\\r\\n\")\n return buf", "title": "" }, { "docid": "bc101544de9063b0a1392bdf66cc2576", "score": "0.49516234", "text": "def compose_message(donor_info):\n\n return(\"\\nTo: {name}\\nSubject: Thank you.\\n\\n{name} thank you for your previous generous donation of \"\n \"{last_donation:<,.2f}.\\nYou're total donations to date are now: {total_donations:<,.2f}.\".format(**donor_info))", "title": "" }, { "docid": "d1320829d30a26e7404e9f2ff54d711c", "score": "0.49490345", "text": "def construct_message(msg):\n from brokkr.config.unit import UNIT_CONFIG\n from brokkr.config.metadata import METADATA\n\n sensor_name = f\"{METADATA['name']}{UNIT_CONFIG['number']:02d}\"\n\n header = f\"Message from sensor {sensor_name} at {UNIT_CONFIG['site_description']}\"\n\n msg = '\\n'.join([header, msg])\n\n return msg", "title": "" }, { "docid": "46af546de0c9e5d3fa23736fd42eff79", "score": "0.49480566", "text": "def Run(self, args):\n\n client = cloudbuild_util.GetClientInstance()\n messages = cloudbuild_util.GetMessagesModule()\n\n trigger = messages.BuildTrigger()\n if args.trigger_config:\n trigger = cloudbuild_util.LoadMessageFromPath(\n path=args.trigger_config,\n msg_type=messages.BuildTrigger,\n msg_friendly_name='build trigger config',\n skip_camel_case=['substitutions'])\n else:\n repo_ref = args.CONCEPTS.repo.Parse()\n repo = repo_ref.reposId\n trigger = messages.BuildTrigger(\n description=args.description,\n triggerTemplate=messages.RepoSource(\n repoName=repo,\n branchName=args.branch_pattern,\n tagName=args.tag_pattern,\n ),\n )\n\n # Build Config\n if 
args.build_config:\n trigger.filename = args.build_config\n trigger.substitutions = cloudbuild_util.EncodeTriggerSubstitutions(\n args.substitutions, messages)\n if args.dockerfile:\n project = properties.VALUES.core.project.Get(required=True)\n image = args.dockerfile_image if args.dockerfile_image else 'gcr.io/%s/%s:$COMMIT_SHA' % (\n project, repo)\n trigger.build = messages.Build(steps=[\n messages.BuildStep(\n name='gcr.io/cloud-builders/docker',\n dir=args.dockerfile_dir,\n args=['build', '-t', image, '-f', args.dockerfile, '.'],\n )\n ])\n # Include/Exclude files\n if args.included_files:\n trigger.includedFiles = args.included_files\n if args.ignored_files:\n trigger.ignoredFiles = args.ignored_files\n\n # Send the Create request\n project = properties.VALUES.core.project.Get(required=True)\n created_trigger = client.projects_triggers.Create(\n messages.CloudbuildProjectsTriggersCreateRequest(\n buildTrigger=trigger, projectId=project))\n\n trigger_resource = resources.REGISTRY.Parse(\n None,\n collection='cloudbuild.projects.triggers',\n api_version='v1',\n params={\n 'projectId': project,\n 'triggerId': created_trigger.id,\n })\n log.CreatedResource(trigger_resource)\n\n return created_trigger", "title": "" }, { "docid": "3e124e7fc789d510014eb49edfb88126", "score": "0.49472663", "text": "def push(options, *args):\r\n for f in args:\r\n push_one(options.server, f, comment=options.message)", "title": "" }, { "docid": "b5b17498a573f5f12869ffb5903bf2d8", "score": "0.49391407", "text": "def generate_msg(self, msg, c):\r\n c.send(msg.encode(FORMAT))", "title": "" }, { "docid": "7933225fe022f63d7d6426c4fe0c9a31", "score": "0.49357966", "text": "def build_message(self, topic, extendedData={}, recipients=['communication_modules']):\n\n msg = Message(\n topic=topic,\n sender_id=self._id, \n sender_type=self._type, \n extended_data=extendedData, \n recipients=recipients, \n timestamp=datetime.datetime.utcnow())\n return msg", "title": "" }, { "docid": "a494f646ef590f75c50dec2e9ee30a96", "score": "0.49323547", "text": "def compose_commit_message(title, body='', agent=''):\n # force to str\n if not body: body = ''\n if not agent: agent = ''\n # formatting\n if body: body = '\\n\\n%s' % body\n if agent: agent = '\\n\\n@agent: %s' % agent\n return '%s%s%s' % (title, body, agent)", "title": "" }, { "docid": "eaf5d978bbfaf3dde220a816f37498f8", "score": "0.49223387", "text": "def build_ping_message(user_id, event):\n user_language = DatabaseController.load_selected_language(user_id)\n message = \"*{}*\\n\\n\".format(receive_translation(\"event_reminder\", user_language))\n\n message += \"*{}:* {}\\n\".format(receive_translation(\"event\", user_language), event.name)\n message += \"*{}:* {}\\n\".format(receive_translation(\"event_content\", user_language), event.content)\n message += \"*{}:* {}\\n\".format(receive_translation(\"event_start\", user_language), event.event_time)\n message += \"\\n\"\n\n return message", "title": "" }, { "docid": "1b5d811919d838fabd074e462315098c", "score": "0.49124753", "text": "def on_message(server, user, channel, message):", "title": "" }, { "docid": "528bcde41e7d0b9c926178257ee2d1d5", "score": "0.49099693", "text": "def _build_track_message(info_dict, mention_requester=False):\n ret = \"**{}**\".format(info_dict[\"title\"])\n if info_dict.get(\"uploader\"):\n ret += \" by {}\\n\".format(info_dict[\"uploader\"])\n\n if info_dict.get(\"requester\"):\n ret += \"\\nrequested by {}\".format(info_dict[\"requester\"].mention if mention_requester else\n 
str(info_dict[\"requester\"]))\n\n return ret.format(info_dict)", "title": "" }, { "docid": "fc65e408d841f9a3050fc0f6ccfa3c97", "score": "0.49047774", "text": "def new_channel() -> Channel:\n return Channel(descriptor='', token='', sender=new_entity(), recipients=[], domain='', min_period=0.0, max_size=0)", "title": "" }, { "docid": "cf784c6ab24bf00b8f5b0822e9b51b47", "score": "0.49021232", "text": "def notify_command(ctx, nthreads, no_icons):\n no_status_server = ctx.parent.params[\"no_status_server\"]\n\n return notify(\n send_to=settings.email,\n use_icons=not no_icons,\n nthreads=nthreads,\n status_server=not no_status_server,\n )", "title": "" }, { "docid": "cf736d8abb951b9f81007abec7b970ac", "score": "0.48987988", "text": "def build_msg(self, str_msg, context_str=None):\n\n if context_str is None:\n context_str = 'normal'\n\n if context_str in self.context.keys():\n if isinstance(str_msg, list):\n str_msg = ', '.join([str(s) for s in str_msg])\n\n final_msg = self.context[context_str] + str_msg + Style.RESET_ALL\n else:\n raise ValueError(\"Not a valid context\")\n return final_msg", "title": "" }, { "docid": "1e042c9b4e70465d0dabf36ec5900192", "score": "0.48698723", "text": "def create_msg():\n # Collect all parameters\n to = argToList(demisto.getArg('to'))\n cc = argToList(demisto.getArg('cc'))\n bcc = argToList(demisto.getArg('bcc'))\n additional_header = argToList(demisto.getArg('additionalHeader'))\n subject = demisto.getArg('subject') or ''\n body = demisto.getArg('body') or ''\n html_body = demisto.getArg('htmlBody') or ''\n reply_to = demisto.getArg('replyTo')\n template_params = parse_template_params()\n if template_params:\n body = body.format(**template_params)\n html_body = html_body.format(**template_params)\n\n # Basic validation - we allow pretty much everything, but you have to have at least a recipient\n # We allow messages without subject and also without body\n if not to and not cc and not bcc:\n return_error_mail_sender('You must have at least one recipient')\n\n attachments = collect_attachments()\n attachments.extend(collect_manual_attachments())\n\n # Let's see what type of message we are talking about\n if not html_body:\n # This is a simple text message - we cannot have CIDs here\n if len(attachments) > 0:\n # This is multipart - default is mixed\n msg = MIMEMultipart() # type: Message\n msg.preamble = 'The message is only available on a MIME-aware mail reader.\\n'\n msg.attach(MIMEText(body, 'plain', UTF_8))\n for att in attachments:\n handle_file(msg, att['name'], att['maintype'], att['subtype'], None, att['data'])\n else:\n # Just text, how boring\n msg = MIMEText(body, 'plain', UTF_8)\n else:\n html_body, html_attachments = handle_html(html_body)\n attachments += html_attachments\n if len(attachments) > 0:\n msg = MIMEMultipart()\n msg.preamble = 'The message is only available on a MIME-aware mail reader.\\n'\n if body:\n alt = MIMEMultipart('alternative')\n alt.attach(MIMEText(body, 'plain', UTF_8))\n alt.attach(MIMEText(html_body, 'html', UTF_8))\n msg.attach(alt)\n else:\n msg.attach(MIMEText(html_body, 'html', UTF_8))\n for att in attachments:\n handle_file(msg, att['name'], att['maintype'], att['subtype'], att['cid'], att['data'])\n else:\n if body:\n msg = MIMEMultipart('alternative')\n msg.preamble = 'The message is only available on a MIME-aware mail reader.\\n'\n msg.attach(MIMEText(body, 'plain', UTF_8))\n msg.attach(MIMEText(html_body, 'html', UTF_8))\n else:\n msg = MIMEText(html_body, 'html', UTF_8)\n\n # Add the relevant headers to the most 
outer message\n msg['Subject'] = header(subject)\n msg['From'] = header(demisto.getParam('from'))\n if reply_to:\n msg['Reply-To'] = header(reply_to)\n if to:\n msg['To'] = header(','.join(to))\n if cc:\n msg['CC'] = header(','.join(cc))\n if additional_header:\n for h in additional_header:\n header_name_and_value = h.split('=', 1)\n msg[header_name_and_value[0]] = header(header_name_and_value[1])\n # Notice we should not add BCC header since Python2 does not filter it\n return body, html_body, msg.as_string(), to, cc, bcc", "title": "" }, { "docid": "a255c9a618766c71f2df5a35d5ef5625", "score": "0.4862484", "text": "def message_command(self, name=None, guild_ids=None, default_permission=True, guild_permissions=None):\n def wraper(callback):\n self._add_to_cache(MessageCommand(callback, name, guild_ids, default_permission, guild_permissions))\n return wraper", "title": "" }, { "docid": "9b7fd1828ba92cd7b18ba732f537d841", "score": "0.4836255", "text": "def build_sample(notification):\n pass", "title": "" }, { "docid": "dae81ada753455dd4cc207cf88ead53c", "score": "0.48331353", "text": "def log_message(self, format, *args):\n log.LogInfo('%s - - [%s] %s\\n' % (\n self.address_string(), self.log_date_time_string(), format % args))", "title": "" }, { "docid": "0e2810e06590d37e5819fb1f3d30df14", "score": "0.48280546", "text": "def msbuild(self, args=msbuild_args):\r\n subprocess.check_call(('msbuild',)+args)", "title": "" }, { "docid": "e3f91c1c7569837f2dd9095b0cfb8a74", "score": "0.48240837", "text": "def build_and_append_to_outbox(conn, cmd, data):\n global messages_to_send\n msg = chatlib.build_message(cmd, data)\n messages_to_send.append((conn, msg))\n return", "title": "" }, { "docid": "ba15545376e15e3e983f7e55189398ee", "score": "0.48218468", "text": "async def create_guild_news_channel(\n self,\n guild: snowflakes.SnowflakeishOr[guilds.PartialGuild],\n name: str,\n *,\n position: undefined.UndefinedOr[int] = undefined.UNDEFINED,\n topic: undefined.UndefinedOr[str] = undefined.UNDEFINED,\n nsfw: undefined.UndefinedOr[bool] = undefined.UNDEFINED,\n rate_limit_per_user: undefined.UndefinedOr[time.Intervalish] = undefined.UNDEFINED,\n permission_overwrites: undefined.UndefinedOr[\n typing.Sequence[channels_.PermissionOverwrite]\n ] = undefined.UNDEFINED,\n category: undefined.UndefinedOr[snowflakes.SnowflakeishOr[channels_.GuildCategory]] = undefined.UNDEFINED,\n reason: undefined.UndefinedOr[str] = undefined.UNDEFINED,\n ) -> channels_.GuildNewsChannel:", "title": "" }, { "docid": "732a00067d417aa90c0a0ee7b000b9ef", "score": "0.48161188", "text": "def setMessage(self, message, *args):\n self._message = message.format(*args)", "title": "" }, { "docid": "935e2573f675ff935d7f02c454815990", "score": "0.4816092", "text": "def buildMessage(topic, tags, keys, body):\n if not isinstance(body, bytes):\n raise Exception(\"body must be type of bytes.\")\n else:\n return _MessageJ(JString(topic), JString(tags), JString(keys), body)", "title": "" }, { "docid": "0f436dd06fa285a57c5ec5b8099e4108", "score": "0.4804159", "text": "def generalmessage(self, user, channel, msg):\r\n pass", "title": "" }, { "docid": "bf2ce5d7eaabd90233c4cd6f1a8f2615", "score": "0.4802356", "text": "def irc_341(self, command, prefix, args):\n if self.channels.has_key(args[1]):\n self.channels[args[1]].announce(\"Inviting %s\" % args[0])\n else:\n self.viewtext.announce(\"Inviting %s to channel %s\" % (args[0],\n args[1]))", "title": "" }, { "docid": "6f6bdbfeeb54ac34b7c69a2abf5c4a3b", "score": "0.48012513", "text": "def 
generate_msg(id_, component, service, params = None):\n if params is None:\n msg = id_ + \" \" + component + \" \" + service + \"\\n\"\n else:\n msg = id_ + \" \" + component + \" \" + service + \" \" + json.dumps(params) + \"\\n\"\n return msg", "title": "" }, { "docid": "82112faa833e22e64c35837f67e5d6aa", "score": "0.47996476", "text": "def _build(self,*args, **kwargs):", "title": "" }, { "docid": "109694d37ab832a6016957d653507a12", "score": "0.4798008", "text": "def create(self, sender, user, type, content):\n notification = Notification(user=user, type=type, content=content, createTime=datetime.now(), status=NotificationStatus['unread'], sender=sender)\n return notification", "title": "" }, { "docid": "51660b0d8b2578c6e572ea309faf0d10", "score": "0.47971255", "text": "def buildMessage(self, builder_name, build_status, results, step_name):\n # TODO(maruel): Update function signature to match\n # mail.MailNotifier.buildMessage().\n if (self._last_time_mail_sent and self._last_time_mail_sent >\n time.time() - self.minimum_delay_between_alert):\n # Rate limit tree alerts.\n log.msg('Suppressing repeat email')\n return\n log.msg('About to email')\n self._last_time_mail_sent = time.time()\n\n # TODO(maruel): Use self.createEmail().\n blame_interested_users = self.shouldBlameCommitters(step_name)\n project_name = self.master_status.getTitle()\n revisions_list = build_utils.getAllRevisions(build_status)\n build_url = self.master_status.getURLForThing(build_status)\n waterfall_url = self.master_status.getBuildbotURL()\n status_text = self.status_header % {\n 'buildbotURL': waterfall_url,\n 'builder': builder_name,\n 'builderName': builder_name,\n 'buildProperties': build_status.getProperties(),\n 'buildURL': build_url,\n 'project': project_name,\n 'reason': build_status.getReason(),\n 'slavename': build_status.getSlavename(),\n 'steps': step_name,\n }\n # Use the first line as a title.\n status_title = status_text.split('\\n', 1)[0]\n blame_list = ','.join(build_status.getResponsibleUsers())\n revisions_string = ''\n latest_revision = 0\n if revisions_list:\n revisions_string = ', '.join([str(rev) for rev in revisions_list])\n latest_revision = max([rev for rev in revisions_list])\n if results[0] == FAILURE:\n result = 'failure'\n else:\n result = 'warning'\n\n # Generate a HTML table looking like the waterfall.\n # WARNING: Gmail ignores embedded CSS style. I don't know how to fix that so\n # meanwhile, I just won't embedded the CSS style.\n html_content = (\n\"\"\"<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.01 Transitional//EN\">\n<html xmlns=\"http://www.w3.org/1999/xhtml\">\n<head>\n <title>%s</title>\n</head>\n<body>\n <a href=\"%s\">%s</a><p>\n %s<p>\n <a href=\"%s\">%s</a><p>\n Revision: %s<br>\n\"\"\" % (status_title, waterfall_url, waterfall_url,\n status_text.replace('\\n', \"<br>\\n\"), build_url,\n build_url, revisions_string))\n\n # Only include the blame list if relevant.\n if blame_interested_users:\n html_content += \" Blame list: %s<p>\\n\" % blame_list\n\n html_content += build_utils.EmailableBuildTable(build_status, waterfall_url)\n html_content += \"<p>\"\n # Add the change list descriptions. 
getChanges() returns a tuple of\n # buildbot.changes.changes.Change\n for change in build_status.getChanges():\n html_content += change.asHTML()\n html_content += \"</body>\\n</html>\"\n\n # Simpler text content for non-html aware clients.\n text_content = (\n\"\"\"%s\n\n%s\n\n%swaterfall?builder=%s\n\n--=> %s <=--\n\nRevision: %s\nBlame list: %s\n\nBuildbot waterfall: http://build.chromium.org/\n\"\"\" % (status_title,\n build_url,\n urllib.quote(waterfall_url, '/:'),\n urllib.quote(builder_name),\n status_text,\n revisions_string,\n blame_list))\n\n m = MIMEMultipart('alternative')\n # The HTML message, is best and preferred.\n m.attach(MIMEText(text_content, 'plain', 'iso-8859-1'))\n m.attach(MIMEText(html_content, 'html', 'iso-8859-1'))\n\n m['Date'] = formatdate(localtime=True)\n m['Subject'] = self.subject % {\n 'result': result,\n 'projectName': project_name,\n 'builder': builder_name,\n 'reason': build_status.getReason(),\n 'revision': str(latest_revision),\n 'buildnumber': str(build_status.getNumber()),\n 'date': str(datetime.date.today()),\n 'steps': step_name,\n 'slavename': build_status.getSlavename(),\n }\n m['From'] = self.fromaddr\n if self.reply_to:\n m['Reply-To'] = self.reply_to\n\n recipients = list(self.extraRecipients[:])\n if self.sheriffs:\n recipients.extend(BuildSheriffs.GetSheriffs(classes=self.sheriffs,\n data_dir=self.public_html))\n\n dl = []\n if self.sendToInterestedUsers and self.lookup and blame_interested_users:\n for u in build_status.getInterestedUsers():\n d = defer.maybeDeferred(self.lookup.getAddress, u)\n d.addCallback(recipients.append)\n dl.append(d)\n defered_object = defer.DeferredList(dl)\n if not self.enable_mail:\n defered_object.addCallback(self._logMail, recipients, m)\n else:\n defered_object.addCallback(self._gotRecipients, recipients, m)\n defered_object.addCallback(self.getFinishedMessage, builder_name,\n build_status, step_name)\n return defered_object", "title": "" }, { "docid": "21af2ce1cde8738952c704ff2eb7880a", "score": "0.47958565", "text": "def create_message(senderobj, message, channels=None,\n receivers=None, locks=None, header=None):\n global _Msg\n if not _Msg:\n from src.comms.models import Msg as _Msg\n if not message:\n # we don't allow empty messages.\n return\n new_message = _Msg(db_message=message)\n new_message.save()\n for sender in make_iter(senderobj):\n new_message.senders = sender\n new_message.header = header\n for channel in make_iter(channels):\n new_message.channels = channel\n for receiver in make_iter(receivers):\n new_message.receivers = receiver\n if locks:\n new_message.locks.add(locks)\n new_message.save()\n return new_message", "title": "" }, { "docid": "c8ac471aa50305f0e1c935a7b6aca7d1", "score": "0.47737974", "text": "def build_bundle(timetag, msg_addr, msg_args):\r\n\r\n if timetag < 1e6:\r\n timetag = time.time() + timetag\r\n bundle = osc_bundle_builder.OscBundleBuilder(timetag)\r\n msg = build_message(msg_addr, msg_args)\r\n bundle.add_content(msg)\r\n bundle = bundle.build()\r\n return bundle", "title": "" }, { "docid": "4dfda0a8c8092025ae86b39f2f960e09", "score": "0.47727537", "text": "def ParseTriggerFromFlags(args):\n messages = cloudbuild_util.GetMessagesModule()\n\n trigger, done = trigger_utils.ParseTriggerArgs(args, messages)\n if done:\n return trigger\n\n trigger.name = args.name\n\n # Build Config\n project = properties.VALUES.core.project.Get(required=True)\n default_image = 'gcr.io/%s/gcb-%s:$COMMIT_SHA' % (project, args.name)\n trigger_utils.ParseBuildConfigArgs(\n trigger, args, 
messages, default_image, need_repo=True)\n\n return trigger", "title": "" }, { "docid": "3192059edfa070ceac553083857fb83b", "score": "0.47594386", "text": "async def create_github_issue(\n ctx, *args: commands.clean_content(fix_channel_mentions=True)):\n issue = \" \".join(list(args))\n logging.info(\"dev command invocation: %s\", issue)\n answer = create_github_issue_helper(ctx, issue)\n await ctx.send(answer)", "title": "" }, { "docid": "ca97f6e6125b6ed8f7683b836cd2e228", "score": "0.47581926", "text": "def sys_notification(target, types, from_user, event_id):\n \n msg_from = UserProfile.objects.get(\n django_user=User.objects.get(username__exact='admin')) \n #admin userprofile #TODO: FUCKING HARDCODE, BUT NO BETTER WAY?!\n\n message = Message()\n message.msg_from = msg_from\n message.msg_to = target\n\n print \"prepare to send\"\n\n if (types == \"followed\"):\n message.title = str(from_user.django_user) + \" followed you.\"\n message.content = str(from_user.django_user) + \" followed you.\"\n print \"send\"\n elif(types == \"add_comment\"):\n message.title = str(from_user.django_user) + \" comments on your event.\"\n message.content = str(from_user.django_user) + \" comments on your event.\" + \"<a href='/events/\"+ event_id+\"'>link</a>\"\n elif(types == \"save_event\"):\n message.title = \"The event saved successfully. \"\n message.content = \"The event saved successfully. \" + \"<a href='/events/\"+ event_id+\"'>link</a>\"\n elif(types == \"approve_event\"):\n message.title = \"The event has been approved.\"\n message.content = \"The event has been approved. \" + \"<a href='/events/\"+ event_id+\"'>link</a>\"\n elif(types == \"approve_user\"):\n message.title = \"You have been promoted. \"\n message.content = \"You have been promoted. \"\n \n\n else:\n message.title = \"Unknown System Notification.\"\n message.content = \"Unknown System Notification.\" \n message.save()", "title": "" }, { "docid": "af13ef9b106003649f6f024c730205cc", "score": "0.47535768", "text": "def prepare_message(self, project, params, client):\n channel = params.get('channel')\n if not channel:\n raise Return((None, None))\n\n data = params.get('data', None)\n\n message = {\n 'uid': uuid.uuid4().hex,\n 'timestamp': int(time.time()),\n 'client': client,\n 'channel': channel,\n 'data': data\n }\n\n for callback in self.pre_publish_callbacks:\n try:\n message = yield callback(project[\"_id\"], message)\n except Exception as err:\n logger.exception(err)\n else:\n if message is None:\n raise Return((None, None))\n\n raise Return((message, None))", "title": "" }, { "docid": "e0b34058738778eb47f1fb126afb2ffc", "score": "0.4747345", "text": "def feedstock_args(self):\n build_args = [\"--working_directory\", self.repository]\n\n if self.channels:\n for channel in self.channels:\n build_args += [\"--channels\", channel]\n\n if self.python:\n build_args += [\"--python_versions\", self.python]\n if self.build_type:\n build_args += [\"--build_types\", self.build_type]\n if self.mpi_type:\n build_args += [\"--mpi_types\", self.mpi_type]\n if self.cudatoolkit:\n build_args += [\"--cuda_versions\", self.cudatoolkit]\n\n\n if self.recipe:\n build_args += [\"--recipes\", self.recipe]\n\n return build_args", "title": "" }, { "docid": "728ce58858e3b606edf17f78523d1aab", "score": "0.4742345", "text": "def inject_message(self, command, *args):\n self.command_queue.put((command, args))", "title": "" }, { "docid": "e99751ff91dd4c2ad312d0071e57e5a7", "score": "0.47405645", "text": "def hello(self, server, channel, nick, args):\n return 
'Hello, ' + nick.split('!')[0] + '!'", "title": "" }, { "docid": "1a8c08df26d0f3107b99caeb6776063c", "score": "0.47400635", "text": "def __init__(self, change_notification, **args):\n \n self.deep_construct(TextBufferSpecifyFromNotify,\n {'change_notification':change_notification,\n 'contents': change_notification.get_text()},\n args)\n\n self.change_notification.set_change_notification_callback(\n self._on_change_translator)", "title": "" }, { "docid": "f989a169cc40c7636c88fd11b3c2f003", "score": "0.47359362", "text": "def write_message(self, *args):\n message = ''.join('<%s>' % s for s in args)\n self._telnet.write(message + '|')", "title": "" }, { "docid": "b3485af68be087fd162da5ada131718f", "score": "0.47329813", "text": "def create(self, *args, **kwargs):\r\n make_alertmessage_create(self.token, *args, **kwargs)", "title": "" }, { "docid": "d916d5639163d074d6bcaa6d38a8bb14", "score": "0.47294822", "text": "def build_notification_string(messages, daysToSearch):\n\n notificationMessage = \"There were a total of \" + str(len(messages)) + \" critical alerts in the last \" + str(daysToSearch) + \" days. \"\n\n # iterate over the messages list and append each message to the notification string\n for message in messages:\n notificationMessage += \"Alert number \" + str(messages.index(message)+1) + \". \"\n notificationMessage += message + \" \"\n\n # write the notification string to the console\n print(notificationMessage) \n\n return notificationMessage", "title": "" }, { "docid": "aa597848baef50f585b41776be111210", "score": "0.47279218", "text": "def send(self, notification: Notification):", "title": "" }, { "docid": "51fc7a73de8f6da9d76aa29041e00002", "score": "0.47194043", "text": "def rpc_message_build(code, **kwargs):\r\n data = RpcMessage.build(kwargs, code = code)\r\n\r\n return rpc_packet_t.build(Container(code = code,\r\n data = data)\r\n )", "title": "" }, { "docid": "35d643c2cd6f6677c406d29162748339", "score": "0.47176954", "text": "def get(self, arg):\n self.write(SynopticPushClient.meta.get_message_dict())", "title": "" }, { "docid": "5e90be983810ab096953356192f43b56", "score": "0.47142652", "text": "def message(self):\n return self.args[0]", "title": "" }, { "docid": "67574238ce26680c45b1a95df0a8d257", "score": "0.4707869", "text": "def create_notification_plan(params):\n params['id'] = 'np' + random_hex_generator(4)\n params['critical_state'] = None\n params['warning_state'] = None\n params['ok_state'] = None\n params['created_at'] = time.time()\n params['updated_at'] = time.time()\n params['metadata'] = None\n return params", "title": "" }, { "docid": "7e9ffeeaae92c5841398e95db0f9e20c", "score": "0.47071633", "text": "def feedstock_args(self):\n build_args = [\"--working_directory\", self.repository]\n\n if self.channels:\n for channel in self.channels:\n build_args += [\"--channels\", channel]\n\n if self.python:\n build_args += [\"--python_versions\", self.python]\n if self.build_type:\n build_args += [\"--build_types\", self.build_type]\n if self.mpi_type:\n build_args += [\"--mpi_types\", self.mpi_type]\n if self.cudatoolkit:\n build_args += [\"--cuda_versions\", self.cudatoolkit]\n if self.conda_build_configs:\n build_args += [\"--conda_build_configs\", \"\\'\" + \",\".join(self.conda_build_configs) + \"\\'\"]\n\n if self.recipe:\n build_args += [\"--recipes\", self.recipe]\n\n return build_args", "title": "" }, { "docid": "844e866d206d73afcaf00e03736b3cda", "score": "0.47070882", "text": "def build_message(self):\n\t\tself.message['direction'] = 
self.elevatorInfo['direction']\n\t\tself.message['currentFloor'] = self.elevatorInfo['currentFloor']\n\t\tself.message['orderQueue'] = self.elevatorInfo['orderQueue'].serialize()\n\t\ttry:\n\t\t\torder = self.newOrderQueue.get_nowait().serialize()\n\t\t\tself.message['newOrders'].append(order)\n\t\t\tTimer(1/config.HEARTBEAT_FREQUENCY*config.BROADCAST_HEARTBEATS, self.remove_order, (order, )).start()\n\t\texcept:\n\t\t\tpass\n\t\ttry:\n\t\t\tstartedorder = self.startedOrderQueue.get_nowait().serialize()\n\t\t\tself.message['startedOrders'].append(startedorder)\n\t\t\tTimer(1/config.HEARTBEAT_FREQUENCY*config.BROADCAST_HEARTBEATS, self.remove_started_order, (startedorder, )).start()\n\t\texcept:\n\t\t\tpass\n\t\treturn json.dumps(self.message)", "title": "" }, { "docid": "bee873d947dea3156185f0623dece9b5", "score": "0.4702095", "text": "def _make_notif_link(self, notif: EmailPushAction) -> str:\n if self.hs.config.email.email_riot_base_url:\n return \"%s/#/room/%s/%s\" % (\n self.hs.config.email.email_riot_base_url,\n notif.room_id,\n notif.event_id,\n )\n elif self.app_name == \"Vector\":\n # need /beta for Universal Links to work on iOS\n return \"https://vector.im/beta/#/room/%s/%s\" % (\n notif.room_id,\n notif.event_id,\n )\n else:\n return \"https://matrix.to/#/%s/%s\" % (notif.room_id, notif.event_id)", "title": "" }, { "docid": "01aec9b2d698604ebd63cc3d0dcab0d9", "score": "0.46939972", "text": "def payload(self):\n if self.deviceToken == None:\n raise APNSUndefinedDeviceToken(\"You forget to set deviceToken \"\\\n \"in your notification.\")\n\n payload = self.build()\n payloadLength = len(payload)\n tokenLength = len(self.deviceToken)\n # Below not used at the moment\n # tokenFormat = \"s\" * tokenLength\n # payloadFormat = \"s\" * payloadLength\n\n apnsPackFormat = \"!BH\" + str(tokenLength) + \"sH\" + \\\n str(payloadLength) + \"s\"\n\n # build notification message in binary format\n return struct.pack(apnsPackFormat,\n self.command,\n tokenLength,\n self.deviceToken,\n payloadLength,\n payload)", "title": "" }, { "docid": "97f359f278084ec0501bec119aa346a2", "score": "0.46931958", "text": "def build_message(template,\n patient_nhs_number='9446245796',\n message_id: str = None,\n to_party_id='YES-0000806',\n to_asid='928942012545'):\n current_utc_time = datetime.datetime.utcnow()\n timestamp = current_utc_time.strftime(TIMESTAMP_FORMAT)\n file_upload = 'test file will go here'\n dissent_override = '0'\n use_date_filter = False\n document_type = '196971000000103'\n message_id = message_id if message_id is not None else message_utilities.get_uuid()\n\n message = message_builder.MustacheMessageBuilder(template).build_message({\n UUID: message_id,\n TIMESTAMP: timestamp,\n ASID: get_asid(),\n TO_ASID: to_asid,\n PATIENT_NHS_NUMBER: patient_nhs_number,\n TO_PARTY_ID: to_party_id,\n FILE_UPLOAD: file_upload,\n DISSENT_OVERRIDE: dissent_override,\n USE_DATE_FILTER: use_date_filter,\n DOCUMENT_TYPE: document_type\n })\n\n return MhsMessage(message, message_id)", "title": "" } ]
a2c5d12cdab9305340eea3544e06c91b
Passes the input in a round-robin manner. The round-robin application means that each element of the input batch will be passed through a single ensemble member in a deterministic round-robin manner, i.e. element_i -> member_k where k = i % num_networks.
[ { "docid": "08a1dc2038aed43b9a94bce3889ff8c1", "score": "0.67145306", "text": "def apply_round_robin(base_apply: Callable[[networks.Params, Any], Any],\n params: networks.Params, *args, **kwargs) -> Any:\n # `num_networks` is the size of the batch dimension in `params`.\n num_networks = jax.tree_util.tree_leaves(params)[0].shape[0]\n\n # Reshape args and kwargs for the round-robin:\n args = jax.tree_map(\n functools.partial(_split_batch_dimension, num_networks), args)\n kwargs = jax.tree_map(\n functools.partial(_split_batch_dimension, num_networks), kwargs)\n # `out.shape` is `(num_networks, initial_batch_size/num_networks, ...)\n out = jax.vmap(base_apply)(params, *args, **kwargs)\n # Reshape to [initial_batch_size, <remaining dimensions>]. Using the 'F' order\n # forces the original values to the last dimension.\n return jax.tree_map(lambda x: x.reshape((-1,) + x.shape[2:], order='F'), out)", "title": "" } ]
[ { "docid": "866d75e472380b64851b9dadd81a9c84", "score": "0.5458495", "text": "def roundrobin(*iterables):\n pending = len(iterables)\n nexts = cycle(iter(it).next for it in iterables)\n while pending:\n try:\n for next in nexts:\n yield next()\n except StopIteration:\n pending -= 1\n nexts = cycle(islice(nexts, pending))", "title": "" }, { "docid": "d07515a1eaef35b289cf65aedc6949a9", "score": "0.54218984", "text": "def forward(self, input, **kwargs):\n group_size = self.group_size or input.size(0)\n assert input.size(0) >= group_size, \\\n 'Can not use a smaller batch size ' + \\\n '({}) than the specified '.format(input.size(0)) + \\\n 'group size ({}) '.format(group_size) + \\\n 'of this minibatch std layer.'\n assert input.size(0) % group_size == 0, \\\n 'Can not use a batch of a size ' + \\\n '({}) that is not '.format(input.size(0)) + \\\n 'evenly divisible by the group size ({})'.format(group_size)\n x = input\n\n # B = batch size, C = num channels\n # *s = the size dimensions (height, width for images)\n\n # BC*s -> G[B/G]C*s\n y = input.view(group_size, -1, *input.size()[1:])\n # For numerical stability when training with mixed precision\n y = y.float()\n # G[B/G]C*s\n y -= y.mean(dim=0, keepdim=True)\n # [B/G]C*s\n y = torch.mean(y ** 2, dim=0)\n # [B/G]C*s\n y = torch.sqrt(y + self.eps)\n # [B/G]\n y = torch.mean(y.view(y.size(0), -1), dim=-1)\n # [B/G]1*1\n y = y.view(-1, *[1] * (input.dim() - 1))\n # Cast back to input dtype\n y = y.to(x)\n # B1*1\n y = y.repeat(group_size, *[1] * (y.dim() - 1))\n # B1*s\n y = y.expand(y.size(0), 1, *x.size()[2:])\n # B[C+1]*s\n x = torch.cat([x, y], dim=1)\n return x", "title": "" }, { "docid": "55b289b5604421a1f1c84a0ddf408f79", "score": "0.5285405", "text": "def iterate_minibatches(inputs, targets, batchsize, shuffle=False, **augmentation_params):\n assert len(inputs) == len(targets)\n assert len(inputs) >= batchsize\n if shuffle:\n indices = np.arange(len(inputs))\n np.random.shuffle(indices)\n\n def gen(inputs, targets, batchsize, **augmentation_params):\n for start_idx in range(0, len(inputs) - batchsize + 1, batchsize):\n if shuffle:\n excerpt = indices[start_idx:start_idx + batchsize]\n else:\n excerpt = slice(start_idx, start_idx + batchsize)\n if targets.shape < 2:\n targets = targets.reshape(-1, 1)\n X, y = data_augmentation(inputs[excerpt], **augmentation_params), targets[excerpt]\n yield X, y\n return buffered_gen_threaded(gen(inputs, targets, batchsize, **augmentation_params))", "title": "" }, { "docid": "1f906076b1b43668aadd90fe4c55be9c", "score": "0.5279125", "text": "def _batched(X_train, Y_train, batch_size):\n idx = 0\n while idx < len(X_train):\n if len(X_train) - idx < batch_size:\n yield X_train[idx:], Y_train[idx:]\n else:\n yield X_train[idx:idx+batch_size], Y_train[idx:idx+batch_size]\n idx += batch_size", "title": "" }, { "docid": "2db6daacafa38710a58153c467ad84b0", "score": "0.5236657", "text": "def round_robin_selection(pop: List[creator.Individual], n_selected: int) -> List[creator.Individual]:\n def tourn(ind1, ind2):\n if ind1.fitness.dominates(ind2.fitness):\n return ind1\n elif ind2.fitness.dominates(ind1.fitness):\n return ind2\n\n if ind1.fitness.crowding_dist < ind2.fitness.crowding_dist:\n return ind2\n elif ind1.fitness.crowding_dist > ind2.fitness.crowding_dist:\n return ind1\n\n if random.random() <= 0.5:\n return ind1\n return ind2\n\n randoms = []\n immigrants = []\n\n for ind in pop:\n if is_immigrant(ind):\n immigrants.append(ind)\n else:\n randoms.append(ind)\n\n r_size = len(randoms)\n i_size = 
len(immigrants)\n\n    if r_size > 4 and i_size > 4:\n        whole_1 = random.sample(pop, len(pop))\n        whole_2 = random.sample(pop, len(pop))\n\n        chosen = []\n        for i in range(0, int(n_selected/2), 2):\n            assert i_size == len(immigrants), \"immigrants tournament change size\"\n\n            chosen.append(tourn(whole_1[i], whole_1[i + 1]))\n            chosen.append(tourn(whole_2[i], whole_2[i + 1]))\n\n            if i % r_size == 0 or (i+1) % r_size == 0:\n                random.shuffle(randoms)\n\n            if i % i_size == 0 or (i+1) % i_size == 0:\n                random.shuffle(immigrants)\n\n            chosen.append(tourn(randoms[i % r_size], randoms[(i+1) % r_size]))\n            chosen.append(tourn(immigrants[i % i_size], immigrants[(i+1) % i_size]))\n    else:\n        whole_1 = random.sample(pop, len(pop))\n        whole_2 = random.sample(pop, len(pop))\n\n        chosen = []\n        for i in range(0, n_selected, 4):\n            chosen.append(tourn(whole_1[i], whole_1[i + 1]))\n            chosen.append(tourn(whole_1[i + 2], whole_1[i + 3]))\n            chosen.append(tourn(whole_2[i], whole_2[i + 1]))\n            chosen.append(tourn(whole_2[i + 2], whole_2[i + 3]))\n\n    return chosen", "title": "" }, { "docid": "9539411dfb6aa774ff5e608b5c6b1fd1", "score": "0.5145597", "text": "def RoundRobin(stats):\n    k = len(stats)\n    n = sum(s[0] for s in stats)  # total number of pulls\n\n    return n % k", "title": "" }, { "docid": "84ddbcdbe5805f68292d9b7a84f771a3", "score": "0.51338005", "text": "def forward(self, x):\n        ########################################################################\n        # YOUR CODE                                                            #\n        ########################################################################\n        #print(\"begin --- \", x.size())\n        #print(\"conv1 - bn1 --- \", x.size())\n        #print(\"conv1 - bn1 - maxpool(2,2) --- \", x.size())\n        #print(\"conv2 - bn2 - maxpool(2,2) --- \", x.size())\n        #print(\"upsampling conv_tr4 --- \", x.size())\n        \n        x, ind1 = self.pool1(F.relu(self.bn1(self.conv1(x))))\n        #print(\"down 1 --- \", x.size())\n        x, ind2 = self.pool2(F.relu(self.bn2(self.conv2(x))))\n        #print(\"down 2 --- \", x.size())\n\n        x, ind3 = self.pool3(F.relu(self.bn3(self.conv3(x))))\n        #print(\"down 3 --- \", x.size())\n        \n        x = F.relu(self.bn_back3(self.conv_back3(self.unpool3(x, ind3))))\n        #print(\"up 3 --- \", x.size())\n\n        x = F.relu(self.bn_back2(self.conv_back2(self.unpool2(x, ind2))))\n        #print(\"up 2 --- \", x.size())\n        \n        #x = F.relu(self.bn_back1(self.conv_back1(self.unpool1(x, ind1))))\n        #print(\"up 1 --- \", x.size())\n\n        \n        #x = self.conv_back1(self.unpool1(x, ind1))\n        #print(\" up, y -------\", y.size())\n        x = self.conv_tr1(x)\n        #print(x.size())\n        pass\n\n        ########################################################################\n        #                             END OF YOUR CODE                         #\n        ########################################################################\n\n        return x", "title": "" }, { "docid": "a76a92156fdb35bd7abd7bce827d986a", "score": "0.5118706", "text": "def test_simple_forward_batch(adj_batch):\n    nnet = GCN_simple(3, [10, 10], 2, 2, dropout=0)\n    # 2 proteins with 2 aminoacids and 3 features each\n    v = torch.FloatTensor(\n        [[[23.0, 0.0, 2.0], [4.0, 2.0, 0.0]], [[1.0, 1.0, 24.0], [2.0, 1.0, 0.0]],]\n    )\n    out = nnet.forward([v, adj_batch])\n    # 2 instances + 2 classes\n    assert out.shape == torch.Size([2, 2])", "title": "" }, { "docid": "1e595a243eb16a03d867f38ca9c1aca5", "score": "0.511103", "text": "def round_robin_selection_v2(pop: List[creator.Individual], n_selected: int) -> List[creator.Individual]:\n    def tourn(ind1, ind2):\n        if ind1.fitness.dominates(ind2.fitness):\n            return ind1\n        elif ind2.fitness.dominates(ind1.fitness):\n            return ind2\n\n        if ind1.fitness.crowding_dist < ind2.fitness.crowding_dist:\n            return 
ind2\n        elif ind1.fitness.crowding_dist > ind2.fitness.crowding_dist:\n            return ind1\n\n        if random.random() <= 0.5:\n            return ind1\n        return ind2\n\n    randoms = []\n    immigrants = []\n\n    for ind in pop:\n        if is_immigrant(ind):\n            immigrants.append(ind)\n        else:\n            randoms.append(ind)\n\n    r_size = len(randoms)\n    i_size = len(immigrants)\n\n    whole_1 = random.sample(pop, len(pop))\n    whole_2 = random.sample(pop, len(pop))\n\n    chosen = []\n    mixing_round = False\n    for i in range(0, n_selected, 4):\n        chosen.append(tourn(whole_1[i], whole_1[i + 1]))\n        chosen.append(tourn(whole_1[i + 2], whole_1[i + 3]))\n\n        if mixing_round:\n            # add a random-immigrant\n\n            ni = max(i-2, 0)\n            # random + immigrant\n            if ni % r_size == 0 or (ni + 1) % r_size == 0:\n                random.shuffle(randoms)\n\n            if ni % i_size == 0 or (ni + 1) % i_size == 0:\n                random.shuffle(immigrants)\n\n            chosen.append(tourn(randoms[i % r_size], randoms[(i + 1) % r_size]))\n            chosen.append(tourn(immigrants[i % i_size], immigrants[(i + 1) % i_size]))\n        else:\n            # normal tournament\n            chosen.append(tourn(whole_2[i], whole_2[i + 1]))\n            chosen.append(tourn(whole_2[i + 2], whole_2[i + 3]))\n\n        mixing_round = not mixing_round and (r_size > 4 and i_size > 4)\n\n    return chosen", "title": "" }, { "docid": "211eca61b51562baac910d92c9eee0c9", "score": "0.5099081", "text": "def test_minmax_order(self):\n        SequentialEnsemble([self.inX, self.outX, self.inX], (0,1), (0,0))", "title": "" }, { "docid": "8ad5e57a755aa0ce65501fc00495af58", "score": "0.50976694", "text": "def pool_forward(A_prev, hparameters, mode = \"max\"):\r\n    \r\n    # Retrieve dimensions from the input shape\r\n    (m, n_H_prev, n_W_prev, n_C_prev) = A_prev.shape\r\n    \r\n    # Retrieve hyperparameters from \"hparameters\"\r\n    f = hparameters[\"f\"]\r\n    stride = hparameters[\"stride\"]\r\n    \r\n    # Define the dimensions of the output\r\n    n_H = int(1 + (n_H_prev - f) / stride)\r\n    n_W = int(1 + (n_W_prev - f) / stride)\r\n    n_C = n_C_prev\r\n    \r\n    # Initialize output matrix A\r\n    A = np.zeros((m, n_H, n_W, n_C))              \r\n    \r\n    ### START CODE HERE ###\r\n    for i in range(m):                         # loop over the training examples\r\n        for h in range(n_H):                     # loop on the vertical axis of the output volume\r\n            for w in range(n_W):                 # loop on the horizontal axis of the output volume\r\n                for c in range (n_C):            # loop over the channels of the output volume\r\n                    \r\n                    # Find the corners of the current \"slice\" (≈4 lines)\r\n                    vert_start = h * stride\r\n                    vert_end = vert_start + f\r\n                    horiz_start = w * stride\r\n                    horiz_end = horiz_start + f\r\n                    \r\n                    # Use the corners to define the current slice on the ith training example of A_prev, channel c. (≈1 line)\r\n                    a_prev_slice = A_prev[i, vert_start:vert_end, horiz_start:horiz_end, c]\r\n                    \r\n                    # Compute the pooling operation on the slice. Use an if statement to differentiate the modes. 
Use np.max/np.mean.\r\n if mode == \"max\":\r\n A[i, h, w, c] = np.max(a_prev_slice)\r\n elif mode == \"average\":\r\n A[i, h, w, c] = np.mean(a_prev_slice)\r\n \r\n ### END CODE HERE ###\r\n \r\n # Store the input and hparameters in \"cache\" for pool_backward()\r\n cache = (A_prev, hparameters)\r\n \r\n # Making sure your output shape is correct\r\n assert(A.shape == (m, n_H, n_W, n_C))\r\n \r\n return A, cache", "title": "" }, { "docid": "31f2756cfeaea6ade74212efa085366f", "score": "0.5042141", "text": "def test_single_input_multi_batch(self):\n # Folder must be root to load in make_net properly\n if os.getcwd().split('\\\\')[-1] == 'tests': os.chdir('../..')\n \n # Get 'empty' GRU\n gru = get_gru_berkeley(1)\n \n # Completely zero GRU, all inputs get ignored\n result = gru(asarray([[0], [0]]))\n for aa, bb in zip(result, asarray([[0], [0]])):\n self.assertTrue(float(aa) - EPSILON <= float(bb) <= float(aa) + EPSILON)\n gru.hx = asarray([]) # GRU keeps own state, reset it\n result = gru(asarray([[1], [1]]))\n for aa, bb in zip(result, asarray([[0], [0]])):\n self.assertTrue(float(aa) - EPSILON <= float(bb) <= float(aa) + EPSILON)\n gru.hx = asarray([]) # GRU keeps own state, reset it\n \n # Modify the GRU to have weight-arrays of one\n gru.weight_hh = ones((3, 1))\n gru.weight_xh = ones((3, 1))\n \n # Load in PyTorch native GRU to compare with\n pytorch_gru = get_pytorch_gru(1, gru)\n \n # Test if they continue to obtain the same results\n for _ in range(100):\n i = random()\n a = gru(asarray([[i], [i]]))\n gru.hx = asarray([]) # GRU keeps own state, reset it\n b = pytorch_gru(FloatTensor([[i], [i]]))\n self.assertEqual(a.shape, b.shape)\n for aa, bb in zip(a, b):\n self.assertTrue(float(aa) - EPSILON <= float(bb) <= float(aa) + EPSILON)\n \n # Set bias to minus ones\n gru.bias = ones((3,)) * -1\n \n # Load in PyTorch native GRU to compare with\n pytorch_gru = get_pytorch_gru(1, gru)\n \n # Test if they continue to obtain the same results\n for _ in range(100):\n i = random()\n a = gru(asarray([[i], [i]]))\n gru.hx = asarray([]) # GRU keeps own state, reset it\n b = pytorch_gru(FloatTensor([[i], [i]]))\n self.assertEqual(a.shape, b.shape)\n for aa, bb in zip(a, b):\n self.assertTrue(float(aa) - EPSILON <= float(bb) <= float(aa) + EPSILON)", "title": "" }, { "docid": "d6d3a72676360d5e40a7bfe7cb1c1fd7", "score": "0.50327253", "text": "def test_maxoverlap_ensemble_size(self):\n SequentialEnsemble([self.inX, self.outX, self.inX], (0,0,0), (0,0,0))", "title": "" }, { "docid": "2b654f048f7cc33694dff88d697d6802", "score": "0.50142056", "text": "def paralll_worker(rank, size,\n target_function=None,\n batch=None,\n fixed_args=None,\n output_queue=None):\n for input in batch:\n result = target_function(*input, *fixed_args)\n output_queue.put((input, result))", "title": "" }, { "docid": "efde1b0de5e0615de8ca0ea44a63c5c2", "score": "0.49821076", "text": "def test_multi_input_multi_batch(self):\n # Folder must be root to load in make_net properly\n if os.getcwd().split('\\\\')[-1] == 'tests': os.chdir('../..')\n \n # Get 'empty' GRU\n gru = get_gru_berkeley(2)\n \n # Completely zero GRU, all inputs get ignored\n result = gru(asarray([[0, 0], [0, 0]]))\n for aa, bb in zip(result, asarray([[0], [0]])):\n self.assertTrue(float(aa) - EPSILON <= float(bb) <= float(aa) + EPSILON)\n gru.hx = asarray([]) # GRU keeps own state, reset it\n result = gru(asarray([[1, 1], [1, 1]]))\n for aa, bb in zip(result, asarray([[0], [0]])):\n self.assertTrue(float(aa) - EPSILON <= float(bb) <= float(aa) + EPSILON)\n 
gru.hx = asarray([]) # GRU keeps own state, reset it\n \n # Modify the GRU to have weight-arrays of one\n gru.weight_hh = ones((3, 1))\n gru.weight_xh = ones((3, 2))\n \n # Load in PyTorch native GRU to compare with\n pytorch_gru = get_pytorch_gru(2, gru)\n \n # Test if they continue to obtain the same results\n for _ in range(100):\n i = random()\n a = gru(asarray([[i, i], [i, i]]))\n gru.hx = asarray([]) # GRU keeps own state, reset it\n b = pytorch_gru(FloatTensor([[i, i], [i, i]]))\n self.assertEqual(a.shape, b.shape)\n for aa, bb in zip(a, b):\n self.assertTrue(float(aa) - EPSILON <= float(bb) <= float(aa) + EPSILON)\n \n # Set bias to minus ones\n gru.bias = ones((3,)) * -1\n \n # Load in PyTorch native GRU to compare with\n pytorch_gru = get_pytorch_gru(2, gru)\n \n # Test if they continue to obtain the same results\n for _ in range(100):\n i = random()\n a = gru(asarray([[i, i], [i, i]]))\n gru.hx = asarray([]) # GRU keeps own state, reset it\n b = pytorch_gru(FloatTensor([[i, i], [i, i]]))\n self.assertEqual(a.shape, b.shape)\n for aa, bb in zip(a, b):\n self.assertTrue(float(aa) - EPSILON <= float(bb) <= float(aa) + EPSILON)", "title": "" }, { "docid": "674df89e019458e36d7004b57341f7e2", "score": "0.49588466", "text": "def train(self, X, Y, iterations=100):\n for it in range(iterations):\n for idx in range(len(X)):\n x = X[idx]\n y = Y[idx]\n self.forward_propagate(x)\n self.back_propagate(y)", "title": "" }, { "docid": "dc60b90659211099da7eb83210ca15e7", "score": "0.49282935", "text": "def test_single_input_multi_batch(self):\n # Folder must be root to load in make_net properly\n if os.getcwd().split('\\\\')[-1] == 'tests': os.chdir('../..')\n \n # Get 'empty' GRU\n gru = get_gru_pytorch_copy(1)\n \n # Completely zero GRU, all inputs get ignored\n result = gru(tensor([[0], [0]], dtype=float64))\n for aa, bb in zip(result, asarray([[0], [0]])):\n self.assertTrue(float(aa) - EPSILON <= float(bb) <= float(aa) + EPSILON)\n gru.hx = None # GRU keeps own state, reset it\n result = gru(tensor([[1], [1]], dtype=float64))\n for aa, bb in zip(result, asarray([[0], [0]])):\n self.assertTrue(float(aa) - EPSILON <= float(bb) <= float(aa) + EPSILON)\n gru.hx = None # GRU keeps own state, reset it\n \n # Modify the GRU to have weight-arrays of one\n gru.weight_hh = tensor(ones((3, 1)), dtype=float64)\n gru.weight_ih = tensor(ones((3, 1)), dtype=float64)\n \n # Load in PyTorch native GRU to compare with\n pytorch_gru = get_pytorch_gru(1, gru)\n \n # Test if they continue to obtain the same results\n for _ in range(100):\n i = random()\n a = gru(tensor([[i], [i]], dtype=float64))\n gru.hx = None # GRU keeps own state, reset it\n b = pytorch_gru(FloatTensor([[i], [i]]))\n self.assertEqual(a.shape, b.shape)\n for aa, bb in zip(a, b):\n self.assertTrue(float(aa) - EPSILON <= float(bb) <= float(aa) + EPSILON)\n \n # Set bias_ih to minus ones\n gru.bias_ih = tensor(ones((3,)), dtype=float64) * -1\n \n # Load in PyTorch native GRU to compare with\n pytorch_gru = get_pytorch_gru(1, gru)\n \n # Test if they continue to obtain the same results\n for _ in range(100):\n i = random()\n a = gru(tensor([[i], [i]], dtype=float64))\n gru.hx = None # GRU keeps own state, reset it\n b = pytorch_gru(FloatTensor([[i], [i]]))\n self.assertEqual(a.shape, b.shape)\n for aa, bb in zip(a, b):\n self.assertTrue(float(aa) - EPSILON <= float(bb) <= float(aa) + EPSILON)", "title": "" }, { "docid": "7ef49d5abeff65e718c19843536648a2", "score": "0.4910226", "text": "def hybrid_forward(self, F, x, gt_box=None, gt_label=None, 
m_rpn_box=None):\n\n def _split(x, axis, num_outputs, squeeze_axis):\n x = F.split(\n x, axis=axis, num_outputs=num_outputs, squeeze_axis=squeeze_axis\n )\n if isinstance(x, list):\n return x\n else:\n return [x]\n\n if m_rpn_box is not None:\n manual_rpn_box = True\n else:\n manual_rpn_box = False\n feat = self.features(x)\n if not isinstance(feat, (list, tuple)):\n feat = [feat]\n\n # RPN proposals\n if autograd.is_training():\n if manual_rpn_box:\n rpn_box = m_rpn_box\n self.nms_thresh = 1\n else:\n (\n rpn_score,\n rpn_box,\n raw_rpn_score,\n raw_rpn_box,\n anchors,\n ) = self.rpn(F.zeros_like(x), *feat)\n rpn_box, samples, matches = self.sampler(\n rpn_box, rpn_score, gt_box\n )\n else:\n if manual_rpn_box:\n rpn_box = m_rpn_box\n self.nms_thresh = 1\n else:\n _, rpn_box = self.rpn(F.zeros_like(x), *feat)\n\n # create batchid for roi\n if not manual_rpn_box:\n num_roi = (\n self._num_sample\n if autograd.is_training()\n else self._rpn_test_post_nms\n )\n batch_size = self._batch_size if autograd.is_training() else 1\n else:\n num_roi = m_rpn_box.shape[1]\n batch_size = rpn_box.shape[0]\n\n with autograd.pause():\n roi_batchid = F.arange(0, batch_size)\n roi_batchid = F.repeat(roi_batchid, num_roi)\n # remove batch dim because ROIPooling require 2d input\n rpn_roi = F.concat(\n *[roi_batchid.reshape((-1, 1)), rpn_box.reshape((-1, 4))],\n dim=-1\n )\n rpn_roi = F.stop_gradient(rpn_roi)\n\n if self.num_stages > 1:\n # using FPN\n pooled_feat = self._pyramid_roi_feats(\n F,\n feat,\n rpn_roi,\n self._roi_size,\n self._strides,\n roi_mode=self._roi_mode,\n )\n else:\n # ROI features\n if self._roi_mode == \"pool\":\n pooled_feat = F.ROIPooling(\n feat[0], rpn_roi, self._roi_size, 1.0 / self._strides\n )\n elif self._roi_mode == \"align\":\n pooled_feat = F.contrib.ROIAlign(\n feat[0],\n rpn_roi,\n self._roi_size,\n 1.0 / self._strides,\n sample_ratio=2,\n )\n else:\n raise ValueError(\"Invalid roi mode: {}\".format(self._roi_mode))\n\n # RCNN prediction\n if self.top_features is not None:\n top_feat = self.top_features(pooled_feat)\n else:\n top_feat = pooled_feat\n if self.box_features is None:\n box_feat = F.contrib.AdaptiveAvgPooling2D(top_feat, output_size=1)\n else:\n box_feat = self.box_features(top_feat)\n cls_pred = self.class_predictor(box_feat)\n # cls_pred (B * N, C) -> (B, N, C)\n cls_pred = cls_pred.reshape((batch_size, num_roi, self.num_class + 1))\n if manual_rpn_box:\n spatial_feat = top_feat.mean(axis=1).reshape(\n (-4, rpn_box.shape[0], rpn_box.shape[1], -3)\n )\n cls_ids, scores = self.cls_decoder(F.softmax(cls_pred, axis=-1))\n cls_ids = cls_ids.transpose((0, 2, 1)).reshape((0, 0, 0, 1))\n scores = scores.transpose((0, 2, 1)).reshape((0, 0, 0, 1))\n cls_ids = _split(\n cls_ids, axis=0, num_outputs=batch_size, squeeze_axis=True\n )\n scores = _split(\n scores, axis=0, num_outputs=batch_size, squeeze_axis=True\n )\n return cls_ids, scores, rpn_box, spatial_feat\n\n # no need to convert bounding boxes in training, just return\n if autograd.is_training():\n (\n cls_targets,\n box_targets,\n box_masks,\n indices,\n ) = self._target_generator(\n rpn_box, samples, matches, gt_label, gt_box\n )\n box_feat = F.reshape(box_feat.expand_dims(0), (batch_size, -1, 0))\n box_pred = self.box_predictor(\n F.concat(\n *[\n F.take(\n F.slice_axis(\n box_feat, axis=0, begin=i, end=i + 1\n ).squeeze(),\n F.slice_axis(\n indices, axis=0, begin=i, end=i + 1\n ).squeeze(),\n )\n for i in range(batch_size)\n ],\n dim=0\n )\n )\n # box_pred (B * N, C * 4) -> (B, N, C, 4)\n box_pred = 
box_pred.reshape((batch_size, -1, self.num_class, 4))\n if self._additional_output:\n return (\n cls_pred,\n box_pred,\n rpn_box,\n samples,\n matches,\n raw_rpn_score,\n raw_rpn_box,\n anchors,\n cls_targets,\n box_targets,\n box_masks,\n top_feat,\n indices,\n )\n return (\n cls_pred,\n box_pred,\n rpn_box,\n samples,\n matches,\n raw_rpn_score,\n raw_rpn_box,\n anchors,\n cls_targets,\n box_targets,\n box_masks,\n indices,\n )\n\n box_pred = self.box_predictor(box_feat)\n # box_pred (B * N, C * 4) -> (B, N, C, 4)\n box_pred = box_pred.reshape((batch_size, num_roi, self.num_class, 4))\n # cls_ids (B, N, C), scores (B, N, C)\n cls_ids, scores = self.cls_decoder(F.softmax(cls_pred, axis=-1))\n # cls_ids, scores (B, N, C) -> (B, C, N) -> (B, C, N, 1)\n cls_ids = cls_ids.transpose((0, 2, 1)).reshape((0, 0, 0, 1))\n scores = scores.transpose((0, 2, 1)).reshape((0, 0, 0, 1))\n # box_pred (B, N, C, 4) -> (B, C, N, 4)\n box_pred = box_pred.transpose((0, 2, 1, 3))\n\n # rpn_boxes (B, N, 4) -> B * (1, N, 4)\n rpn_boxes = _split(\n rpn_box, axis=0, num_outputs=batch_size, squeeze_axis=False\n )\n # cls_ids, scores (B, C, N, 1) -> B * (C, N, 1)\n cls_ids = _split(\n cls_ids, axis=0, num_outputs=batch_size, squeeze_axis=True\n )\n scores = _split(\n scores, axis=0, num_outputs=batch_size, squeeze_axis=True\n )\n # box_preds (B, C, N, 4) -> B * (C, N, 4)\n box_preds = _split(\n box_pred, axis=0, num_outputs=batch_size, squeeze_axis=True\n )\n\n # per batch predict, nms, each class has topk outputs\n results = []\n # add feat index\n if self._additional_output:\n sizes = scores[0].shape[0:2]\n # ind = mx.nd.array(list(range(sizes[1])))\n ind = mx.nd.linspace(0, 999, 1000)\n ind = mx.nd.repeat(ind, repeats=sizes[0])\n ind = (\n ind.reshape(sizes[1], sizes[0])\n .transpose((1, 0))\n .expand_dims(axis=2)\n )\n for rpn_box, cls_id, score, box_pred in zip(\n rpn_boxes, cls_ids, scores, box_preds\n ):\n # box_pred (C, N, 4) rpn_box (1, N, 4) -> bbox (C, N, 4)\n bbox = self.box_decoder(box_pred, rpn_box)\n if self._additional_output:\n # res (C, N, 7)\n res = F.concat(*[cls_id, score, bbox, ind], dim=-1)\n else:\n # res (C, N, 6)\n res = F.concat(*[cls_id, score, bbox], dim=-1)\n if self.force_nms:\n # res (1, C*N, 6), to allow cross-catogory suppression\n res = res.reshape((1, -1, 0))\n # res (C, self.nms_topk, 6)\n res = F.contrib.box_nms(\n res,\n overlap_thresh=self.nms_thresh,\n topk=self.nms_topk,\n valid_thresh=0.001,\n id_index=0,\n score_index=1,\n coord_start=2,\n force_suppress=self.force_nms,\n )\n # res (C * self.nms_topk, 6)\n res = res.reshape((-3, 0))\n results.append(res)\n\n # result B * (C * topk, 6) -> (B, C * topk, 6)\n result = F.stack(*results, axis=0)\n ids = F.slice_axis(result, axis=-1, begin=0, end=1)\n scores = F.slice_axis(result, axis=-1, begin=1, end=2)\n bboxes = F.slice_axis(result, axis=-1, begin=2, end=6)\n if self._additional_output:\n feat_ind = F.slice_axis(result, axis=-1, begin=6, end=7)\n spatial_feat = (\n top_feat.mean(axis=1).expand_dims(0).reshape(batch_size, 0, -1)\n )\n return ids, scores, bboxes, feat, feat_ind, spatial_feat\n return ids, scores, bboxes", "title": "" }, { "docid": "b09bad00f70075c808fe9f78b86608a1", "score": "0.49029836", "text": "def broadcast_forward(self,\n seq_repr: torch.Tensor,\n adj: torch.LongTensor) -> List[torch.Tensor]:\n out = [seq_repr]\n # gcn_out = seq_repr\n B, L, E = seq_repr.size()\n D = self.gcn_dim\n for layer_idx in range(self.num_blocks): # each GCN layer\n gcn_inp = out[-1]\n act_sum = None\n for et_idx in 
range(self.num_unRare_edge_types): # each edge type\n in_arc = self.in_proj[layer_idx][et_idx](gcn_inp)\n # (B, L, L) * (B, L, D) --> (B, L, D)\n in_arc = torch.matmul(adj[:, et_idx, :, :].to(torch.float), in_arc)\n if self.use_drop:\n in_arc = self.dropout(in_arc)\n if self.gate:\n in_arc_gate = self.in_gate_proj[layer_idx][et_idx](gcn_inp)\n # (B, L, L) * (B, L, D) --> (B, L, D)\n in_arc_gate = torch.matmul(adj[:, et_idx, :, :].to(torch.float), in_arc_gate)\n in_arc_gate = self.sigmoid(in_arc_gate)\n in_arc = in_arc * in_arc_gate\n\n out_arc = self.out_proj[layer_idx][et_idx](gcn_inp)\n out_arc = torch.matmul(adj[:, et_idx, :, :].transpose(1, 2).to(torch.float), out_arc)\n if self.use_drop:\n out_arc = self.dropout(out_arc)\n if self.gate:\n out_arc_gate = self.out_gate_proj[layer_idx][et_idx](gcn_inp)\n out_arc_gate = torch.matmul(adj[:, et_idx, :, :].permute(0, 2, 1).to(torch.float), out_arc_gate)\n out_arc_gate = self.sigmoid(out_arc_gate)\n out_arc = out_arc * out_arc_gate\n if act_sum is None:\n act_sum = in_arc + out_arc\n else:\n act_sum = act_sum + in_arc + out_arc\n\n Rest = self.num_all_edge_types-self.num_unRare_edge_types\n if Rest > 0:\n et_idx = self.num_unRare_edge_types\n # (B, L, D)\n in_arc = self.in_proj[layer_idx][et_idx](gcn_inp)\n # (B, Rest, L, L) * (B, 1, L, D) --> (B, Rest, L, D)\n in_arc = torch.matmul(adj[:, et_idx:, :, :].to(torch.float), in_arc.unsqueeze(1))\n assert in_arc.size() == (B, Rest, L, D)\n if self.use_drop:\n in_arc = self.dropout(in_arc)\n if self.gate:\n in_arc_gate = self.in_gate_proj[layer_idx][et_idx](gcn_inp)\n # (B, Rest, L, L) * (B, 1, L, 1) --> (B, Rest, L, D)\n in_arc_gate = torch.matmul(adj[:, et_idx:, :, :].to(torch.float), in_arc_gate.unsqueeze(1))\n in_arc_gate = self.sigmoid(in_arc_gate)\n in_arc = in_arc * in_arc_gate\n # (B, Rest, L, D) --> (B, L, D)\n in_arc = in_arc.sum(dim=1)\n\n out_arc = self.out_proj[layer_idx][et_idx](gcn_inp)\n out_arc = torch.matmul(adj[:, et_idx:, :, :].permute(0, 1, 3, 2).to(torch.float), out_arc.unsqueeze(1))\n assert out_arc.size() == (B, Rest, L, D)\n if self.use_drop:\n out_arc = self.dropout(out_arc)\n if self.gate:\n out_arc_gate = self.out_gate_proj[layer_idx][et_idx](gcn_inp)\n out_arc_gate = torch.matmul(adj[:, et_idx:, :, :].permute(0, 1, 3, 2).to(torch.float), out_arc_gate.unsqueeze(1))\n assert out_arc_gate.size() == (B, Rest, L, 1)\n out_arc_gate = self.sigmoid(out_arc_gate)\n out_arc = out_arc * out_arc_gate\n assert out_arc.size() == (B, Rest, L, D)\n out_arc = out_arc.sum(dim=1)\n act_sum = act_sum + in_arc + out_arc\n\n if self.residual:\n act_sum = act_sum + gcn_inp\n gcn_out = self.activation(act_sum)\n out.append(gcn_out)\n\n return out[-1]", "title": "" }, { "docid": "48f4d6a0910879fc8faf9c2a50410886", "score": "0.48996678", "text": "def test_multi_input_single_batch(self):\n # Folder must be root to load in make_net properly\n if os.getcwd().split('\\\\')[-1] == 'tests': os.chdir('../..')\n \n # Get 'empty' GRU\n gru = get_gru_berkeley(2)\n \n # Completely zero GRU, all inputs get ignored\n self.assertEqual(gru(asarray([[0, 0]])), 0)\n gru.hx = asarray([]) # GRU keeps own state, reset it\n self.assertEqual(gru(asarray([[1, 1]])), 0)\n gru.hx = asarray([]) # GRU keeps own state, reset it\n \n # Modify the GRU to have weight-arrays of one\n gru.weight_hh = ones((3, 1))\n gru.weight_xh = ones((3, 2))\n \n # Load in PyTorch native GRU to compare with\n pytorch_gru = get_pytorch_gru(2, gru)\n \n # Test if they continue to obtain the same results\n for _ in range(100):\n i = random()\n a 
= gru(asarray([[i, i]]))\n            gru.hx = asarray([])  # GRU keeps own state, reset it\n            b = pytorch_gru(FloatTensor([[i, i]]))\n            self.assertEqual(a.shape, b.shape)\n            for aa, bb in zip(a, b):\n                self.assertTrue(float(aa) - EPSILON <= float(bb) <= float(aa) + EPSILON)\n        \n        # Set bias to minus ones\n        gru.bias = ones((3,)) * -1\n        \n        # Load in PyTorch native GRU to compare with\n        pytorch_gru = get_pytorch_gru(2, gru)\n        \n        # Test if they continue to obtain the same results\n        for _ in range(100):\n            i = random()\n            a = gru(asarray([[i, i]]))\n            gru.hx = asarray([])  # GRU keeps own state, reset it\n            b = pytorch_gru(FloatTensor([[i, i]]))\n            self.assertEqual(a.shape, b.shape)\n            for aa, bb in zip(a, b):\n                self.assertTrue(float(aa) - EPSILON <= float(bb) <= float(aa) + EPSILON)", "title": "" }, { "docid": "7ec3a23f53d72cfb40216cc42766dc1b", "score": "0.48975766", "text": "def pool_forward(A_prev, kernel_shape, stride=(1, 1), mode='max'):\n\n    # num images\n    n_images = A_prev.shape[0]\n\n    # input_width and input_height\n    i_h = A_prev.shape[1]\n    i_w = A_prev.shape[2]\n\n    # images channel\n    i_c = A_prev.shape[3]\n\n    # kernel_width and kernel_height\n    k_h = kernel_shape[0]\n    k_w = kernel_shape[1]\n\n    # stride_height and stride_width\n    s_h = stride[0]\n    s_w = stride[1]\n\n    # output_height and output_width\n    o_h = int((i_h - k_h) / s_h) + 1\n    o_w = int((i_w - k_w) / s_w) + 1\n\n    # creating outputs of size: [n_images, o_h ⊛ o_w ⊛ k_c ⊛ i_c]\n    outputs = np.zeros((n_images, o_h, o_w, i_c))\n\n    # vectorizing the n_images into an array (creating a new dimension)\n    imgs_arr = np.arange(0, n_images)\n\n    # function selector\n    funct = np.max\n    if (mode == \"avg\"):\n        funct = np.average\n\n    # iterating over the output array and generating the pooling\n    for x in range(o_h):\n        for y in range(o_w):\n            x0 = x * s_h\n            y0 = y * s_w\n            x1 = x0 + k_h\n            y1 = y0 + k_w\n            outputs[imgs_arr, x, y] = funct(A_prev[imgs_arr, x0: x1, y0: y1],\n                                            axis=(1, 2))\n\n    return outputs", "title": "" }, { "docid": "1a3342451d41e7f6bc3d33528624abe4", "score": "0.4892275", "text": "def forward(self, x, print_sizes = False) -> torch.Tensor:\n\n        x_conv1 = self.conv1(x)\n        x_relu1 = self.relu1(x_conv1)\n        x_bn = self.batch_norm1(x_relu1)\n        if print_sizes: print (x_bn.shape)\n\n        x_conv2 = self.batch_norm2(self.relu2(self.conv2(x_bn)))\n        if print_sizes: print (x_conv2.shape)\n\n        x_pool = self.pool(x_conv2)\n        # print (x_pool)\n        if print_sizes: print(x_pool.shape)\n\n        return x_pool", "title": "" }, { "docid": "3cd4f2c210b3496a259e920f7f198c7b", "score": "0.48796692", "text": "def run(self, data):\n        round_number = 0\n        weights = [1./len(data) for _ in range(len(data))]\n        while round_number < self.max_rounds and not self.converged:\n            self.weights[round_number + 1] = weights\n            round = BoostRound(self, round_number)\n            round.run(data, weights)\n            self.training_errors[round_number + 1] = round.training_errors\n            self.testing_errors[round_number + 1] = round.testing_error\n            self.training_errors_weighted[round_number + 1] = round.training_errors_weighted\n            self.weight_distribution[round_number + 1] = round.weight_distribution\n            self.converged = round.converged\n\n\n            round_number += 1", "title": "" }, { "docid": "a7d30932fdb90d5c9def5e38517f1b68", "score": "0.48684937", "text": "def propagate_parallel(self):\n\n        def one_run(i):\n            np.random.seed() # reinitialise the random seed, otherwise the trajectories \n                             # become correlated\n            this_traj = self.pt_trajs[i]\n            this_walker = self.walkers[i]\n            walker_out = this_traj.run_serial(this_walker)\n            return walker_out\n\n        walkers = np.asarray( 
sampling.apply_pool( self.pt_pool,one_run,range(len(self.pt_trajs)) ) )\n return walkers", "title": "" }, { "docid": "40feb1825539ce5e1ede601659c34b48", "score": "0.48642635", "text": "def round_robin_split(reviews):\n ten_splits = []\n for i in range(10):\n ten_splits.append([reviews[r] for r in range(len(reviews)) if r % 10 == i])\n return ten_splits", "title": "" }, { "docid": "aec571d8e93bb8ab49a187ec6215d197", "score": "0.48621538", "text": "def iterate_minibatches(inputs, targets, batchsize=100, shuffle=True):\n assert len(inputs) == len(targets)\n if shuffle:\n indices = np.arange(len(inputs))\n np.random.shuffle(indices)\n for start_idx in range(0, len(inputs) - batchsize + 1, batchsize):\n if shuffle:\n excerpt = indices[start_idx:start_idx + batchsize]\n else:\n excerpt = slice(start_idx, start_idx + batchsize)\n yield inputs[excerpt], targets[excerpt]", "title": "" }, { "docid": "486871963f5474e8f30deaf48c3b3802", "score": "0.4856252", "text": "def evolve_networks(self):\r\n evolution_ops = []\r\n sorted_networks = self.sort_by_fitness(self.networks)\r\n winners = sorted_networks[0:self.num_top_networks_to_keep]\r\n if (len(sorted_networks) > 0 and\r\n len([n for n in self.networks if\r\n n.fitness == -sys.maxsize]) == len(self.networks)):\r\n # Reinitialize all networks, they all failed without any achievement\r\n print('Resetting all networks as they all have negative fitness!')\r\n for network in sorted_networks:\r\n evolution_ops += [network.reinitialize_network()]\r\n else:\r\n networks_to_evolve = sorted_networks[self.num_top_networks_to_keep:]\r\n # Keep num_top_networks_to_keep unchanged\r\n for i, network in enumerate(networks_to_evolve):\r\n if i == 0:\r\n # Crossover of two best networks\r\n ops = self._perform_crossover(\r\n network, winners[0], winners[1], self.evolve_bias,\r\n self.evolve_kernel)\r\n evolution_ops += ops\r\n elif i < (len(networks_to_evolve) - self.num_top_networks_to_mutate):\r\n # Crossover of random winners\r\n parentA = random.choice(winners)\r\n parentB = random.choice(winners)\r\n while parentA == parentB:\r\n parentB = random.choice(winners)\r\n ops = self._perform_crossover(\r\n network, parentA, parentB, self.evolve_bias, self.evolve_kernel)\r\n evolution_ops += ops\r\n else:\r\n # Mutate random winners: num_top_networks_to_mutate\r\n ops = self.copy_network_variables(random.choice(winners), network)\r\n evolution_ops += ops\r\n # Assure, that all assignments are run before performing mutation\r\n with tf.control_dependencies(evolution_ops):\r\n ops = self._perform_mutation(\r\n network, self.evolve_bias, self.evolve_kernel)\r\n evolution_ops += ops\r\n\r\n return evolution_ops", "title": "" }, { "docid": "f6c14754d038cbece8ffbc358b547c09", "score": "0.48509577", "text": "def test_multi_input_multi_batch(self):\n # Folder must be root to load in make_net properly\n if os.getcwd().split('\\\\')[-1] == 'tests': os.chdir('../..')\n \n # Get 'empty' GRU\n gru = get_gru_pytorch_copy(2)\n \n # Completely zero GRU, all inputs get ignored\n result = gru(tensor([[0, 0], [0, 0]], dtype=float64))\n for aa, bb in zip(result, asarray([[0], [0]])):\n self.assertTrue(float(aa) - EPSILON <= float(bb) <= float(aa) + EPSILON)\n gru.hx = None # GRU keeps own state, reset it\n result = gru(tensor([[1, 1], [1, 1]], dtype=float64))\n for aa, bb in zip(result, asarray([[0], [0]])):\n self.assertTrue(float(aa) - EPSILON <= float(bb) <= float(aa) + EPSILON)\n gru.hx = None # GRU keeps own state, reset it\n \n # Modify the GRU to have weight-arrays of one\n 
gru.weight_hh = tensor(ones((3, 1)), dtype=float64)\n gru.weight_ih = tensor(ones((3, 2)), dtype=float64)\n \n # Load in PyTorch native GRU to compare with\n pytorch_gru = get_pytorch_gru(2, gru)\n \n # Test if they continue to obtain the same results\n for _ in range(100):\n i = random()\n a = gru(tensor([[i, i], [i, i]], dtype=float64))\n gru.hx = None # GRU keeps own state, reset it\n b = pytorch_gru(FloatTensor([[i, i], [i, i]]))\n self.assertEqual(a.shape, b.shape)\n for aa, bb in zip(a, b):\n self.assertTrue(float(aa) - EPSILON <= float(bb) <= float(aa) + EPSILON)\n \n # Set bias_ih to minus ones\n gru.bias_ih = tensor(ones((3,)), dtype=float64) * -1\n \n # Load in PyTorch native GRU to compare with\n pytorch_gru = get_pytorch_gru(2, gru)\n \n # Test if they continue to obtain the same results\n for _ in range(100):\n i = random()\n a = gru(tensor([[i, i], [i, i]], dtype=float64))\n gru.hx = None # GRU keeps own state, reset it\n b = pytorch_gru(FloatTensor([[i, i], [i, i]]))\n self.assertEqual(a.shape, b.shape)\n for aa, bb in zip(a, b):\n self.assertTrue(float(aa) - EPSILON <= float(bb) <= float(aa) + EPSILON)", "title": "" }, { "docid": "6727eb081989bf34f413f668eb732710", "score": "0.48474112", "text": "def iterate_minibatches(inputs, targets, batchsize, shuffle=False):\n assert len(inputs) == len(targets)\n if shuffle:\n indices = numpy.arange(len(inputs))\n numpy.random.shuffle(indices)\n for start_idx in range(0, len(inputs) - batchsize + 1, batchsize):\n if shuffle:\n excerpt = indices[start_idx:start_idx + batchsize]\n else:\n excerpt = slice(start_idx, start_idx + batchsize)\n yield inputs[excerpt], targets[excerpt]", "title": "" }, { "docid": "d7aa1617b8a88dd2912a31f2504314a8", "score": "0.48457602", "text": "def __init__(self,\n num_classes=3,\n num_input_features=[64, 128, 256],\n rpn_stride=4,\n layer_nums=[3, 3, 5, 5],\n layer_strides=[1, 2, 2, 2],\n upsample_strides=[2, 2, 2],\n num_anchor_per_loc=20,\n box_code_size=7,\n num_direction_bins=2,\n is_multi=True):\n super(EfficientRPN, self).__init__()\n\n self._num_input_features = num_input_features\n self._layer_strides = layer_strides\n self._layer_nums = layer_nums\n self._upsample_strides = upsample_strides\n self._num_anchor_per_loc = num_anchor_per_loc\n self._num_direction_bins = num_direction_bins\n self._num_classes = num_classes\n self._box_code_size = box_code_size\n self._rpn_stride = rpn_stride\n self._is_multi = is_multi\n\n blocks = []\n downblocks = []\n upblocks = []\n upaddblocks = []\n predblocks = []\n\n if self._is_multi:\n if layer_strides[0] == 1:\n block_filters = num_input_features + [num_input_features[-1]*2]\n else:\n block_filters = num_input_features + [num_input_features[-1]*2, num_input_features[-1]*4]\n else:\n block_filters = []\n for i in range(len(layer_strides)):\n if layer_strides[0] == 1:\n block_filters.append(num_input_features * (2 ** i))\n else:\n if i == 0:\n block_filters.append(num_input_features * (2 ** i))\n block_filters.append(num_input_features * (2 ** (i + 1)))\n\n for i, layer_num in enumerate(layer_nums):\n if layer_strides[0] == 1:\n if i == 0:\n downblocks.append(nn.Sequential(\n nn.ZeroPad2d(1),\n nn.Conv2d(block_filters[i], block_filters[i], 3, stride=layer_strides[i], bias=False),\n nn.BatchNorm2d(block_filters[i], eps=1e-3, momentum=0.01),\n nn.ReLU(),\n ))\n else:\n downblocks.append(nn.Sequential(\n nn.ZeroPad2d(1),\n nn.Conv2d(block_filters[i-1], block_filters[i], 3, stride=layer_strides[i], bias=False),\n nn.BatchNorm2d(block_filters[i], eps=1e-3, 
momentum=0.01),\n                        nn.ReLU(),\n                    ))\n\n                block = self._make_layer(block_filters[i], layer_num)\n            else:\n                downblocks.append(nn.Sequential(\n                    nn.ZeroPad2d(1),\n                    nn.Conv2d(block_filters[i], block_filters[i+1], 3, stride=layer_strides[i], bias=False),\n                    nn.BatchNorm2d(block_filters[i+1], eps=1e-3, momentum=0.01),\n                    nn.ReLU(),\n                ))\n\n                block = self._make_layer(block_filters[i+1], layer_num)\n            blocks.append(block)\n\n        for i in range(len(upsample_strides)):\n            stride = np.round(upsample_strides[i]).astype(np.int64)\n            upblocks.append(nn.Sequential(\n                nn.Upsample(scale_factor=stride, mode=\"bilinear\", align_corners=True),\n                nn.ZeroPad2d(1),\n                nn.Conv2d(block_filters[-(i+1)], block_filters[-(i+2)], 3, stride=1, bias=False),\n                nn.BatchNorm2d(block_filters[-(i+2)], eps=1e-3, momentum=0.01),\n                nn.ReLU(),\n            ))\n            upaddblocks.append(nn.Sequential(\n                nn.ZeroPad2d(1),\n                nn.Conv2d(block_filters[-(i+2)], block_filters[-(i+2)], 3, stride=1, bias=False),\n                nn.BatchNorm2d(block_filters[-(i+2)], eps=1e-3, momentum=0.01),\n                nn.ReLU(),\n            ))\n        if layer_strides[0] == 1:\n            t = 0\n        else:\n            t = 1\n\n        predblocks.append(nn.Sequential(\n            nn.ZeroPad2d(1),\n            nn.Conv2d(block_filters[t], block_filters[t], 3, stride=1, bias=False),\n            nn.BatchNorm2d(block_filters[t], eps=1e-3, momentum=0.01),\n            nn.ReLU(),\n        ))\n\n        predblocks.append(nn.Sequential(\n            nn.Upsample(scale_factor=2, mode=\"bilinear\", align_corners=True),\n            nn.ZeroPad2d(1),\n            nn.Conv2d(block_filters[t+1], block_filters[t], 3, stride=1, bias=False),\n            nn.BatchNorm2d(block_filters[t], eps=1e-3, momentum=0.01),\n            nn.ReLU(),\n        ))\n\n        predblocks.append(nn.Sequential(\n            nn.Upsample(scale_factor=4, mode=\"bilinear\", align_corners=True),\n            nn.ZeroPad2d(1),\n            nn.Conv2d(block_filters[t+2], block_filters[t], 3, stride=1, bias=False),\n            nn.BatchNorm2d(block_filters[t], eps=1e-3, momentum=0.01),\n            nn.ReLU(),\n        ))\n\n        self._blocks = nn.ModuleList(blocks)\n        self._downblocks = nn.ModuleList(downblocks)\n        self._upblocks = nn.ModuleList(upblocks)\n        self._upaddblocks = nn.ModuleList(upaddblocks)\n        self._predblocks = nn.ModuleList(predblocks)\n\n        self._conv_cls = nn.Conv2d(block_filters[t] * 3, num_anchor_per_loc*num_classes, 1)\n        self._conv_box = nn.Conv2d(block_filters[t] * 3, num_anchor_per_loc*box_code_size, 1)\n        self._conv_dir = nn.Conv2d(block_filters[t] * 3, num_anchor_per_loc*num_direction_bins, 1)", "title": "" }, { "docid": "ccc8b4d3a4ee7efa864699f205255665", "score": "0.4843603", "text": "def _worker_task(self, num_subnetworks):\n\n    if self._drop_remainder and self._num_workers > 1 and (num_subnetworks >\n                                                           self._num_workers):\n      logging.log_first_n(\n          logging.WARNING,\n          \"With drop_remainder=True, %s workers and %s subnetworks, the last %s \"\n          \"subnetworks will be dropped and will not be trained\", 1,\n          self._num_workers, num_subnetworks,\n          num_subnetworks - self._num_workers - 1)\n    # The first worker will always build the ensemble so we add 1.\n    return self._worker_index % (num_subnetworks + 1)", "title": "" }, { "docid": "43a1f10d2fceb622a74023d80f626aa5", "score": "0.48391882", "text": "def pool_forward(A_prev, hparameters, mode = \"max\"):\n    \n    # Retrieve dimensions\n    (m, n_H_prev, n_W_prev, n_C_prev) = A_prev.shape\n    \n    # Retrieve hyperparameters\n    f = hparameters[\"f\"]\n    stride = hparameters[\"stride\"]\n    \n    # Define the dimensions of the output\n    n_H = int(1 + (n_H_prev - f) / stride)\n    n_W = int(1 + (n_W_prev - f) / stride)\n    n_C = n_C_prev\n    \n    # Initialize output matrix A\n    A = np.zeros((m, n_H, n_W, n_C))              \n    \n    \n    for i in range(m):                         # loop over the training examples\n        
for h in range(n_H): # loop on the vertical axis of the output volume\n for w in range(n_W): # loop on the horizontal axis of the output volume\n for c in range (n_C): # loop over the channels of the output volume\n \n \n vert_start = h * stride\n vert_end = vert_start + f\n horiz_start = w * stride\n horiz_end = horiz_start + f\n a_prev_slice = A_prev[i, vert_start:vert_end, horiz_start:horiz_end, c]\n if mode == \"max\":\n A[i, h, w, c] = np.max(a_prev_slice)\n elif mode == \"average\":\n A[i, h, w, c] = np.mean(a_prev_slice)\n \n # Store the input and hparameters in \"cache\" for pool_backward()\n cache = (A_prev, hparameters)\n \n # Making sure your output shape is correct\n assert(A.shape == (m, n_H, n_W, n_C))\n \n return A, cache", "title": "" }, { "docid": "9d2d5618772a3de0997a670537282fd1", "score": "0.4835962", "text": "def step(self, closure=None):\n loss = None\n if closure is not None:\n loss = closure()\n\n self.balanced_iter += 1\n is_balanced = False\n\n # one group for one rram\n for group_id, group in enumerate(self.param_groups):\n weight_decay = group['weight_decay']\n momentum = group['momentum']\n dampening = group['dampening']\n nesterov = group['nesterov']\n\n is_bn_group = group['config'] == 'bn'\n\n d_group = []\n dim_group = []\n for p in group['params']:\n if p.grad is None:\n continue\n d_p = p.grad.data\n if weight_decay != 0:\n d_p.add_(weight_decay, p.data)\n if momentum != 0:\n param_state = self.state[p]\n if 'momentum_buffer' not in param_state:\n buf = param_state['momentum_buffer'] = p.data.new().resize_as_(p.data).zero_()\n buf.mul_(momentum).add_(d_p)\n else:\n buf = param_state['momentum_buffer']\n buf.mul_(momentum).add_(1 - dampening, d_p)\n if nesterov:\n d_p = d_p.add(momentum, buf)\n else:\n d_p = buf\n\n if is_bn_group:\n p.data.add_(-group['lr'], d_p)\n continue\n\n if 'accumulation_buffer' not in param_state:\n buf = param_state['accumulation_buffer'] = p.data.new().resize_as_(p.data).zero_()\n else:\n buf = param_state['accumulation_buffer']\n buf.add_(-group['lr'], d_p)\n buf = buf.view(buf.size()[0], -1)\n d_group.append(buf)\n dim_group.append(buf.size()[1])\n\n if is_bn_group:\n continue\n\n # update weight\n d_p = torch.cat(d_group, 1)\n dim = d_p.size()[1]\n importance, m_col_ids = d_p.abs().max(0) # pytorch 2.0: size [dim] ; 1.12.0: size [1 x dim]\n if OLD_PYTORCH:\n _, m_row_id = importance.squeeze().max(0)\n m_row_id = m_row_id[0]\n m_col_id = m_col_ids[0][m_row_id]\n else:\n _, m_row_id = importance.max(0)\n #add a judgement condition to decide whether to update in this iteration\n m_row_id = m_row_id.item()\n m_col_id = m_col_ids[m_row_id]\n\n dim_p_total = 0\n for p, dim_p in zip(group['params'], dim_group):\n if m_row_id < (dim_p_total + dim_p):\n m_row_id_p = m_row_id - dim_p_total\n if dim <= self.small_rram_size and self.is_small_by_pos:\n # small rram write in position\n delta = -group['lr'] * d_p[m_col_id, m_row_id]\n # update the weight if the gradient has been accumulated to > self.fixed_step\n if abs(delta) > self.fixed_step:\n update_step = (delta / self.fixed_step).int().float() * self.fixed_step\n p.data.view(p.data.size()[0], -1)[m_col_id, m_row_id_p] += update_step\n self.state[p]['accumulation_buffer'].view(p.data.size()[0], -1)[m_col_id, m_row_id_p] = (delta - update_step) / -group['lr']\n self.frequency_list[group_id][self.row_map_m_to_r_list[group_id][m_row_id], m_col_id] += 1\n self.row_frequency_list[group_id][self.row_map_m_to_r_list[group_id][m_row_id]] += 1\n else:\n # large rram write in row\n #d_p_t = 
d_p.narrow(1, m_row_id, 1) # size of [n * 1]\n #index = d_p_t.abs().gt(self.fixed_step / group['lr']).squeeze().nonzero().squeeze()\n #if index.numel() > 0:\n #delta = -group['lr'] * d_p.index_select(0, index)\n #delta = d_p.narrow(1, m_row_id, 1)#-group['lr'] * d_p.narrow(1, m_row_id, 1)\n #update_step = delta #(delta / self.fixed_step).int().float() * self.fixed_step\n p.data.view(p.data.size()[0], -1).narrow(1, m_row_id_p, 1).add_(-group['lr'], d_p.narrow(1, m_row_id, 1))#update_step)\n self.state[p]['accumulation_buffer'].view(p.data.size()[0], -1).narrow(1, m_row_id_p, 1).zero_() \n #self.state[p]['accumulation_buffer'].view(p.data.size()[0], -1).narrow(1, m_row_id_p, 1).add_((delta - update_step) / -group['lr'])\n self.frequency_list[group_id][self.row_map_m_to_r_list[group_id][m_row_id], :] += 1\n self.row_frequency_list[group_id][self.row_map_m_to_r_list[group_id][m_row_id]] += 1\n break\n dim_p_total += dim_p\n\n # balanced swap\n if self.balanced_iter % self.balanced_freq[group['config']] == 0:\n is_balanced = True\n _, lru = self.row_frequency_list[group_id].sort(0)\n # lru = sorted(range(dim), key=lambda i: self.frequency_list[group_id][i])\n balanced_row_num = self.balanced_row_num_list[group_id]\n mru_r_row_ids = lru[-balanced_row_num:].tolist()\n lru_r_row_ids = lru[:balanced_row_num].tolist()\n for lru_r_row_id, mru_r_row_id in zip(lru_r_row_ids, reversed(mru_r_row_ids)):\n mru_m_row_id = self.row_map_r_to_m_list[group_id][mru_r_row_id]\n lru_m_row_id = self.row_map_r_to_m_list[group_id][lru_r_row_id]\n self.row_map_m_to_r_list[group_id][mru_m_row_id] = lru_r_row_id\n self.row_map_m_to_r_list[group_id][lru_m_row_id] = mru_r_row_id\n self.row_map_r_to_m_list[group_id][mru_r_row_id] = lru_m_row_id\n self.row_map_r_to_m_list[group_id][lru_r_row_id] = mru_m_row_id\n self.frequency_list[group_id][lru_r_row_id, :].add_(1)\n self.frequency_list[group_id][mru_r_row_id, :].add_(1)\n self.row_frequency_list[group_id][lru_r_row_id] += 1\n self.row_frequency_list[group_id][mru_r_row_id] += 1\n if is_balanced:\n print('='*89)\n print('Swap Rows')\n print('='*89)\n\n return loss", "title": "" }, { "docid": "a7d30932fdb90d5c9def5e38517f1b68", "score": "0.4829011", "text": "def _process_shard(args):\n # type: ((str, int, int, int, bool)) -> (np.ndarray, np.ndarray, np.ndarray)\n shard_path, num_items, num_neg, seed, is_training, match_mlperf = args\n np.random.seed(seed)\n\n # The choice to store the training shards in files rather than in memory\n # is motivated by the fact that multiprocessing serializes arguments,\n # transmits them to map workers, and then deserializes them. 
By storing the\n # training shards in files, the serialization work only needs to be done once.\n #\n # A similar effect could be achieved by simply holding pickled bytes in\n # memory, however the processing is not I/O bound and is therefore\n # unnecessary.\n with tf.gfile.Open(shard_path, \"rb\") as f:\n shard = pickle.load(f)\n\n users = shard[rconst.TRAIN_KEY][movielens.USER_COLUMN]\n items = shard[rconst.TRAIN_KEY][movielens.ITEM_COLUMN]\n\n if not is_training:\n # For eval, there is one positive which was held out from the training set.\n test_positive_dict = dict(zip(\n shard[rconst.EVAL_KEY][movielens.USER_COLUMN],\n shard[rconst.EVAL_KEY][movielens.ITEM_COLUMN]))\n\n delta = users[1:] - users[:-1]\n boundaries = ([0] + (np.argwhere(delta)[:, 0] + 1).tolist() +\n [users.shape[0]])\n\n user_blocks = []\n item_blocks = []\n label_blocks = []\n for i in range(len(boundaries) - 1):\n assert len(set(users[boundaries[i]:boundaries[i+1]])) == 1\n current_user = users[boundaries[i]]\n\n positive_items = items[boundaries[i]:boundaries[i+1]]\n positive_set = set(positive_items)\n if positive_items.shape[0] != len(positive_set):\n raise ValueError(\"Duplicate entries detected.\")\n\n if is_training:\n n_pos = len(positive_set)\n negatives = stat_utils.sample_with_exclusion(\n num_items, positive_set, n_pos * num_neg, replacement=True)\n\n else:\n if not match_mlperf:\n # The mlperf reference allows the holdout item to appear as a negative.\n # Including it in the positive set makes the eval more stringent,\n # because an appearance of the test item would be removed by\n # deduplication rules. (Effectively resulting in a minute reduction of\n # NUM_EVAL_NEGATIVES)\n positive_set.add(test_positive_dict[current_user])\n\n negatives = stat_utils.sample_with_exclusion(\n num_items, positive_set, num_neg, replacement=match_mlperf)\n positive_set = [test_positive_dict[current_user]]\n n_pos = len(positive_set)\n assert n_pos == 1\n\n user_blocks.append(current_user * np.ones(\n (n_pos * (1 + num_neg),), dtype=np.int32))\n item_blocks.append(\n np.array(list(positive_set) + negatives, dtype=np.uint16))\n labels_for_user = np.zeros((n_pos * (1 + num_neg),), dtype=np.int8)\n labels_for_user[:n_pos] = 1\n label_blocks.append(labels_for_user)\n\n users_out = np.concatenate(user_blocks)\n items_out = np.concatenate(item_blocks)\n labels_out = np.concatenate(label_blocks)\n\n assert users_out.shape == items_out.shape == labels_out.shape\n return users_out, items_out, labels_out", "title": "" }, { "docid": "0199107f611ecdca644f9d3096ba24b8", "score": "0.48227844", "text": "def setBatchSize(self, _batch_size = 0): # this formally assigns the mode and index\n self.batch_size = int(_batch_size) if isint(_batch_size) else len(_batch_size)\n if not(self.batch_size): return\n\n # Pooling mode defaults to ranger\n \n self.mode = self.pool_mode\n\n if self.pool_mode == POOLMODE_DEFAULT:\n if np.all(self.stride == self.window):\n self.mode = POOLMODE_AXES\n else:\n self.mode = POOLMODE_RANGER\n\n # Whatever mode, we need self.stride for the back-propagation (note this differs from ConvLayer())\n if self.mode == POOLMODE_TEST1:\n self.strides = strider(np.hstack((self.batch_size, self.input_maps, self.input_dims)),\n np.hstack((self.window)), \n np.hstack((self.stride)))\n\n strides_num = self.strides.shape[-2] # number of strides per map\n\n inner_ind = np.tile(np.arange(strides_num).reshape(1, 1, strides_num), (self.batch_size, self.maps, 1))\n middl_ind = np.tile(np.arange(self.maps).reshape(1, self.maps, 1), 
(self.batch_size, 1, strides_num))\n outer_ind = np.tile(np.arange(self.batch_size).reshape(self.batch_size, 1, 1), (1, self.maps, strides_num))\n self.ind_pooled = [outer_ind, middl_ind, inner_ind, None]\n\n else:\n self.strides = strider(np.hstack((self.input_maps, self.input_dims)),\n np.hstack((self.window)), \n np.hstack((self.stride)))\n\n strides_num = self.strides.shape[-2] # number of strides per map\n\n inner_ind = np.tile(np.arange(strides_num).reshape(1, strides_num), (self.maps, 1))\n outer_ind = np.tile(np.arange(self.maps).reshape(self.maps, 1), (1, strides_num))\n self.ind_pooled = [outer_ind, inner_ind, None]\n\n self.scores = np.empty(np.hstack((self.batch_size, self.maps, self.dims)), dtype = float)\n self.arg_pooled = np.empty(np.hstack((self.batch_size, self.maps, strides_num)), dtype = int)\n self.back_pool = np.empty(np.hstack((self.batch_size, self.maps, strides_num, self.Window)), dtype = float)\n self.back_data = np.empty(np.hstack((self.batch_size, self.input_maps, self.input_dims)), dtype = float)", "title": "" }, { "docid": "f893d830e23c6c085509be8f8df0da9f", "score": "0.48193127", "text": "def batch_test(params, inputs, targets, variable):\n\n # Values: 0.1 to 1.0\n values = [0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.0]\n epochs = []\n\n # Set custom re-runs\n if variable == 1: # learning\n values = []\n if variable == 2: # momentum\n values = []\n if variable == 3: # weights\n values = []\n\n for v in values:\n run_epochs = []\n\n for i in xrange(30):\n bpnn = BackPropagationNeuralNetwork(params, inputs, targets)\n if variable == 1: # learning rate\n bpnn.LEARNING_CONSTANT = v\n print 'Testing learning rates...', i\n elif variable == 2: # momentum\n bpnn.MOMENTUM_CONSTANT = v\n print 'Testing momentum constants...', i\n elif variable == 3: # initial weight range\n bpnn.INITIAL_WEIGHT_RANGE = v\n print 'Testing initial weight ranges...', i\n else:\n print 'Failure. 
Variable value must be 1, 2, or 3.'\n return []\n\n s = bpnn.train()\n while not s:\n s = bpnn.train\n\n run_epochs.append(bpnn.num_epochs)\n print 'num epochs', bpnn.num_epochs\n\n epochs.append(run_epochs)\n\n print 'Settings:'\n print 'Learning Constant:', bpnn.LEARNING_CONSTANT\n print 'Momentum Constant:', bpnn.MOMENTUM_CONSTANT\n print 'Initial Weight range:', bpnn.INITIAL_WEIGHT_RANGE\n print ''\n\n return epochs", "title": "" }, { "docid": "1cf4768e1b84085bee004351b541488b", "score": "0.48067772", "text": "def test_single_input_single_batch(self):\n # Folder must be root to load in make_net properly\n if os.getcwd().split('\\\\')[-1] == 'tests': os.chdir('../..')\n \n # Get 'empty' GRU\n gru = get_gru_berkeley(1)\n \n # Completely zero GRU, all inputs get ignored\n self.assertEqual(gru(asarray([[0]])), 0)\n gru.hx = asarray([]) # GRU keeps own state, reset it\n self.assertEqual(gru(asarray([[1]])), 0)\n gru.hx = asarray([]) # GRU keeps own state, reset it\n \n # Modify the GRU to have weight-arrays of one\n gru.weight_hh = asarray(ones((3, 1)))\n gru.weight_xh = asarray(ones((3, 1)))\n \n # Load in PyTorch native GRU to compare with\n pytorch_gru = get_pytorch_gru(1, gru)\n \n # Test if they continue to obtain the same results\n for _ in range(100):\n i = random()\n a = gru(asarray([[i]]))\n gru.hx = asarray([]) # GRU keeps own state, reset it\n b = pytorch_gru(FloatTensor([[i]]))\n self.assertEqual(a.shape, b.shape)\n self.assertTrue(float(a) - EPSILON <= float(b) <= float(a) + EPSILON)\n \n # Set bias to minus ones\n gru.bias = ones((3,)) * -1\n \n # Load in PyTorch native GRU to compare with\n pytorch_gru = get_pytorch_gru(1, gru)\n \n # Test if they continue to obtain the same results\n for _ in range(100):\n i = random()\n a = gru(asarray([[i]]))\n gru.hx = asarray([]) # GRU keeps own state, reset it\n b = pytorch_gru(FloatTensor([[i]]))\n self.assertEqual(a.shape, b.shape)\n self.assertTrue(float(a) - EPSILON <= float(b) <= float(a) + EPSILON)", "title": "" }, { "docid": "7ead468c0951b7f96028022720ccd856", "score": "0.48038852", "text": "def test_multi_input_single_batch(self):\n # Folder must be root to load in make_net properly\n if os.getcwd().split('\\\\')[-1] == 'tests': os.chdir('../..')\n \n # Get 'empty' GRU\n gru = get_gru_pytorch_copy(2)\n \n # Completely zero GRU, all inputs get ignored\n self.assertEqual(gru(tensor([[0, 0]], dtype=float64)), 0)\n gru.hx = None # GRU keeps own state, reset it\n self.assertEqual(gru(tensor([[1, 1]], dtype=float64)), 0)\n gru.hx = None # GRU keeps own state, reset it\n \n # Modify the GRU to have weight-arrays of one\n gru.weight_hh = tensor(ones((3, 1)), dtype=float64)\n gru.weight_ih = tensor(ones((3, 2)), dtype=float64)\n \n # Load in PyTorch native GRU to compare with\n pytorch_gru = get_pytorch_gru(2, gru)\n \n # Test if they continue to obtain the same results\n for _ in range(100):\n i = random()\n a = gru(tensor([[i, i]], dtype=float64))\n gru.hx = None # GRU keeps own state, reset it\n b = pytorch_gru(FloatTensor([[i, i]]))\n self.assertEqual(a.shape, b.shape)\n for aa, bb in zip(a, b):\n self.assertTrue(float(aa) - EPSILON <= float(bb) <= float(aa) + EPSILON)\n \n # Set bias_ih to minus ones\n gru.bias_ih = tensor(ones((3,)), dtype=float64) * -1\n \n # Load in PyTorch native GRU to compare with\n pytorch_gru = get_pytorch_gru(2, gru)\n \n # Test if they continue to obtain the same results\n for _ in range(100):\n i = random()\n a = gru(tensor([[i, i]], dtype=float64))\n gru.hx = None # GRU keeps own state, reset it\n b = 
pytorch_gru(FloatTensor([[i, i]]))\n self.assertEqual(a.shape, b.shape)\n for aa, bb in zip(a, b):\n self.assertTrue(float(aa) - EPSILON <= float(bb) <= float(aa) + EPSILON)", "title": "" }, { "docid": "9a0a167ae14ed397d6905c89cf46f2e8", "score": "0.47997963", "text": "def forward(self, input):\n\n # take the positive (object) scores\n scores = input[0][:, self._num_anchors:, :, :] # (batch, 9, H, W)\n bbox_deltas = input[1] # (batch, 36, H, W)\n im_info = input[2] # (batch, 2)\n cfg_key = input[3] # 'train/test'\n\n pre_nms_topN = cfg[cfg_key].RPN_PRE_NMS_TOP_N # 6000 for train\n post_nms_topN = cfg[cfg_key].RPN_POST_NMS_TOP_N # 300 for test\n nms_thresh = cfg[cfg_key].RPN_NMS_THRESH # 0.7\n min_size = cfg[cfg_key].RPN_MIN_SIZE # 16\n batch_size = bbox_deltas.size(0) # batch\n\n # compute the shift value for H*W cells\n feat_height, feat_width = scores.size(2), scores.size(3) # H, W\n shift_x = np.arange(0, feat_width) * self._feat_stride\n shift_y = np.arange(0, feat_height) * self._feat_stride\n shift_x, shift_y = np.meshgrid(shift_x, shift_y)\n shifts = torch.from_numpy(np.vstack((shift_x.ravel(), shift_y.ravel(),\n shift_x.ravel(), shift_y.ravel())).transpose())\n shifts = shifts.contiguous().type_as(scores).float() # (H*W, 4)\n\n # copy and shift the 9 anchors for H*W cells\n # copy the H*W*9 anchors for batch images\n A = self._num_anchors # 9\n K = shifts.size(0) # H * W\n self._anchors = self._anchors.type_as(scores)\n anchors = self._anchors.view(1, A, 4) + shifts.view(K, 1, 4) # (H*W, 9, 4) anchors for 1 image\n anchors = anchors.view(1, K*A, 4).expand(batch_size, K*A, 4) # (batch, H*W*9, 4) anchors for batch images\n\n # make bbox_deltas the same order with the anchors:\n bbox_deltas = bbox_deltas.permute(0, 2, 3, 1).contiguous() # (batch, 36, H, W) --> (batch, H, W, 36)\n bbox_deltas = bbox_deltas.view(batch_size, -1, 4) # (batch, H, W, 36) --> (batch, H*W*9, 4)\n\n # Same story for the scores:\n scores = scores.permute(0, 2, 3, 1).contiguous() # (batch, 9, H, W) --> (batch, H, W, 9)\n scores = scores.view(batch_size, -1) # (batch, H, W, 9) --> (batch, H*W*9)\n\n # Finetune [x1, y1, x2, y2] of anchors according to the predicted bbox_delta\n proposals = bbox_transform_inv(anchors, bbox_deltas, batch_size) # (batch, H*W*9, 4)\n\n # 2. clip predicted boxes to the image, make sure [x1, y1, x2, y2] are within the image [h, w]\n proposals = clip_boxes(proposals, im_info, batch_size) # (batch, H*W*9, 4)\n scores_keep = scores\n proposals_keep = proposals\n\n # 3. remove predicted bboxes whose height or width < threshold\n # (NOTE: convert min_size to input image scale stored in im_info[2])\n # 4. sort all (proposal, score) pairs by score from highest to lowest\n _, order = torch.sort(scores_keep, 1, True) # high score to low score\n\n # initialise the proposals by zero tensor\n output = scores.new(batch_size, post_nms_topN, 5).zero_()\n\n # for each image\n for i in range(batch_size):\n proposals_single = proposals_keep[i]\n scores_single = scores_keep[i]\n order_single = order[i]\n\n # 5. take top pre_nms_topN proposals before NMS (e.g. 6000)\n if pre_nms_topN > 0 and pre_nms_topN < scores_keep.numel():\n order_single = order_single[:pre_nms_topN]\n proposals_single = proposals_single[order_single, :]\n scores_single = scores_single[order_single].view(-1,1)\n\n # 6. apply NMS (e.g. threshold = 0.7)\n keep_idx_i = nms(proposals_single, scores_single.squeeze(1), nms_thresh)\n keep_idx_i = keep_idx_i.long().view(-1)\n\n # 7. take after_nms_topN proposals after NMS (e.g. 
300 for test, 2000 for train)\n if post_nms_topN > 0:\n keep_idx_i = keep_idx_i[:post_nms_topN]\n\n # 8. return the top proposals (-> RoIs top)\n proposals_single = proposals_single[keep_idx_i, :]\n scores_single = scores_single[keep_idx_i, :]\n\n # 9. padding 0 at the end.\n num_proposal = proposals_single.size(0)\n output[i,:,0] = i\n output[i,:num_proposal,1:] = proposals_single\n\n return output # (batch, 2000, 5) 2000 training proposals, each row is [batch_ind, x1, y1, x2, y2]", "title": "" }, { "docid": "a28a35716343554c22f7fd682a037f71", "score": "0.47991076", "text": "def _perform_crossover(self, crossover_network, network1, network2, bias,\r\n kernel):\r\n crossover_ops1 = []\r\n crossover_ops2 = []\r\n if bias:\r\n bias_scope1 = network1.trainable_biases()\r\n bias_scope2 = network2.trainable_biases()\r\n bias_crossover = crossover_network.trainable_biases()\r\n assert len(bias_scope1) == len(bias_scope2), \\\r\n \"Number of bias variables was {} for network1 and {} for network2 \" \\\r\n \"but has to be the same for both networks\".format(\r\n len(bias_scope1), len(bias_scope2))\r\n assert len(bias_scope1) == len(bias_crossover), \\\r\n \"Number of bias variables was {} for network1+2 and {} for \" \\\r\n \"crossover network but has to be the same for both networks\".format(\r\n len(bias_scope1), len(bias_crossover))\r\n crossover_point = random.randint(0, len(bias_scope1) - 1)\r\n for i in range(len(bias_crossover)):\r\n if i < crossover_point:\r\n crossover_ops1.append(\r\n tf.assign(bias_crossover[i], bias_scope1[i]))\r\n crossover_ops2.append(\r\n tf.assign(bias_crossover[i], bias_scope2[i]))\r\n else:\r\n crossover_ops1.append(\r\n tf.assign(bias_crossover[i], bias_scope2[i]))\r\n crossover_ops2.append(\r\n tf.assign(bias_crossover[i], bias_scope1[i]))\r\n if kernel:\r\n kernel_scope1 = network1.trainable_kernel()\r\n kernel_scope2 = network2.trainable_kernel()\r\n kernel_crossover = crossover_network.trainable_kernel()\r\n assert len(kernel_scope1) == len(kernel_scope2), \\\r\n \"Number of kernel variables was {} for network1 and {} for \" \\\r\n \"network2 but has to be the same for both networks\".format(\r\n len(kernel_scope1), len(kernel_scope2))\r\n assert len(kernel_scope1) == len(kernel_crossover), \\\r\n \"Number of kernel variables was {} for network1+2 and {} for \" \\\r\n \"crossover network but has to be the same for both networks\".format(\r\n len(kernel_scope1), len(kernel_crossover))\r\n crossover_point = random.randint(0, len(kernel_scope1) - 1)\r\n for i in range(len(kernel_crossover)):\r\n if i < crossover_point:\r\n crossover_ops1.append(\r\n tf.assign(kernel_crossover[i], kernel_scope1[i]))\r\n crossover_ops2.append(\r\n tf.assign(kernel_crossover[i], kernel_scope2[i]))\r\n else:\r\n crossover_ops1.append(\r\n tf.assign(kernel_crossover[i], kernel_scope2[i]))\r\n crossover_ops2.append(\r\n tf.assign(kernel_crossover[i], kernel_scope1[i]))\r\n\r\n # Select either A|B or B|A where | indicates the crossover point\r\n crossover_ops = self.choose_random(crossover_ops1, crossover_ops2)\r\n return crossover_ops", "title": "" }, { "docid": "4e4c3b93bdb5290b828322e0f41f35b9", "score": "0.47942954", "text": "def sample_relation_groundtruth(relation_conn, precompute_bbox, topN_boxes_ids, gt_boxes, is_training):\n # ipdb.set_trace()\n ## construct the global connection map\n num_phrases, topN = topN_boxes_ids.shape\n conn_map = np.zeros((num_phrases * topN, num_phrases * topN)) - 1\n\n ## todo\n ## we can further consider inner relation and sym relation\n for 
rel_id, rel in enumerate(relation_conn):\n conn_map[rel[0] * topN:(rel[0] + 1) * topN, rel[1] * topN:(rel[1] + 1) * topN] = rel_id\n\n conn_phrtnsbj, conn_phrtnobj = np.where(conn_map>=0)\n\n conn_phrtnsbj_1 = conn_phrtnsbj // topN\n conn_phrtnobj_1 = conn_phrtnobj // topN\n\n conn_phrtnobj_select = np.tile(np.arange(topN), int(conn_phrtnobj.shape[0] / topN))\n conn_phrtnsbj_bbox_id = topN_boxes_ids[conn_phrtnsbj_1, conn_phrtnsbj % topN]\n conn_phrtnobj_bbox_id = topN_boxes_ids[conn_phrtnobj_1, conn_phrtnobj_select]\n\n ## prepare the gt_boxes\n gt_boxes_phrtnsbj = gt_boxes[conn_phrtnsbj_1]\n gt_boxes_phrtnobj = gt_boxes[conn_phrtnobj_1]\n\n precompute_bbox_phrtnsbj = precompute_bbox[conn_phrtnsbj_bbox_id.astype(np.int32)]\n precompute_bbox_phrtnobj = precompute_bbox[conn_phrtnobj_bbox_id.astype(np.int32)]\n\n iou_phrtnsbj, inter_sbj, union_sbj = boxlist_iou_unified(precompute_bbox_phrtnsbj, gt_boxes_phrtnsbj) ## M\n iou_phrtnobj, inter_obj, union_obj = boxlist_iou_unified(precompute_bbox_phrtnobj, gt_boxes_phrtnobj) ## M\n\n ## select -3 here. for simply to calculate the iou\n iou_phrtnsbj_indicator = -3 * np.ones_like(iou_phrtnsbj)\n iou_phrtnobj_indicator = -3 * np.ones_like(iou_phrtnobj)\n\n ## -1 ignore, 0 bg, 1 fg.\n iou_phrtnsbj_indicator[iou_phrtnsbj >= cfg.MODEL.VG.RELATION_FG] = 1\n iou_phrtnsbj_indicator[iou_phrtnsbj < cfg.MODEL.VG.RELATION_BG] = 0\n\n iou_phrtnobj_indicator[iou_phrtnobj >= cfg.MODEL.VG.RELATION_FG] = 1\n iou_phrtnobj_indicator[iou_phrtnobj < cfg.MODEL.VG.RELATION_BG] = 0\n\n ## relation indicator_fusion, -1 ignore, 0, bg(0,0), 1,bg(0,1), 2,fg(1,1)\n relation_iou_indicator_fusion = iou_phrtnsbj_indicator + iou_phrtnobj_indicator\n\n relation_iou_indicator = -1 * np.ones_like(relation_iou_indicator_fusion)\n conn_map_select = conn_map[conn_phrtnsbj, conn_phrtnobj]\n\n\n if not is_training:\n relation_iou_indicator = relation_iou_indicator * 0\n\n if cfg.MODEL.RELATION.REL_PAIR_IOU:\n relation_iou_indicator = (inter_sbj+inter_obj)/(union_sbj+union_obj+1e-8)\n relation_iou_indicator = np.where(relation_iou_indicator>0.5, relation_iou_indicator, 0)\n else:\n relation_iou_indicator[np.where(relation_iou_indicator_fusion == 2)[0]] = 1\n\n relation_iou_indicator = relation_iou_indicator.astype(np.float32)\n\n else:\n\n relation_inds_pos = []\n relation_inds_neg = []\n\n for rel_id in range(len(relation_conn)):\n\n relation_iou_indicator_rel = relation_iou_indicator_fusion.copy()\n relation_iou_indicator_rel[conn_map_select!=rel_id] = -3\n\n relation_iou_indicator_pos = np.where(relation_iou_indicator_rel == 2)[0]\n relation_iou_indicator_neg1 = np.where(relation_iou_indicator_rel == 1)[0]\n relation_iou_indicator_neg2 = np.where(relation_iou_indicator_rel == 0)[0]\n\n num_pos = relation_iou_indicator_pos.shape[0]\n num_neg1 = relation_iou_indicator_neg1.shape[0]\n num_neg2 = relation_iou_indicator_neg2.shape[0]\n\n if num_pos >= 50:\n\n np.random.shuffle(relation_iou_indicator_pos)\n relation_iou_indicator_pos = relation_iou_indicator_pos[:(num_neg1+num_neg2)]\n relation_inds_pos.append(relation_iou_indicator_pos)\n relation_inds_neg.append(relation_iou_indicator_neg1)\n relation_inds_neg.append(relation_iou_indicator_neg2)\n\n elif num_pos >= 1:\n\n np.random.shuffle(relation_iou_indicator_neg1)\n np.random.shuffle(relation_iou_indicator_neg2)\n select_num_neg = num_pos\n\n if num_neg1 >= select_num_neg and num_neg2 >= select_num_neg:\n relation_inds_pos.append(relation_iou_indicator_pos)\n relation_inds_neg.append(relation_iou_indicator_neg1[:select_num_neg])\n 
relation_inds_neg.append(relation_iou_indicator_neg2[:select_num_neg])\n\n                elif num_neg1 >= select_num_neg and num_neg2 <= select_num_neg:\n                    relation_inds_pos.append(relation_iou_indicator_pos)\n                    relation_inds_neg.append(relation_iou_indicator_neg1[:(2*num_pos-num_neg2)])\n                    relation_inds_neg.append(relation_iou_indicator_neg2)\n\n                elif num_neg1 <= select_num_neg and num_neg2 >= select_num_neg:\n                    relation_inds_pos.append(relation_iou_indicator_pos)\n                    relation_inds_neg.append(relation_iou_indicator_neg1)\n                    relation_inds_neg.append(relation_iou_indicator_neg2[:(2*num_pos - num_neg1)])\n                else:\n                    np.random.shuffle(relation_iou_indicator_pos)\n                    relation_inds_pos.append(relation_iou_indicator_pos[:(num_neg1+num_neg2)])\n                    relation_inds_neg.append(relation_iou_indicator_neg1)\n                    relation_inds_neg.append(relation_iou_indicator_neg2)\n\n            elif num_pos == 0:\n\n                num_neg1_min = min(num_neg1, 5)\n                num_neg2_min = min(num_neg2, 5)\n\n                np.random.shuffle(relation_iou_indicator_neg1)\n                np.random.shuffle(relation_iou_indicator_neg2)\n                relation_inds_neg.append(relation_iou_indicator_neg1[:num_neg1_min])\n                relation_inds_neg.append(relation_iou_indicator_neg2[:num_neg2_min])\n\n        ## avoid concatenating an empty array: append one sentinel to each list (dropped again below via [:-1])\n        relation_inds_pos.append(np.array([1]))\n        relation_inds_neg.append(np.array([1]))\n        relation_inds_pos = np.concatenate(tuple(relation_inds_pos), 0)\n        relation_inds_neg = np.concatenate(tuple(relation_inds_neg), 0)\n\n        relation_iou_indicator[relation_inds_pos[:-1]] = 1\n        relation_iou_indicator[relation_inds_neg[:-1]] = 0\n\n    return relation_iou_indicator, conn_map_select, conn_phrtnsbj, conn_phrtnobj, conn_map", "title": "" }, { "docid": "dfa7febb309da4bef92b6ed9e55958d8", "score": "0.47912672", "text": "def forward(self, x):\n        p = self.size // 2\n        if (x.shape[2] - 1) // self.stride != (x.shape[2] + 2 * p - self.size) // self.stride:\n            padding1 = (self.size - 1) // 2\n            padding2 = padding1 + 1\n        else:\n            padding1 = (self.size - 1) // 2\n            padding2 = padding1\n        if (x.shape[3] - 1) // self.stride != (x.shape[3] + 2 * p - self.size) // self.stride:\n            padding3 = (self.size - 1) // 2\n            padding4 = padding3 + 1\n        else:\n            padding3 = (self.size - 1) // 2\n            padding4 = padding3\n        x = F.max_pool2d(F.pad(x, (padding3, padding4, padding1, padding2), mode='replicate'), self.size, stride=self.stride)\n        return x", "title": "" }, { "docid": "e19f4d212eed788cd8f42b0ae740c848", "score": "0.4790272", "text": "def evaluate_networks():\n    X = np.array([[0, 1, 0, 1], [0, 0, 1, 1]])\n    y_std = np.array([0, 1, 1, 0])\n    y_merged = np.array([[0, 1, 1, 0], [1, 1, 1, 0], [0, 1, 1, 1]])\n    epochs_std = []\n    epochs_merged1 = []\n    epochs_merged2 = []\n    epochs_merged3 = []\n    epochs_merged4 = []\n\n    # Train each of the networks 20 times\n    for i in range(20):\n        print(f'Starting training run {i}')\n        sn = StandardNetwork(0.1, True)\n        mn1 = MergedNetwork(0.1, np.array([0.34, 0.33, 0.33]), True)\n        mn2 = MergedNetwork(0.1, np.array([0.5, 0.25, 0.25]), True)\n        mn3 = MergedNetwork(0.1, np.array([0.25, 0.5, 0.25]), True)\n        mn4 = MergedNetwork(0.1, np.array([0.25, 0.25, 0.5]), True)\n\n        tmp_loss = sn.train(X, y_std, 10000)\n        epochs_std.append(len(tmp_loss))\n\n        tmp_loss = mn1.train(X, y_merged, 10000)\n        epochs_merged1.append(len(tmp_loss))\n\n        tmp_loss = mn2.train(X, y_merged, 10000)\n        epochs_merged2.append(len(tmp_loss))\n\n        tmp_loss = mn3.train(X, y_merged, 10000)\n        epochs_merged3.append(len(tmp_loss))\n\n        tmp_loss = mn4.train(X, y_merged, 10000)\n        epochs_merged4.append(len(tmp_loss))\n\n    epochs_std = np.array(epochs_std)\n    epochs_merged1 = 
np.array(epochs_merged1)\n epochs_merged2 = np.array(epochs_merged2)\n epochs_merged3 = np.array(epochs_merged3)\n epochs_merged4 = np.array(epochs_merged4)\n\n # Create the boxplots\n epoch_results = {\"Standard\": epochs_std, \"Merged\\nEven\": epochs_merged1, \"Merged\\nXOR\": epochs_merged2,\n \"Merged\\nNAND\": epochs_merged3, \"Merged\\nOR\": epochs_merged4}\n fig = plt.figure(figsize=(8, 6))\n ax = fig.subplots()\n ax.boxplot(epoch_results.values())\n ax.set_xticklabels(epoch_results.keys())\n ax.tick_params(axis='both', which='major', labelsize=12)\n ax.tick_params(axis='both', which='minor', labelsize=12)\n ax.set_ylabel('Epochs taken', labelpad=6, fontsize=14)\n ax.set_xlabel('Network trained', labelpad=10, fontsize=14)\n if not os.path.exists('../../plots/XOR/'):\n os.makedirs('../../plots/XOR/')\n plt.savefig('../../plots/XOR/XOR_boxplot.eps', format='eps', bbox_inches=\"tight\", pad_inches=0)\n plt.close()", "title": "" }, { "docid": "6a657b1c566f2c485cca101cd4eabebc", "score": "0.477962", "text": "def max_pool_forward_naive(x, pool_param):\n out = None\n #############################################################################\n # TODO: Implement the max pooling forward pass #\n #############################################################################\n m, C, H, W = x.shape\n pool_height = pool_param['pool_height']\n pool_width = pool_param['pool_width']\n stride = pool_param['stride']\n\n H_out = 1 + (H - pool_height)/stride \n W_out = 1 + (W - pool_width)/stride \n out = np.zeros((m, C, H_out, W_out))\n \n for i in range(0, m):\n x_data = x[i]\n xx, yy = -1, -1\n for j in range(0, H-pool_height+1, stride):\n yy += 1\n for k in range(0, W-pool_width+1, stride):\n xx += 1\n x_data_sample = x_data[:, j : j+pool_height, k : k+pool_width]\n for l in range(0, C):\n out[i, l, yy, xx] = np.max(x_data_sample[l])\n xx = -1\n\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n cache = (x, pool_param)\n return out, cache", "title": "" }, { "docid": "0c85090d834efcc0e49a3ab397dbcf48", "score": "0.47793213", "text": "def _communicate_size_to_each_rank(\n input_size_list, output_size, input, pg, tensor_type=torch.int\n):\n input_size_list_tensor = torch.tensor(\n input_size_list, dtype=tensor_type, device=input.device\n )\n output_size_list_tensor = torch.empty(\n output_size, dtype=tensor_type, device=input.device\n )\n dist.all_to_all_single(\n output_size_list_tensor,\n input_size_list_tensor,\n group=pg,\n )\n return output_size_list_tensor.tolist()", "title": "" }, { "docid": "8d1b42ccaf1883588a49b5e938c0355b", "score": "0.4775591", "text": "def max_pool_forward_naive(x, pool_param):\n out = None\n #############################################################################\n # TODO: Implement the max pooling forward pass #\n #############################################################################\n pool_height=pool_param[\"pool_height\"]\n pool_width=pool_param[\"pool_width\"]\n stride=pool_param[\"stride\"]\n \n (N, C, H, W)=x.shape\n\n H_dash=1 + (H - pool_height) / stride\n W_dash= 1 + (W -pool_width) / stride\n \n out=np.zeros((N,C,H_dash,W_dash))\n \n for i in range(N):\n for j in range(C):\n for k in range(H_dash):\n start_height=k*stride\n for l in range(W_dash):\n start_width=l*stride\n # print 'start',start_width\n selected_data=x[i,j,start_height:start_height+pool_height,start_width:start_width+pool_width]\n # print 
selected_data\n\n out[i,j,k,l]=np.max(selected_data)\n \n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n cache = (x, pool_param)\n return out, cache", "title": "" }, { "docid": "20bee5181e5ab92977c10a98b675e512", "score": "0.4768586", "text": "def data_generator(dataset, config, batch_size=1):\n # Anchors\n # [anchor_count, (y1, x1, y2, x2)]\n backbone_shapes = compute_backbone_shapes(config, config.IMAGE_SHAPE)\n anchors = utils.generate_pyramid_anchors(config.RPN_ANCHOR_SCALES,\n config.RPN_ANCHOR_RATIOS,\n backbone_shapes,\n config.BACKBONE_STRIDES,\n config.RPN_ANCHOR_STRIDE)\n\n # Keras requires a generator to run indefinitely.\n print(\"Creating data generator for batch size \", batch_size)\n while True:\n try:\n batch_image_shape = np.zeros(\n (batch_size,) + (3,), dtype=np.int32)\n batch_rpn_match = np.zeros(\n [batch_size, anchors.shape[0], 1], dtype=np.int32)\n batch_rpn_bbox = np.zeros(\n [batch_size, config.RPN_TRAIN_ANCHORS_PER_IMAGE, 3],\n dtype=np.float32)\n batch_images = np.zeros(\n (batch_size,) + tuple(config.IMAGE_SHAPE), dtype=np.float32)\n batch_gt_class_ids = np.zeros(\n (batch_size, config.MAX_GT_INSTANCES), dtype=np.int32)\n batch_gt_boxes = np.zeros(\n (batch_size, config.MAX_GT_INSTANCES, 4), dtype=np.int32)\n images, gt_boxes, gt_class_ids = dataset.load_input()\n for b in range(batch_size):\n # RPN Targets\n rpn_match, rpn_bbox = build_rpn_targets(\n images[b].shape, anchors, gt_class_ids[b],\n gt_boxes[b], config)\n # If more instances than fits in the array, sub-sample from them.\n if gt_boxes[b].shape[0] > config.MAX_GT_INSTANCES:\n ids = np.random.choice(\n np.arange(gt_boxes[b].shape[0]),\n config.MAX_GT_INSTANCES,\n replace=False)\n gt_class_ids[b] = gt_class_ids[b][ids]\n gt_boxes[b] = gt_boxes[b][ids]\n\n # Add to batch\n batch_image_shape[b] = images[b].shape\n batch_rpn_match[b] = rpn_match[:, np.newaxis]\n batch_rpn_bbox[b] = rpn_bbox\n batch_images[b] = images[b].astype(np.float32)\n batch_gt_class_ids[b, :gt_class_ids[b].shape[0]] = gt_class_ids[b]\n batch_gt_boxes[b, :gt_boxes[b].shape[0]] = gt_boxes[b]\n inputs = [batch_images, batch_image_shape, batch_rpn_match,\n batch_rpn_bbox, batch_gt_class_ids, batch_gt_boxes]\n outputs = []\n yield inputs, outputs\n except (GeneratorExit, KeyboardInterrupt):\n print(\"Something wrong\")\n raise", "title": "" }, { "docid": "8640c36b4b8b5ac8f6c63fda02923e0b", "score": "0.4768023", "text": "def sample_relation_groundtruth_v1(relation_conn, precompute_bbox, topN_boxes_ids, gt_boxes, is_training):\n # ipdb.set_trace()\n ## construct the global connection map\n num_phrases, topN = topN_boxes_ids.shape\n conn_map = np.zeros((num_phrases * topN, num_phrases * topN)) - 1\n\n ## todo\n ## we can further consider inner relation and sym relation\n for rel_id, rel in enumerate(relation_conn):\n conn_map[rel[0] * topN:(rel[0] + 1) * topN, rel[1] * topN:(rel[1] + 1) * topN] = rel_id\n\n conn_phrtnsbj, conn_phrtnobj = np.where(conn_map>=0)\n\n conn_phrtnsbj_1 = conn_phrtnsbj // topN\n conn_phrtnobj_1 = conn_phrtnobj // topN\n\n conn_phrtnobj_select = np.tile(np.arange(topN), int(conn_phrtnobj.shape[0] / topN))\n conn_phrtnsbj_bbox_id = topN_boxes_ids[conn_phrtnsbj_1, conn_phrtnsbj % topN]\n conn_phrtnobj_bbox_id = topN_boxes_ids[conn_phrtnobj_1, conn_phrtnobj_select]\n\n ## prepare the gt_boxes\n gt_boxes_phrtnsbj = gt_boxes[conn_phrtnsbj_1]\n gt_boxes_phrtnobj = 
gt_boxes[conn_phrtnobj_1]\n\n precompute_bbox_phrtnsbj = precompute_bbox[conn_phrtnsbj_bbox_id.astype(np.int32)]\n precompute_bbox_phrtnobj = precompute_bbox[conn_phrtnobj_bbox_id.astype(np.int32)]\n\n iou_phrtnsbj = boxlist_iou(precompute_bbox_phrtnsbj, gt_boxes_phrtnsbj).diag().detach().cpu().numpy() ## M\n iou_phrtnobj = boxlist_iou(precompute_bbox_phrtnobj, gt_boxes_phrtnobj).diag().detach().cpu().numpy()\n\n ## select -3 here. for simply to calculate the iou\n iou_phrtnsbj_indicator = -3 * np.ones_like(iou_phrtnsbj)\n iou_phrtnobj_indicator = -3 * np.ones_like(iou_phrtnobj)\n\n ## -1 ignore, 0 bg, 1 fg.\n iou_phrtnsbj_indicator[iou_phrtnsbj >= cfg.MODEL.VG.RELATION_FG] = 1\n iou_phrtnsbj_indicator[iou_phrtnsbj < cfg.MODEL.VG.RELATION_BG] = 0\n\n iou_phrtnobj_indicator[iou_phrtnobj >= cfg.MODEL.VG.RELATION_FG] = 1\n iou_phrtnobj_indicator[iou_phrtnobj < cfg.MODEL.VG.RELATION_BG] = 0\n\n ## relation indicator_fusion, -1 ignore, 0, bg(0,0), 1,bg(0,1), 2,fg(1,1)\n relation_iou_indicator_fusion = iou_phrtnsbj_indicator + iou_phrtnobj_indicator\n\n relation_iou_indicator = -1 * np.ones_like(relation_iou_indicator_fusion)\n conn_map_select = conn_map[conn_phrtnsbj, conn_phrtnobj]\n\n\n if not is_training:\n relation_iou_indicator = relation_iou_indicator * 0\n relation_iou_indicator[np.where(relation_iou_indicator_fusion == 2)[0]] = 1\n relation_iou_indicator = relation_iou_indicator.astype(np.float32)\n\n else:\n\n relation_inds_pos = []\n relation_inds_neg = []\n\n for rel_id in range(len(relation_conn)):\n\n relation_iou_indicator_rel = relation_iou_indicator_fusion.copy()\n relation_iou_indicator_rel[conn_map_select!=rel_id] = -3\n\n relation_iou_indicator_pos = np.where(relation_iou_indicator_rel == 2)[0]\n relation_iou_indicator_neg1 = np.where(relation_iou_indicator_rel == 1)[0]\n relation_iou_indicator_neg2 = np.where(relation_iou_indicator_rel == 0)[0]\n\n num_pos = relation_iou_indicator_pos.shape[0]\n num_neg1 = relation_iou_indicator_neg1.shape[0]\n num_neg2 = relation_iou_indicator_neg2.shape[0]\n\n if num_pos >= 1:\n\n np.random.shuffle(relation_iou_indicator_neg1)\n np.random.shuffle(relation_iou_indicator_neg2)\n select_num_neg = num_pos\n # select_num_neg = num_pos//2 + 1\n\n if num_neg1 >= select_num_neg and num_neg2 >= select_num_neg:\n relation_inds_pos.append(relation_iou_indicator_pos)\n relation_inds_neg.append(relation_iou_indicator_neg1[:select_num_neg])\n relation_inds_neg.append(relation_iou_indicator_neg2[:select_num_neg])\n\n elif num_neg1 >= select_num_neg and num_neg2 <= select_num_neg:\n relation_inds_pos.append(relation_iou_indicator_pos)\n relation_inds_neg.append(relation_iou_indicator_neg1[:select_num_neg])\n relation_inds_neg.append(relation_iou_indicator_neg2)\n\n elif num_neg1 <= select_num_neg and num_neg2 >= select_num_neg:\n relation_inds_pos.append(relation_iou_indicator_pos)\n relation_inds_neg.append(relation_iou_indicator_neg1)\n relation_inds_neg.append(relation_iou_indicator_neg2[:select_num_neg])\n else:\n np.random.shuffle(relation_iou_indicator_pos)\n relation_inds_pos.append(relation_iou_indicator_pos[:(num_neg1+num_neg2)//2])\n relation_inds_neg.append(relation_iou_indicator_neg1)\n relation_inds_neg.append(relation_iou_indicator_neg2)\n\n else:\n num_neg1_min = min(num_neg1, 1)\n num_neg2_min = min(num_neg2, 1)\n\n np.random.shuffle(relation_iou_indicator_neg1)\n np.random.shuffle(relation_iou_indicator_neg2)\n relation_inds_neg.append(relation_iou_indicator_neg1[:num_neg1_min])\n 
relation_inds_neg.append(relation_iou_indicator_neg2[:num_neg2_min])\n\n ## avoid the concatnate the empty array\n relation_inds_pos.append([])\n relation_inds_neg.append([])\n relation_inds_pos = np.concatenate(relation_inds_pos, 0).astype(np.int32)\n relation_inds_neg = np.concatenate(relation_inds_neg, 0).astype(np.int32)\n\n if len(relation_inds_pos) > 0:\n relation_iou_indicator[relation_inds_pos] = 1\n if len(relation_inds_neg) > 0:\n relation_iou_indicator[relation_inds_neg] = 0\n\n return relation_iou_indicator, conn_map_select, conn_phrtnsbj, conn_phrtnobj, conn_map", "title": "" }, { "docid": "d4badc5bdb4a1d00b22bc82dbccb0ccd", "score": "0.47628304", "text": "def nextBatch(self, batchSize):\n start = self.index\n self.index += batchSize\n if self.index > self.imgNum:\n # One round completed\n self.round += 1\n # Shuffle the data\n perm = np.arange(self.imgNum)\n np.random.shuffle(perm)\n self.images = self.images[:,perm]\n self.labels = self.labels[perm]\n # Start next round\n #print 'Start round #',self.round\n start = 0\n self.index = batchSize\n assert batchSize <= self.imgNum\n end = self.index \n #print start, end\n return self.images[:,range(start,end)], self.labels[start:end]", "title": "" }, { "docid": "091e89db877231567fee3de43fe953bf", "score": "0.4750476", "text": "def batch(inputs, targets, batch_size=32, shuffle=True):\n starts = np.arange(0, len(inputs), batch_size)\n if shuffle:\n np.random.shuffle(starts)\n for start in starts:\n end = start + batch_size\n batch_inputs = inputs[start:end]\n batch_targets = targets[start:end]\n yield batch_inputs, batch_targets", "title": "" }, { "docid": "59767dc0896a6514b7711c3d9c7562c0", "score": "0.47501263", "text": "def random_mini_batches(X, Y, mini_batch_size=64, seed=0):", "title": "" }, { "docid": "8d1a172242af8757c64729a2213639f8", "score": "0.474094", "text": "def pool_forward(A_prev, kernel_shape, stride=(1, 1), mode='max'):\n img_n = A_prev.shape[0]\n img_h = A_prev.shape[1]\n img_w = A_prev.shape[2]\n img_c = A_prev.shape[3]\n ker_h = kernel_shape[0]\n ker_w = kernel_shape[1]\n\n img_h_out = (img_h - ker_h) // stride[0] * stride[0] + ker_h\n img_w_out = (img_w - ker_w) // stride[1] * stride[1] + ker_w\n pad = A_prev[:, :img_h_out * ker_h, :img_w_out * ker_w, ...]\n\n output = asStride(pad, kernel_shape, stride)\n if mode == \"max\":\n r = np.nanmax(output, axis=(3, 4))\n else:\n r = np.nanmean(output, axis=(3, 4))\n return r", "title": "" }, { "docid": "78950d98149c72e86f098a3b23f6d118", "score": "0.47373518", "text": "def pool_forward(A_prev, kernel_shape, stride=(1, 1), mode='max'):\n # variables of previous layer\n m, h_prev, w_prev, c_prev = A_prev.shape\n\n # variables for the kernel\n kh, kw = kernel_shape\n\n # variables for stride\n sh, sw = stride\n\n # output dimensions\n oh = int(((h_prev - kh) / sh) + 1)\n ow = int(((w_prev - kw) / sw) + 1)\n\n # initialize output\n output = np.zeros((m, oh, ow, c_prev))\n\n for i in range(ow):\n for j in range(oh):\n # elm-wise mult of kernel and image\n x = A_prev[:, j * sh: j * sh + kh, i * sw: i * sw + kw]\n if mode == 'max':\n output[:, j, i, :] = np.max(x, axis=(1, 2))\n else:\n output[:, j, i, :] = np.mean(x, axis=(1, 2))\n return output", "title": "" }, { "docid": "e11fac2a762880bc20a909d929c4de8c", "score": "0.473437", "text": "def forward(ctx, input, num_sync_devices, num_groups):\n ctx.num_sync_devices = num_sync_devices\n ctx.num_groups = num_groups\n\n input_list = [\n torch.zeros_like(input) for k in range(du.get_local_size())\n ]\n dist.all_gather(\n 
input_list, input, async_op=False, group=du._LOCAL_PROCESS_GROUP\n )\n\n inputs = torch.stack(input_list, dim=0)\n if num_groups > 1:\n rank = du.get_local_rank()\n group_idx = rank // num_sync_devices\n inputs = inputs[\n group_idx\n * num_sync_devices : (group_idx + 1)\n * num_sync_devices\n ]\n inputs = torch.sum(inputs, dim=0)\n return inputs", "title": "" }, { "docid": "9e2c6374ef19b1a1e4d79b6b6c76a39e", "score": "0.47316426", "text": "def forward_model(self, batch):", "title": "" }, { "docid": "e7629d6a64b9a5ef3e3c6e6a692dac36", "score": "0.4728635", "text": "def shuffled(*args):\n batch = list(zip(*args))\n random.shuffle(batch)\n return zip(*batch)", "title": "" }, { "docid": "a86182c332b8e4ab964c50a2f00dea0f", "score": "0.47274685", "text": "def pool_forward(A_prev, hyper_param, mode=\"max\"):\n (m, n_prev_H, n_prev_W, n_prev_C) = A_prev.shape\n\n f = hyper_param[\"f\"]\n stride = hyper_param[\"stride\"]\n\n # dimension of the output\n n_H = int((n_prev_H - f) / stride) + 1 # new n_H after conv\n n_W = int((n_prev_W - f) / stride) + 1 # new n_W after conv\n n_C = n_prev_C\n\n # output\n A = np.zeros((m, n_H, n_W, n_C))\n\n for i in range(m):\n for h in range(n_H):\n for w in range(n_W):\n for c in range(n_C):\n y0 = h * stride\n y1 = y0 + f\n x0 = w * stride\n x1 = x0 + f\n\n a_slice_prev = A_prev[y0:y1, x0:x1, :]\n if mode == \"max\":\n A[i, h, w, c] = np.max(a_slice_prev)\n if mode == \"average\":\n A[i, h, w, c] = np.mean(a_slice_prev)\n\n cache = (A_prev, hyper_param)\n return A, cache", "title": "" }, { "docid": "01113fd52d4d6f7542780e5c0c4689c4", "score": "0.4726742", "text": "def test_multiple_param_groups(self):\n self.dist_init(self.rank)\n BATCH_SIZE, NUM_ITERS = 8, 3\n INPUT_DIM, HIDDEN_DIM, OUTPUT_DIM = 5, 10, 5\n WD, LR = 0.01, 0.01\n model1 = torch.nn.Sequential(\n torch.nn.Linear(INPUT_DIM, HIDDEN_DIM),\n torch.nn.Linear(HIDDEN_DIM, HIDDEN_DIM),\n torch.nn.Linear(HIDDEN_DIM, OUTPUT_DIM),\n )\n model2 = copy.deepcopy(model1)\n model3 = copy.deepcopy(model1)\n model1 = model1.to(self.device)\n model2 = model2.to(self.device)\n model3 = model3.to(self.device)\n inputs = [\n torch.randn(BATCH_SIZE, INPUT_DIM).to(self.device) for _ in range(NUM_ITERS)\n ]\n # Construct `optim1` with both parameter groups upfront\n optim1 = ZeroRedundancyOptimizer(\n [\n {\"params\": [l.weight for l in model1], \"weight_decay\": 0.0},\n {\"params\": [l.bias for l in model1], \"weight_decay\": WD},\n ],\n optimizer_class=AdamW,\n lr=LR,\n )\n # Construct `optim2` by adding the second parameter after\n optim2 = ZeroRedundancyOptimizer(\n [l.weight for l in model2],\n optimizer_class=AdamW,\n lr=LR,\n weight_decay=0.0,\n )\n optim2.add_param_group({\"params\": [l.bias for l in model2], \"weight_decay\": WD})\n # Construct `optim3` as a non-sharded optimizer\n optim3 = AdamW(\n [\n {\"params\": [l.weight for l in model3], \"weight_decay\": 0.0},\n {\"params\": [l.bias for l in model3], \"weight_decay\": WD},\n ],\n lr=LR,\n )\n # Check parity over a few iterations\n for input in inputs:\n for model, optim in (\n (model1, optim1),\n (model2, optim2),\n (model3, optim3),\n ):\n optim.zero_grad()\n out = model(input)\n loss = out.sum()\n loss.backward()\n optim.step()\n for layer1, layer2, layer3 in zip(model1, model2, model3):\n torch.testing.assert_close(layer1.weight, layer2.weight)\n torch.testing.assert_close(layer1.weight, layer3.weight)\n torch.testing.assert_close(layer1.bias, layer2.bias)\n torch.testing.assert_close(layer1.bias, layer3.bias)", "title": "" }, { "docid": 
"373eeaabfe696ae731df420a460bdd68", "score": "0.47254878", "text": "def forward(self, nodes_batch, test=False):\n # nodes_batch = [62640, 8361, 13567]\n lower_layer_nodes = list(nodes_batch)\n nodes_batch_layers = [(lower_layer_nodes,)]\n # for each layer sample the neighbors of the previous layers\n # nodes_batch_layers is order from deeper layer to shallower layer\n layer_num_samples = [16, 4] if not test else [None, None] # to avoid OOM we have to restrict the samples of higher level\n # if testing we will not sample neighbor randomly\n\n for i in range(self.num_layers):\n lower_samp_neighs, lower_layer_nodes_dict, lower_layer_nodes = self._get_unique_neighs_list(\n lower_layer_nodes, num_sample=layer_num_samples[i])\n nodes_batch_layers.insert(0, (lower_layer_nodes, lower_samp_neighs, lower_layer_nodes_dict))\n\n assert len(nodes_batch_layers) == self.num_layers + 1\n\n pre_hidden_embs = self.raw_features\n for index in range(1, self.num_layers + 1):\n nb = nodes_batch_layers[index][0] # list of all affected nodes in this layer\n pre_neighs = nodes_batch_layers[index - 1] # list of all precursor nodes in previous layer\n # aggregate feature of neighbors from previous layer\n aggregate_feats = self.aggregate(nb, pre_hidden_embs, pre_neighs)\n sage_layer_name = 'sage_layer' + str(index)\n sage_layer = getattr(self, sage_layer_name)\n if index > 1:\n nb = self._nodes_map(nb, pre_hidden_embs, pre_neighs)\n # self.dc.logger.info('sage_layer.')\n cur_hidden_embs = sage_layer(self_feats=pre_hidden_embs[nb],\n aggregate_feats=aggregate_feats)\n pre_hidden_embs = cur_hidden_embs\n return pre_hidden_embs", "title": "" }, { "docid": "cd32587a69d5c7150bf62c64fddffe59", "score": "0.47217745", "text": "def max_pool_forward(x, pool_size, stride):\n #######################################################################\n # #\n # #\n # TODO: YOUR CODE HERE #\n # #\n # #\n #######################################################################\n (B,H,W,C) = x.shape # get shapes and sizes\n \n new_height = 1 + (H-pool_size) / stride\n new_width = 1 + (W-pool_size) / stride\n new_height = int(new_height)\n new_width = int(new_width)\n \n out = np.zeros((B,new_height,new_width,C)) # set output array\n \n for n in range(B): # for each sample in the batch\n for h in range(new_height): # for each pixel\n for w in range(new_width):\n row_start = h*stride # find the start and end values\n row_end = h*stride + pool_size\n col_start = w*stride\n col_end = w*stride + pool_size\n \n # the portion of the x frame taken is found\n part = x[n,row_start:row_end,col_start:col_end,:]\n # the out value for the specific pixel is the max of the pool previous (hence max pooling)\n out[n,h,w,:] = np.max(part.reshape((C,pool_size**2)),axis=1)\n \n return out", "title": "" }, { "docid": "4d1c4344d4844a76d3a406d4b0ed0f9b", "score": "0.47152147", "text": "def roundRobin(units, sets=None):\n if len(units) % 2:\n units.append(None)\n count = len(units)\n sets = sets or (count - 1)\n half = count / 2\n schedule = []\n for turn in range(sets):\n pairings = []\n for i in range(half):\n pairings.append((units[i], units[count-i-1]))\n units.insert(1, units.pop())\n schedule.append(pairings)\n return schedule", "title": "" }, { "docid": "d49dc2ad7127b623a0cad137c111333b", "score": "0.47084042", "text": "def run(G,initial_condition):\n\n\tfinal_node = {} # dictionray that will hold final community values of nodes (0 or 1)\n\tt = 0 # time increment variable\n\n\t# while loop that only stops if there is a split (2 communities)\n\t# if a 
split doesn't occur after 10 tries, set the final node and final edge values\n\n\twhile 1 not in final_node.values() or 0 not in final_node.values():\n\n\t\tif t == 10: # break after 10 attempts\n\t\t\tbreak \n\n\t\tfinal_node, final_edge = BP(G, initial_condition) # run BP and save values\n\t\tt += 1 # increment the time variable\n\n\tfirst = tuple(x for x in final_node if final_node[x] == 0) # make a tuple of nodes with a community membership of 0\n\tsecond = tuple(x for x in final_node if final_node[x] == 1) # make a tuple of nodes with a community membership of 1\n\n\t # if all ten attempts were used, return a list of nodes separated by community. Because there was no split, \n\t # one list is empty and the other contains all the nodes\n\tif t == 10: \n\t\treturn [first,second]\n\t\n\t# if t != 10\n\telse:\n\t\t# make an initial results dictionary. The keys in the dictionary are lists of nodes in the community and the \n\t\t# value indicates if the community can be further split (1 = yes, 0 = no)\n\t\tresults = {first:1,second:1} # initial values indicate that the communities can possibly be further split\n\t\t\n\t\twhile 1 in results.values(): # run until no more communities can be further split\n\n\t\t\tprevious_results = results # store previous results\n\n\t\t\t# update communities that cannot be split into results dictionary\n\t\t\tresults = {x:0 for x in previous_results if not previous_results[x]}\n\n\t\t\t# for each community that can be split\n\t\t\tfor com in (x for x in previous_results if previous_results[x]):\n\t\t\t\ta,b = subgraph_run(G,com, initial_condition) # attempt to split the community by running BP on the smaller network\n\t\t\t\t# if any of the splits are empty, the original com is done, set to 0s\n\t\t\t\tif not a or not b: results[com] = 0\n\t\t\t\t# communities can keep on splitting\n\t\t\t\telse: results[a] = results[b] = 1\n\t\t\t\t\t\n\t\treturn list(results.keys()) # return list of lists of communities", "title": "" }, { "docid": "09b684ee2c8440fce685a44dad2936d2", "score": "0.47055015", "text": "def _communicate_list_to_each_rank(\n    input_tensor_list, output_lists, input, pg, tensor_type=torch.int64\n):\n    output_tensor_list = []\n    for output_list in output_lists:\n        output_tensor_list.append(\n            torch.empty(output_list, dtype=tensor_type, device=input.device)\n        )\n    dist.all_to_all(\n        output_tensor_list,\n        input_tensor_list,\n        group=pg,\n    )\n    return output_tensor_list", "title": "" }, { "docid": "538b865fe1fb568bd1b32bc6f719faf9", "score": "0.46987274", "text": "def forward(self, x):\n        out = self.avg_pool(x)\n        return out", "title": "" }, { "docid": "7658c98bfa521eae3c47a5e51a74949f", "score": "0.4693584", "text": "def run_network(ray_samples, view_dirs, network, chunk=1024*64):\n    ray_samples_flat = torch.reshape(ray_samples, [-1, 3])\n    view_dirs = view_dirs[:,None].expand(ray_samples.shape)\n    view_dirs_flat = torch.reshape(view_dirs, [-1, 3])\n    inputs = torch.cat([ray_samples_flat, view_dirs_flat], -1)\n    outputs = torch.cat([network(inputs[i:i+chunk]) for i in range(0, inputs.shape[0], chunk)])\n    outputs = torch.reshape(outputs, list(ray_samples.shape[:-1]) + [4])\n    return outputs", "title": "" }, { "docid": "6eff9953fe46010b0097f26ad3685dc8", "score": "0.46877286", "text": "def test_single_input_single_batch(self):\n        # Folder must be root to load in make_net properly\n        if os.getcwd().split('\\\\')[-1] == 'tests': os.chdir('../..')\n        \n        # Get 'empty' GRU\n        gru = get_gru_pytorch_copy(1)\n        \n        # Completely zero GRU, all inputs get ignored\n        
self.assertEqual(gru(tensor([[0]], dtype=float64)), 0)\n gru.hx = None # GRU keeps own state, reset it\n self.assertEqual(gru(tensor([[1]], dtype=float64)), 0)\n gru.hx = None # GRU keeps own state, reset it\n \n # Modify the GRU to have weight-arrays of one\n gru.weight_hh = tensor(ones((3, 1)), dtype=float64)\n gru.weight_ih = tensor(ones((3, 1)), dtype=float64)\n \n # Load in PyTorch native GRU to compare with\n pytorch_gru = get_pytorch_gru(1, gru)\n \n # Test if they continue to obtain the same results\n for _ in range(100):\n i = random()\n a = gru(tensor([[i]], dtype=float64))\n gru.hx = None # GRU keeps own state, reset it\n b = pytorch_gru(FloatTensor([[i]]))\n self.assertEqual(a.shape, b.shape)\n self.assertTrue(float(a) - EPSILON <= float(b) <= float(a) + EPSILON)\n \n # Set bias_ih to minus ones\n gru.bias_ih = tensor(ones((3,)), dtype=float64) * -1\n \n # Load in PyTorch native GRU to compare with\n pytorch_gru = get_pytorch_gru(1, gru)\n \n # Test if they continue to obtain the same results\n for _ in range(100):\n i = random()\n a = gru(tensor([[i]], dtype=float64))\n gru.hx = None # GRU keeps own state, reset it\n b = pytorch_gru(FloatTensor([[i]]))\n self.assertEqual(a.shape, b.shape)\n self.assertTrue(float(a) - EPSILON <= float(b) <= float(a) + EPSILON)", "title": "" }, { "docid": "baceffad1f8bc4cb9317b4503e7b00c5", "score": "0.4681768", "text": "def iterate_minibatches(x, y, batchsize, shuffle=False):\n assert len(x) == len(y)\n # if shuffle:\n # indices = np.arange(len(x))\n # np.random.shuffle(indices)\n for start_idx in range(0, len(x) - batchsize + 1, batchsize):\n if shuffle:\n indices = np.arange(len(x))\n np.random.shuffle(indices)\n idx = indices[start_idx:start_idx + batchsize]\n else:\n idx = slice(start_idx, start_idx + batchsize)\n yield x[idx], y[idx]", "title": "" }, { "docid": "a76baa93af6bdbed0fd0efa49671ea62", "score": "0.46757033", "text": "def one_round_swaps(b):\n\n # Initialized so that there's a 'previous' value for the initial one\n new_max = [b.calculate_likelihood()]\n new_groups = copy.copy(b.groups)\n\n # Iterate through the nodes\n for i, group in enumerate(b.groups):\n changes = []\n # Iterate through the possible clusters\n for j in range(b.k):\n # Not worth evaluating your current position.\n if j != group:\n # Place node i in group j\n changes.append(b.change_in_likelihood(i, j))\n else:\n changes.append(0)\n # After trying all groups, set the new group to the best option\n best = changes.index(max(changes))\n new_groups[i] = best\n # Gotta change the ACTUAL assignment -- this is cumulative\n b.set_group(i, best)\n\n # Update the likelihood by the local change\n # Remember, the list was initialized to have a 'previous' elem\n # even for the FIRST element.\n new_max.append(new_max[-1] + max(changes))\n\n mx = new_max.index(max(new_max))\n\n # If the index isn't 0, which is the 'no changes' state,\n if mx:\n # Remove the first element\n del new_max[0]\n # and pretend it wasn't there\n mx -= 1\n # so that you can easily index into the best place.\n # With n nodes, you index easily into 1:n changes -- not 0:n changes.\n best_groups = np.concatenate((new_groups[:mx+1], b.groups[mx+1:]))\n b.set_group(best_groups)\n\n # Return the likelihood corresponding to the groups\n return new_max[mx]\n else:\n # The groups don't change.\n return new_max[0]", "title": "" }, { "docid": "59eb73dfd02d51e7dda3e7a5069dc9dc", "score": "0.4675173", "text": "def forward(self, x):\n h_splits = torch.split(x, int(x.size(2) / self.local_h_num), dim=2)\n h_out = 
[]\n\n for i in range(len(h_splits)):\n start = True # Just to see if it is the first layer in the patches\n w_splits = torch.split(h_splits[i], int(\n h_splits[i].size(3)/self.local_w_num), dim=3)\n for j in range(len(w_splits)):\n bn_out = self.bns[i*len(w_splits)+j](w_splits[j].contiguous())\n bn_out = self.relus[i*len(w_splits)+j](bn_out)\n conv_out = self.convs[i*len(w_splits)+j](bn_out)\n if start:\n h_out.append(conv_out)\n start = False\n else:\n h_out[i] = torch.cat((h_out[i], conv_out), 3)\n if i == 0:\n out = h_out[i]\n else:\n out = torch.cat((out, h_out[i]), 2)\n return(out)", "title": "" }, { "docid": "d1cc4cb827d47de45b6ccafc65432445", "score": "0.46743208", "text": "def minibatches(X, Y, batch_size):\n\tm = X.shape[0]\n\tn_batches = int(np.floor(m / batch_size))\n\trandom_indices = np.random.permutation(np.arange(m))\n\tfor i in range(n_batches):\n\t\tbatch_indices = np.arange(i * batch_size, (i + 1) * batch_size)\n\t\tbatch_indices = random_indices[batch_indices]\n\t\tyield X[batch_indices], Y[batch_indices]", "title": "" }, { "docid": "34282d90dc59e4d153cf6de16b69071a", "score": "0.4667768", "text": "def test_conv_layer_bprop(layer_class, do_cross_correlation=False):\n inputs = np.arange(96).reshape((2, 3, 4, 4))\n kernels = np.arange(-12, 12).reshape((2, 3, 2, 2))\n if do_cross_correlation:\n kernels = kernels[:, :, ::-1, ::-1]\n biases = np.arange(2)\n grads_wrt_outputs = np.arange(-20, 16).reshape((2, 2, 3, 3))\n outputs = np.array(\n [[[[ -958., -1036., -1114.],\n [-1270., -1348., -1426.],\n [-1582., -1660., -1738.]],\n [[ 1707., 1773., 1839.],\n [ 1971., 2037., 2103.],\n [ 2235., 2301., 2367.]]],\n [[[-4702., -4780., -4858.],\n [-5014., -5092., -5170.],\n [-5326., -5404., -5482.]],\n [[ 4875., 4941., 5007.],\n [ 5139., 5205., 5271.],\n [ 5403., 5469., 5535.]]]]\n )\n true_grads_wrt_inputs = np.array(\n [[[[ 147., 319., 305., 162.],\n [ 338., 716., 680., 354.],\n [ 290., 608., 572., 294.],\n [ 149., 307., 285., 144.]],\n [[ 23., 79., 81., 54.],\n [ 114., 284., 280., 162.],\n [ 114., 272., 268., 150.],\n [ 73., 163., 157., 84.]],\n [[-101., -161., -143., -54.],\n [-110., -148., -120., -30.],\n [ -62., -64., -36., 6.],\n [ -3., 19., 29., 24.]]],\n [[[ 39., 67., 53., 18.],\n [ 50., 68., 32., -6.],\n [ 2., -40., -76., -66.],\n [ -31., -89., -111., -72.]],\n [[ 59., 115., 117., 54.],\n [ 114., 212., 208., 90.],\n [ 114., 200., 196., 78.],\n [ 37., 55., 49., 12.]],\n [[ 79., 163., 181., 90.],\n [ 178., 356., 384., 186.],\n [ 226., 440., 468., 222.],\n [ 105., 199., 209., 96.]]]])\n layer = layer_class(\n num_input_channels=kernels.shape[1],\n num_output_channels=kernels.shape[0],\n input_dim_1=inputs.shape[2],\n input_dim_2=inputs.shape[3],\n kernel_dim_1=kernels.shape[2],\n kernel_dim_2=kernels.shape[3]\n )\n layer.params = [kernels, biases]\n layer_grads_wrt_inputs = layer.bprop(inputs, outputs, grads_wrt_outputs)\n assert layer_grads_wrt_inputs.shape == true_grads_wrt_inputs.shape, (\n 'Layer bprop returns incorrect shaped array. '\n 'Correct shape is \\n\\n{0}\\n\\n but returned shape is \\n\\n{1}.'\n .format(true_grads_wrt_inputs.shape, layer_grads_wrt_inputs.shape)\n )\n assert np.allclose(layer_grads_wrt_inputs, true_grads_wrt_inputs), (\n 'Layer bprop does not return correct values. 
'\n        'Correct output is \\n\\n{0}\\n\\n but returned output is \\n\\n{1}'\n        .format(true_grads_wrt_inputs, layer_grads_wrt_inputs)\n    )\n    return True", "title": "" }, { "docid": "8318fbafb10106c0dff94421c8bc6b3b", "score": "0.4664602", "text": "def batch_shuffle(data, label, batch_size):\r\n    # Shuffle the batch data\r\n#    shuffled_data = list(zip(data, *label))\r\n#    random.shuffle(shuffled_data)\r\n#    tmp = list(zip(*shuffled_data))\r\n#    \r\n#    data_shuffled = tmp[0]\r\n#    label_shuffled = tmp[1:]\r\n\r\n    shuffled_data = list(zip(data, label))\r\n    random.shuffle(shuffled_data)\r\n    data_shuffled, label_shuffled = zip(*shuffled_data)\r\n    \r\n    return data_shuffled, label_shuffled", "title": "" }, { "docid": "543c1ce56ce68d04f9490c0b8c1521cd", "score": "0.46581268", "text": "def forward(self, x):\n        return self.net(x)  # pass through the network", "title": "" }, { "docid": "acd140e0c13e16719e142d2de2087224", "score": "0.4653015", "text": "def crossing_over(first_parent, second_parent):\n\n    child = Brain()\n    for layer_name, _ in child.named_parameters():\n        child_params = child.state_dict()[layer_name]\n        first_params = first_parent.state_dict()[layer_name]\n        second_params = second_parent.state_dict()[layer_name]\n        for tensor in range(len(child_params)):\n            try:\n                for value in range(len(child_params[tensor])):\n                    probability = randint(1, 100)\n                    if probability <= cfg.CROSSING_PROBABILITY:\n                        child_params[tensor][value] = second_params[tensor][value]\n                    else:\n                        child_params[tensor][value] = first_params[tensor][value]\n\n            except TypeError:\n                probability = randint(1, 100)\n                if probability <= cfg.CROSSING_PROBABILITY:\n                    child_params[tensor] = second_params[tensor]\n                else:\n                    child_params[tensor] = first_params[tensor]\n\n        child.state_dict()[layer_name] = child_params\n\n    return child", "title": "" }, { "docid": "067cc165200e5d97202e0655f14604c9", "score": "0.46509215", "text": "def alltoallv_cpu(rank, world_size, input_tensor_list, retain_nones=True):\n    # ensure len of input_tensor_list is same as the world_size.\n    assert input_tensor_list != None\n    assert len(input_tensor_list) == world_size\n\n    # ensure that all the tensors in the input_tensor_list are of same size.\n    sizes = [list(x.size()) for x in input_tensor_list]\n    for idx in range(1, len(sizes)):\n        assert len(sizes[idx - 1]) == len(\n            sizes[idx]\n        )  # no. of dimensions should be same\n        assert (\n            input_tensor_list[idx - 1].dtype == input_tensor_list[idx].dtype\n        )  # dtype should be same\n        assert (\n            sizes[idx - 1][1:] == sizes[idx][1:]\n        )  # except first dimension remaining dimensions should all be the same\n\n    # decide how much to pad.\n    # always use the first-dimension for padding.\n    ll = [x[0] for x in sizes]\n\n    # dims of the padding needed, if any\n    # these dims are used for padding purposes.\n    diff_dims = [[np.amax(ll) - l[0]] + l[1:] for l in sizes]\n\n    # pad the actual message\n    input_tensor_list = [\n        torch.cat((x, torch.zeros(diff_dims[idx]).type(x.dtype)))\n        for idx, x in enumerate(input_tensor_list)\n    ]\n\n    # send useful message sizes to all\n    send_counts = []\n    recv_counts = []\n    for idx in range(world_size):\n        # send a vector, of atleast 3 elements, [a, b, ....] 
where\n # a = useful message dim, b = actual message outgoing message size along the first dimension\n # and remaining elements are the remaining dimensions of the tensor\n send_counts.append(\n torch.from_numpy(\n np.array([sizes[idx][0]] + [np.amax(ll)] + sizes[idx][1:])\n ).type(torch.int64)\n )\n recv_counts.append(\n torch.zeros((1 + len(sizes[idx])), dtype=torch.int64)\n )\n __alltoall_cpu(rank, world_size, recv_counts, send_counts)\n\n # allocate buffers for receiving message\n output_tensor_list = []\n recv_counts = [tsize.numpy() for tsize in recv_counts]\n for idx, tsize in enumerate(recv_counts):\n output_tensor_list.append(\n torch.zeros(tuple(tsize[1:])).type(input_tensor_list[idx].dtype)\n )\n\n # send actual message itself.\n __alltoall_cpu(rank, world_size, output_tensor_list, input_tensor_list)\n\n # extract un-padded message from the output_tensor_list and return it\n return_vals = []\n for s, t in zip(recv_counts, output_tensor_list):\n if s[0] == 0:\n if retain_nones:\n return_vals.append(None)\n else:\n return_vals.append(t[0 : s[0]])\n return return_vals", "title": "" }, { "docid": "4c1c20c2afaefff0eb76aceaf25de2b5", "score": "0.4650855", "text": "def rebalanceSample(x_nparray, y_list, sample_size = 0):\n if sample_size <= 0:\n sample_size = len(x_nparray)\n \n num_classes = len(set(y_list))\n # based on the number of classes,\n # we will sample from each class accordingly\n # to obtain the specified sample size\n subsample_size = np.ceil(float(sample_size) / num_classes)\n \n return_x = []\n return_y = []\n \n for y in set(y_list):\n # loop through each distinct class\n indices = []\n for index, item in enumerate(y_list):\n # check if the current item matches our selected class\n if item == y:\n # if so, grab the index\n indices.append(index)\n # end loop through y_list\n # now sample\n indices_sample = np.random.choice(indices, size = subsample_size,\n replace = True)\n # now obtain the values based on the indices sampled\n y_subsample = [y_list[i] for i in indices_sample]\n x_subsample = [x_nparray[i] for i in indices_sample]\n # and append\n return_x = return_x + x_subsample\n return_y = return_y + y_subsample\n \n # if our return list is larger than the specified sample size\n # we obtained too many values due to rounding error\n # and will chop off a few values at random\n while len(return_y) > sample_size:\n random_drop_index = np.random.randint(low = 0, high = len(return_y))\n return_x.pop(random_drop_index)\n return_y.pop(random_drop_index)\n \n return np.asarray(return_x), np.asarray(return_y)", "title": "" }, { "docid": "3ff76dd8f65140765f26d697d3b8629f", "score": "0.46493953", "text": "def forward(self, input, num_step, params=None, training=False, backup_running_statistics=False):\n if params is not None:\n params = extract_top_level_dict(current_dict=params)\n (weight, bias) = params[\"weight\"], params[\"bias\"]\n #print(num_step, params['weight'])\n else:\n #print(num_step, \"no params\")\n weight, bias = self.weight, self.bias\n\n if self.use_per_step_bn_statistics:\n running_mean = self.running_mean[num_step]\n running_var = self.running_var[num_step]\n if params is None:\n if not self.args.enable_inner_loop_optimizable_bn_params:\n bias = self.bias[num_step]\n weight = self.weight[num_step]\n else:\n running_mean = None\n running_var = None\n\n\n if backup_running_statistics and self.use_per_step_bn_statistics:\n self.backup_running_mean.data = copy(self.running_mean.data)\n self.backup_running_var.data = copy(self.running_var.data)\n\n momentum = 
self.momentum\n\n output = F.batch_norm(input, running_mean, running_var, weight, bias,\n training=True, momentum=momentum, eps=self.eps)\n\n return output", "title": "" }, { "docid": "118230119d82e09c76236aaa3a4e8732", "score": "0.46477017", "text": "def test_batches(self):\n n_segs = 2\n n_frames = 20\n prefs = [[0., 1.], [1., 0.]]\n s1s = []\n s2s = []\n for _ in range(n_segs):\n s1 = 255 * np.random.normal(loc=1.0, size=(n_frames, 84, 84, 4))\n s2 = 255 * np.random.normal(loc=-1.0, size=(n_frames, 84, 84, 4))\n s1s.append(s1)\n s2s.append(s2)\n\n # Step 1: present all trajectories as one big batch\n feed_dict = {\n self.rpn.s1: s1s,\n self.rpn.s2: s2s,\n self.rpn.pref: prefs,\n self.rpn.training: False\n }\n rs1_batch, rs2_batch, pred_batch, loss_batch = self.sess.run(\n [self.rpn.rs1, self.rpn.rs2, self.rpn.pred, self.rpn.loss],\n feed_dict)\n\n # Step 2: present trajectories individually\n rs1_nobatch = []\n rs2_nobatch = []\n pred_nobatch = []\n loss_nobatch = 0\n for i in range(n_segs):\n feed_dict = {\n self.rpn.s1: [s1s[i]],\n self.rpn.s2: [s2s[i]],\n self.rpn.pref: [prefs[i]],\n self.rpn.training: False\n }\n [rs1], [rs2], [pred], loss = self.sess.run(\n [self.rpn.rs1, self.rpn.rs2, self.rpn.pred, self.rpn.loss],\n feed_dict)\n rs1_nobatch.append(rs1)\n rs2_nobatch.append(rs2)\n pred_nobatch.append(pred)\n loss_nobatch += loss\n\n # Compare\n assert_allclose(rs1_batch, rs1_nobatch, atol=1e-5)\n assert_allclose(rs2_batch, rs2_nobatch, atol=1e-5)\n assert_allclose(pred_batch, pred_nobatch, atol=1e-5)\n assert_approx_equal(loss_batch, loss_nobatch, significant=4)", "title": "" }, { "docid": "2c0f584f9a9ebc387d71ab9f1814f487", "score": "0.46471676", "text": "def run_reclustering(clustering_round=None,\n\n clip_len=None,\n sim_matrix=None,\n seq_names=None,\n crops_dir=None,\n relative_image_pathes=None,\n num_initial_batches=None,\n num_cliques_per_initial_batch=None,\n num_samples_per_clique=None,\n anchors=None,\n\n batch_size=None,\n num_batches_to_sample=None,\n max_cliques_per_batch=None,\n output_dir=None,\n seed=None):\n generator = ClipBatchGenerator(sim_matrix=sim_matrix,\n clip_len=clip_len,\n seq_names=seq_names,\n relative_image_pathes=relative_image_pathes,\n crops_dir=crops_dir,\n num_cliques_per_initial_batch=num_cliques_per_initial_batch,\n num_samples_per_clique=num_samples_per_clique,\n anchors=anchors,\n seed=seed)\n init_batches = generator.generate_batches(num_initial_batches)\n\n clips = list()\n flipvals = list()\n labels = list()\n print 'Sampling batches'\n for i, batch in tqdm(enumerate(init_batches)):\n # print \"Sampling batch {}\".format(i)\n _clips, _flipvals, _labels = ClipBatchSampler.parse_to_list(batch)\n clips.append(_clips)\n flipvals.append(flipvals)\n labels.append(labels)\n\n for batch in init_batches[:4]:\n for clique in batch:\n clique.visualize()\n plt.show()\n\n clips = np.vstack(clips)\n flipvals = np.hstack(flipvals)\n labels = np.hstack(labels)\n assert flipvals.shape == labels.shape, 'Corrupted arguments for batch loader'\n assert clips.shape[0] == len(flipvals) == len(labels)\n return clips, flipvals, labels", "title": "" }, { "docid": "37ec37be686377c536c9afa1a0180c37", "score": "0.46450138", "text": "def __redistribute_shuffle(self, snd_pr, send_amt, rcv_pr, snd_dtype):\n rank = self.comm.rank\n send_slice = [slice(None)] * self.numdims\n keep_slice = [slice(None)] * self.numdims\n if rank == snd_pr:\n if snd_pr < rcv_pr: # data passed to a higher rank (off the bottom)\n send_slice[self.split] = slice(\n self.lshape[self.split] - 
send_amt, self.lshape[self.split]\n )\n keep_slice[self.split] = slice(0, self.lshape[self.split] - send_amt)\n if snd_pr > rcv_pr: # data passed to a lower rank (off the top)\n send_slice[self.split] = slice(0, send_amt)\n keep_slice[self.split] = slice(send_amt, self.lshape[self.split])\n data = self.__array[send_slice].clone()\n self.comm.Send(data, dest=rcv_pr, tag=685)\n self.__array = self.__array[keep_slice]\n if rank == rcv_pr:\n shp = list(self.gshape)\n shp[self.split] = send_amt\n data = torch.zeros(shp, dtype=snd_dtype, device=self.device.torch_device)\n self.comm.Recv(data, source=snd_pr, tag=685)\n if snd_pr < rcv_pr: # data passed from a lower rank (append to top)\n self.__array = torch.cat((data, self.__array), dim=self.split)\n if snd_pr > rcv_pr: # data passed from a higher rank (append to bottom)\n self.__array = torch.cat((self.__array, data), dim=self.split)", "title": "" }, { "docid": "ab6d1ad4bafc59543bd3fa56e19f3ca4", "score": "0.4640262", "text": "def runBaseline(groupSize):\n iteration = {\"Adams\": 0, \"Cabot\": 0, \"Currier\": 0, \"Dunster\": 0, \"Eliot\": 0, \"Kirkland\": 0, \"Leverett\": 0, \"Lowell\": 0, \n \"Mather\": 0, \"Pfoho\": 0, \"Quincy\": 0, \"Winthrop\": 0}\n allHouses = set([\"Adams\", \"Cabot\", \"Currier\", \"Dunster\", \"Eliot\", \"Kirkland\", \"Leverett\", \"Lowell\", \n \"Mather\", \"Pfoho\", \"Quincy\", \"Winthrop\"])\n numTrades = 0\n\n empty_houses = set()\n # shuffling each list, RSD mechanism implemented by each house\n # RSD mechanism for ranking within each house, if that house is chosen\n for house in allHouses: \n try:\n random.shuffle(group_prefs[house][groupSize])\n except: \n empty_houses.add(house)\n\n while len(empty_houses) < 11: \n remainingHouses = allHouses.difference(empty_houses)\n\n # RSD\n chosen_house = random.sample(remainingHouses, 1)[0]\n\n # finding first house that can be matched with available groups\n prefs = group_prefs[chosen_house][groupSize][iteration[chosen_house]]\n house_number = house_mapping[chosen_house]\n curr_house_pref = prefs[2].index(house_number) # ranking of preference on currently assigned house\n index = 0\n target_house = \"\"\n\n # checking if target house is available before currently assigned house\n for i in range(curr_house_pref + 1):\n index += 1\n if num_mapping[prefs[2][i]] not in empty_houses:\n target_house = num_mapping[prefs[2][i]]\n break\n if curr_house_pref <= index: \n group_prefs[chosen_house][groupSize].pop(iteration[chosen_house])\n if len(group_prefs[chosen_house][groupSize]) - 1 <= iteration[chosen_house]: # checking if iterations is greater than the number that want to trade\n empty_houses.add(chosen_house) # already in visited set, will not be visited later\n if len(group_prefs[chosen_house][groupSize]) <= 0:\n group_prefs[chosen_house].pop(groupSize) \n continue\n\n available_groups = group_prefs[target_house][groupSize]\n ret_pref = []\n for index1, group in enumerate(available_groups):\n if group[2].index(house_number) < group[2].index(house_mapping[group[1]]):\n ret_pref.append(group[2].index(house_number))\n \n if len(ret_pref) > 0:\n # finding the group in other house that most prefers this house\n best_match = ret_pref.index(min(ret_pref))\n\n # perform swap here\n numTrades += groupSize\n group_prefs[chosen_house][groupSize].pop(iteration[chosen_house])\n if len(group_prefs[chosen_house][groupSize]) - 1 <= iteration[chosen_house]: # checking if iterations is greater than the number that want to trade\n empty_houses.add(chosen_house) # already in visited set, will 
not be visited later\n if len(group_prefs[chosen_house][groupSize]) <= 0:\n group_prefs[chosen_house].pop(groupSize)\n\n # same, remove for target house as well\n numTrades += groupSize\n if best_match < iteration[target_house]:\n iteration[target_house] -= 1\n group_prefs[target_house][groupSize].pop(best_match)\n if len(group_prefs[target_house][groupSize]) - 1 <= iteration[target_house]: # checking if iterations is greater than the number that want to trade\n empty_houses.add(target_house) # already in visited set, will not be visited later\n if len(group_prefs[target_house][groupSize]) <= 0:\n group_prefs[target_house].pop(groupSize)\n else: \n iteration[chosen_house] += 1\n if len(group_prefs[chosen_house][groupSize]) - 1 <= iteration[chosen_house]: # checking if iterations is greater than the number that want to trade\n empty_houses.add(chosen_house)\n if len(group_prefs[chosen_house][groupSize]) <= 0:\n group_prefs[chosen_house].pop(groupSize)\n return numTrades", "title": "" }, { "docid": "b6592921589fb5a7139649b83a62b649", "score": "0.46363708", "text": "def generate_batches_in_farm():\n\n TUNNEL_AISLES_DICT = {\n \"Tunnel3\": [\"A\", \"B\"],\n \"Tunnel4\": [\"E\"],\n \"Tunnel5\": [\"C\"],\n \"Tunnel6\": [\"D\"],\n }\n AISLES_COLUMNS_DICT = {\"A\": 32, \"B\": 32, \"C\": 24, \"D\": 16, \"E\": 12}\n NUM_SHELVES = 4\n all_shelf_data = []\n for tunnel, aisles in TUNNEL_AISLES_DICT.items():\n for aisle in aisles:\n for column in range(1, AISLES_COLUMNS_DICT[aisle] + 1):\n for shelf in range(1, NUM_SHELVES + 1):\n crop = random.choice(CROP_TYPES)\n n_trays = 8 if tunnel == \"Tunnel3\" else 10\n tray_size = 2.4 if tunnel == \"Tunnel3\" else 3.0\n # random time in the past week\n hours_past = timedelta(hours=random.randint(1, 24 * 7))\n transfer_time = datetime.now() - hours_past\n transfer_time = transfer_time.replace(\n minute=0, second=0, microsecond=0\n )\n # 8 days after transfer time\n harvest_time = transfer_time + timedelta(days=8)\n if not crop:\n continue\n shelf_data = {\n \"zone\": tunnel,\n \"aisle\": aisle,\n \"column\": column,\n \"shelf\": shelf,\n \"crop_type_name\": crop,\n \"number_of_trays\": n_trays,\n \"tray_size\": tray_size,\n \"event_time\": transfer_time.isoformat(),\n \"expected_harvest_time\": harvest_time.isoformat(),\n }\n all_shelf_data.append(shelf_data)\n # reverse the list so that it will go forward in time\n all_shelf_data.reverse()\n return all_shelf_data", "title": "" }, { "docid": "0fc89a52db86fca31c8c06cc44cebede", "score": "0.4634024", "text": "def next_batch(self):\n # Start enqueuing and other preparation at the beginning of an epoch.\n if self.epoch_done and self.shuffle:\n # np.random.shuffle(self.ids)\n np.random.shuffle(list(self.ids))\n samples, self.epoch_done = self.prefetcher.next_batch()\n im_list, im_names, labels, mirrored = zip(*samples)\n # t = time.time()\n # Transform the list into a numpy array with shape [N, ...]\n ims = np.stack(np.concatenate(im_list))\n\n # print '---stacking time {:.4f}s'.format(time.time() - t)\n im_names = np.concatenate(im_names)\n labels = np.concatenate(labels)\n mirrored = np.concatenate(mirrored)\n return ims, im_names, labels, mirrored, self.epoch_done", "title": "" }, { "docid": "862ba7fb20c7ea5cda413dce6423f10b", "score": "0.46323767", "text": "def forward(self):\n self.fake_B = self.netG(self.real_A) # for current task", "title": "" }, { "docid": "3ba28237268b1b3e472bd784f0ee6a13", "score": "0.46315625", "text": "def forward(self, state):\n x = state\n for i, l in enumerate(self.net):\n x = 
self.net[i](x)\n return x", "title": "" }, { "docid": "932d62cadcbd43e6a3fc4309efaec934", "score": "0.46313816", "text": "def run(rank, size):\n torch.manual_seed(1234)\n train_set, bsz = partition_dataset()\n model = Net()\n model = model\n# model = model.cuda(rank)\n optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.5)\n\n num_batches = ceil(len(train_set.dataset) / float(bsz))\n for epoch in range(10):\n epoch_loss = 0.0\n for data, target in train_set:\n data, target = Variable(data), Variable(target)\n# data, target = Variable(data.cuda(rank)), Variable(target.cuda(rank))\n optimizer.zero_grad()\n output = model(data)\n loss = F.nll_loss(output, target)\n epoch_loss += loss\n loss.backward()\n average_gradients(model)\n optimizer.step()\n print('Rank ',\n dist.get_rank(), ', epoch ', epoch, ': ',\n epoch_loss / num_batches)", "title": "" }, { "docid": "42ed9e049f1e1d27629e424b56ca237a", "score": "0.46305752", "text": "def max_pool_forward_naive(x, pool_param):\n out = None\n #############################################################################\n # TODO: Implement the max pooling forward pass #\n #############################################################################\n N,C,H,W=x.shape\n \n H_out=(H-pool_param['pool_height'])/pool_param['stride'] + 1\n W_out=(W-pool_param['pool_width'])/pool_param['stride'] + 1\n \n \n \n #depth remains same, so its not modified we use stride and \n #NO padding to reduce size of image\n out=np.zeros((N,C,H_out,W_out))\n poolheight=pool_param['pool_height']\n poolwidth=pool_param['pool_width']\n #print poolheight\n for n in np.arange(0,N,1):\n \tfor height in np.arange(0,H_out,1):\n \t\tfor width in np.arange(0,W_out,1):\n \t\t\t#defining window, take all depth, based on stride to select window\n \t\t\tstride=pool_param['stride']\n \t\t\twindow=x[n,:,(height*stride):(height*stride+poolheight),(width*stride):(width*stride+poolwidth)]\n \t\t\t#print window.shape # 3 x 2 x 2 in this example \n \t\t\t#now we need to take maximum,pool height=2, poolwidth=2 =3x4 , 3 is depth , max accross each depth\n \t\t\twindow=window.reshape(C,poolheight*poolwidth)\n \t\t\t#print np.max(window,axis=0) #3 values (3 depth)\n \t\t\t#print out[n].shape\n \t\t\tout[n,:,height,width]=np.max(window,axis=1)\n #print out\n ##############################################################\n # END OF YOUR CODE #\n #############################################################################\n cache = (x, pool_param)\n return out, cache", "title": "" }, { "docid": "ecc759845a891d1bbea3b7ba096b518e", "score": "0.46287286", "text": "def forward(self, x):\n # Forward pass of the shared network.\n share_x = self.backbone(x)\n \n \n # forward pass of each head\n mean_list, var_list = [], []\n for backbone in self.backbone_list:\n mean, var = self._forward_once(backbone, share_x, self.share_fc)\n mean_list.append(mean)\n var_list.append(var)\n \n #calculate ensemble mean and var\n mean_sum = mean_list[0].clone()\n var_sum = var_list[0].clone()\n for i in range(len(mean_list)-1):\n mean_sum += mean_list[i+1].clone()\n var_sum += var_list[i+1].clone()\n mean_ens = mean_sum/len(mean_list)\n var_ens = var_sum/(len(var_list)*len(var_list))\n \n return mean_list, var_list, mean_ens, var_ens", "title": "" }, { "docid": "83c0b3e65a7cda1d545b9d1194c489ad", "score": "0.46273983", "text": "def test_gpt_neox_assignment_load_balancing(\n work: dict[str, dict[str, float]],\n ranks: list[int],\n expected: dict[str, dict[str, float]],\n) -> None:\n topology = 
PipeModelDataParallelTopology(1, len(ranks), 1)\n for rank in ranks:\n assignment = GPTNeoXAssignment(\n work,\n local_rank=rank,\n topology=topology,\n data_parallel_group=None,\n model_parallel_group=None,\n )\n\n for layer, factors in expected.items():\n for factor in factors:\n inv_worker = assignment.inv_worker(layer, factor)\n assert inv_worker == factors[factor]\n\n model_parallel_peers = get_group_with_rank(\n rank,\n topology.get_axis_comm_lists('model'),\n )\n assert assignment.is_grad_worker(layer) == (\n rank in model_parallel_peers\n and inv_worker in model_parallel_peers\n )", "title": "" }, { "docid": "5a70951aa2aaf7802463ecbd6c83b256", "score": "0.46206227", "text": "def forward(self, inputs: Dict) -> Dict:\n\n # Encode target agent\n target_agent_feats = inputs['target_agent_representation']\n target_agent_embedding = self.leaky_relu(self.target_agent_emb(target_agent_feats))\n _, target_agent_enc = self.target_agent_enc(target_agent_embedding)\n target_agent_enc = target_agent_enc.squeeze(0) #B,32\n\n # Encode surrounding agents\n nbr_vehicle_feats = inputs['surrounding_agent_representation']['vehicles']\n nbr_vehicle_feats = torch.cat((nbr_vehicle_feats, torch.zeros_like(nbr_vehicle_feats[:, :, :, 0:1])), dim=-1)\n nbr_vehicle_masks = inputs['surrounding_agent_representation']['vehicle_masks']\n nbr_vehicle_embedding = self.leaky_relu(self.nbr_emb(nbr_vehicle_feats))\n nbr_vehicle_enc = self.variable_size_gru_encode(nbr_vehicle_embedding, nbr_vehicle_masks, self.nbr_enc, batched=False) #B,84,32\n _, masks_for_batching_veh = self.create_batched_input(nbr_vehicle_feats, nbr_vehicle_masks)\n nbr_ped_feats = inputs['surrounding_agent_representation']['pedestrians']\n nbr_ped_feats = torch.cat((nbr_ped_feats, torch.ones_like(nbr_ped_feats[:, :, :, 0:1])), dim=-1)\n nbr_ped_masks = inputs['surrounding_agent_representation']['pedestrian_masks']\n nbr_ped_embedding = self.leaky_relu(self.nbr_emb(nbr_ped_feats))\n nbr_ped_enc = self.variable_size_gru_encode(nbr_ped_embedding, nbr_ped_masks, self.nbr_enc, batched=True) \n _, masks_for_batching_ped = self.create_batched_input(nbr_ped_feats, nbr_ped_masks) \n\n\n interaction_feats = torch.cat((target_agent_enc.unsqueeze(1),nbr_vehicle_enc), dim=1)\n target_masks = torch.ones((target_agent_enc.shape[0], 1, 1), device=target_agent_enc.device).bool()\n interaction_masks = torch.cat((target_masks, masks_for_batching_veh.squeeze(-1)), dim=1).repeat(1,1,interaction_feats.shape[-1])\n interaction_feats_batched = torch.masked_select(interaction_feats, interaction_masks!=0) \n interaction_feats_batched = interaction_feats_batched.view(-1, interaction_feats.shape[2]) # BN,32\n \n # Encode lane nodes \n lanes_graphs = inputs['lanes_graphs'].to(target_agent_feats.device) \n lane_node_feats = inputs['map_representation']['lane_node_feats']\n lane_node_masks = inputs['map_representation']['lane_node_masks'] \n lane_node_feats = torch.cat((lane_node_feats, lane_node_masks[:,:,:,:1]), dim=-1)\n batch_lane_node_masks = (~(lane_node_masks[:,:,:,0]!=0)).any(-1)\n lane_node_feats_batched = lane_node_feats[batch_lane_node_masks] # BN,20,7\n lane_node_embedding = self.leaky_relu(self.node_emb(lane_node_feats)) \n lane_node_enc = self.variable_size_gru_encode(lane_node_embedding, lane_node_masks, self.node_gru_encoder, batched=True) \n if self.hg==\"hgt\":\n lanes_graphs.nodes['l'].data['inp'] = lane_node_enc\n lanes_graphs.nodes['p'].data['inp'] = nbr_ped_enc\n lanes_graphs.nodes['v'].data['inp'] = interaction_feats_batched\n lane_node_enc, 
interaction_feats_batched = self.hg_encoder(lanes_graphs, out_key=['l','v'])\n else:\n h_dict = {'l': lane_node_enc, 'p': nbr_ped_enc, 'v': interaction_feats_batched }\n lane_node_enc, interaction_feats_batched, att = self.hg_encoder(lanes_graphs, h_dict) \n lane_node_enc = self.scatter_batched_input(lane_node_enc, batch_lane_node_masks.unsqueeze(-1).unsqueeze(-1))\n interaction_feats = self.scatter_batched_input(interaction_feats_batched, interaction_masks[:,:,-1:].unsqueeze(-1)) # B, N, 32\n target_agent_enc = torch.cat((target_agent_enc, interaction_feats[:,0]), dim=-1) # B, 64\n\n \n # Lane node masks\n lane_node_masks = ~lane_node_masks[:, :, :, 0].bool()\n lane_node_masks = lane_node_masks.any(dim=2)\n lane_node_masks = ~lane_node_masks\n lane_node_masks = lane_node_masks.float()\n\n # Return encodings\n encodings = {'target_agent_encoding': target_agent_enc, #before interaction\n 'context_encoding': {'combined': lane_node_enc,\n 'combined_masks': lane_node_masks, \n 'map': None,\n 'vehicles': None,\n 'pedestrians': None,\n 'map_masks': None,\n 'vehicle_masks': None,\n 'pedestrian_masks': None\n },\n 'att' : att\n }\n\n # Pass on initial nodes and edge structure to aggregator if included in inputs\n if 'init_node' in inputs:\n encodings['init_node'] = inputs['init_node']\n encodings['node_seq_gt'] = inputs['node_seq_gt']\n encodings['s_next'] = inputs['map_representation']['s_next']\n encodings['edge_type'] = inputs['map_representation']['edge_type']\n\n return encodings", "title": "" }, { "docid": "b8ddd56ad6573510fad6555ac9559102", "score": "0.46193913", "text": "def populate_last_processor():\r\n global subnetwork_container_nodes, subnetwork_container_links, nodes, links, accounted_for_links\r\n for link_row in range(len(links)):\r\n if links[link_row, li_idx] not in accounted_for_links:\r\n subnetwork_container_links[-1, link_row] = links[link_row]\r\n for link_row in range(len(subnetwork_container_links[-1])):\r\n for node_row in range(len(nodes)):\r\n if subnetwork_container_links[-1, link_row, li_Mnode_u] == nodes[node_row, ni_idx]:\r\n subnetwork_container_nodes[-1, node_row] = nodes[node_row]\r\n if subnetwork_container_links[-1, link_row, li_Mnode_d] == nodes[node_row, ni_idx]:\r\n subnetwork_container_nodes[-1, node_row] = nodes[node_row]\r\n return", "title": "" }, { "docid": "9385c20e4ce62dea3ca62d1f14957c8c", "score": "0.4611239", "text": "def forward(self, features, rois):\n\n batch_size, num_channels, data_height, data_width = features.size()\n num_rois = rois.size()[0]\n outputs = Variable(torch.zeros(num_rois, num_channels, self.pooled_height, self.pooled_width)).cuda()\n\n for roi_idx, roi in enumerate(rois):\n batch_idx = int(roi[0])\n if batch_idx > batch_size - 1:\n raise ValueError(\"Batch index out of range!\")\n upleft_x, upleft_y, downright_x, downright_y = np.round(roi[1:].cpu().numpy() * self.spatial_scale).astype(int)\n roi_width = max(downright_x - upleft_x + 1, 1)\n roi_height = max(downright_y - upleft_y + 1, 1)\n bin_size_w = float(roi_width) / float(self.pooled_width)\n bin_size_h = float(roi_height) / float(self.pooled_height)\n\n for ph in range(self.pooled_height):\n hstart = int(np.floor(ph * bin_size_h))\n hend = int(np.ceil((ph + 1) * bin_size_h))\n hstart = min(data_height, max(0, hstart + upleft_y))\n hend = min(data_height, max(0, hend + upleft_y))\n\n for pw in range(self.pooled_width):\n wstart = int(np.floor(pw * bin_size_w))\n wend = int(np.ceil((pw + 1) * bin_size_w))\n wstart = min(data_width, max(0, wstart + upleft_x))\n wend = 
min(data_width, max(0, wend + upleft_x))\n                    is_error = (hend <= hstart) or (wend <= wstart)\n\n                    if is_error:\n                        outputs[roi_idx, :, ph, pw] = 0\n\n                    else:\n                        data = features[batch_idx]\n                        outputs[roi_idx, :, ph, pw] = torch.max(torch.max(data[:, hstart:hend, wstart:wend], dim=1)[0], dim=1)[0].view(-1)\n\n        return outputs", "title": "" }, { "docid": "43c6385ee3201be72ed0feaa98cb86f4", "score": "0.46088144", "text": "def _iterate_over_batch(\n            self,\n            elem: ElementType,\n            is_left: bool,\n    ) -> Ack:\n\n        upstream_ack = AckSubject()\n\n        iterable = iter(elem)\n\n        # empty iterable\n        try:\n            val = next(iterable)\n        except StopIteration:\n            return continue_ack\n\n        next_state = RawControlledZipStates.ElementReceived(\n            val=val,\n            is_left=is_left,\n            ack=upstream_ack,\n            iter=iterable,\n        )\n\n        with self.lock:\n            next_state.prev_raw_state = self.state\n            next_state.prev_raw_termination_state = self.termination_state\n            self.state = next_state\n\n        raw_prev_termination_state = next_state.prev_raw_termination_state\n        prev_raw_state = next_state.prev_raw_state\n        prev_state = prev_raw_state.get_measured_state(raw_prev_termination_state)\n\n        if isinstance(prev_state, ControlledZipStates.Stopped):\n            return stop_ack\n\n        elif isinstance(prev_state, ControlledZipStates.WaitOnLeftRight):\n            return upstream_ack\n\n        elif is_left and isinstance(prev_state, ControlledZipStates.WaitOnLeft):\n            left_val = val\n            left_iter = iterable\n            left_in_ack = upstream_ack\n            right_val = prev_state.right_val\n            right_iter = prev_state.right_iter\n            right_in_ack = prev_state.right_ack\n            other_upstream_ack = prev_state.right_ack\n\n        elif not is_left and isinstance(prev_state, ControlledZipStates.WaitOnRight):\n            left_val = prev_state.left_val\n            left_iter = prev_state.left_iter\n            left_in_ack = prev_state.left_ack\n            right_val = val\n            right_iter = iterable\n            right_in_ack = upstream_ack\n            other_upstream_ack = prev_state.left_ack\n\n        else:\n            raise Exception('unknown state \"{}\", is_left {}'.format(prev_state, is_left))\n\n        # keep elements to be sent in a buffer. 
Only when the incoming batch of elements is iterated over, the\n # elements in the buffer are sent.\n zipped_output_buffer = []\n\n request_new_elem_from_left = False\n request_new_elem_from_right = False\n\n while True:\n\n # iterate over next series of SelectComplete (if exists)\n # break loop when encountering SelectNext or end of list\n while True:\n # collect SelectComplete, because they don't appear on the right side\n if isinstance(left_val, SelectCompleted):\n zipped_output_buffer.append(left_val)\n\n else:\n break\n\n try:\n left_val = next(left_iter)\n except StopIteration:\n request_new_elem_from_left = True\n break\n\n # break loop when there are no more right elements\n if request_new_elem_from_right or request_new_elem_from_left:\n break\n\n # left send SelectNext\n stop_right = False\n while True:\n if isinstance(right_val, SelectNext):\n # add to buffer\n zipped_output_buffer.append(left_val)\n\n elif isinstance(right_val, SelectCompleted):\n stop_right = True\n\n # always read next right value\n try:\n right_val = next(right_iter)\n except StopIteration:\n request_new_elem_from_right = True\n break\n\n if stop_right:\n break\n\n try:\n left_val = next(left_iter)\n except StopIteration:\n request_new_elem_from_left = True\n break\n\n # only send elements downstream, if there are any to be sent\n if zipped_output_buffer:\n zip_out_ack = self.observer.on_next(zipped_output_buffer)\n else:\n zip_out_ack = continue_ack\n\n # all elements in the left and right iterable are send downstream\n if request_new_elem_from_left and request_new_elem_from_right:\n next_state = RawControlledZipStates.WaitOnLeftRight()\n\n elif request_new_elem_from_left:\n next_state = RawControlledZipStates.WaitOnLeft(\n right_val=right_val,\n right_iter=right_iter,\n right_ack=right_in_ack,\n )\n\n elif request_new_elem_from_right:\n next_state = RawControlledZipStates.WaitOnRight(\n left_val=left_val,\n left_iter=left_iter,\n left_ack=left_in_ack,\n )\n\n else:\n raise Exception('at least one side should be back-pressured')\n\n with self.lock:\n # get termination state\n raw_prev_termination_state = self.termination_state\n\n # set next state\n self.state = next_state\n\n prev_termination_state = raw_prev_termination_state.get_measured_state()\n\n def stop_active_acks():\n other_upstream_ack.on_next(stop_ack)\n\n # stop back-pressuring both sources, because there is no need to request elements\n # from completed source\n if isinstance(prev_termination_state, TerminationStates.LeftCompletedState) \\\n and request_new_elem_from_left:\n\n self._signal_on_complete_or_on_error(state=next_state)\n stop_active_acks()\n return stop_ack\n\n # stop back-pressuring both sources, because there is no need to request elements\n # from completed source\n elif isinstance(prev_termination_state, TerminationStates.RightCompletedState) \\\n and request_new_elem_from_right:\n\n self._signal_on_complete_or_on_error(state=next_state)\n stop_active_acks()\n return stop_ack\n\n # in error state, stop back-pressuring both sources\n elif isinstance(prev_termination_state, TerminationStates.ErrorState):\n\n self._signal_on_complete_or_on_error(state=next_state, ex=prev_termination_state.ex)\n stop_active_acks()\n return stop_ack\n\n # finish connecting ack only if not in Stopped or Error state\n else:\n\n if request_new_elem_from_left and request_new_elem_from_right:\n\n # directly return ack depending on whether left or right called `iterate_over_batch`\n if is_left:\n zip_out_ack.subscribe(right_in_ack)\n\n else:\n 
zip_out_ack.subscribe(left_in_ack)\n\n                    return zip_out_ack\n\n            # all elements in the left buffer are sent to the observer, back-pressure only left\n            elif request_new_elem_from_left:\n\n                if is_left:\n                    return zip_out_ack\n\n                else:\n                    zip_out_ack.subscribe(left_in_ack)\n                    return right_in_ack\n\n            # all elements in the left buffer are sent to the observer, back-pressure only right\n            elif request_new_elem_from_right:\n\n                if is_left:\n                    zip_out_ack.subscribe(right_in_ack)\n                    return left_in_ack\n\n                else:\n                    return zip_out_ack\n\n            else:\n                raise Exception('illegal case')", "title": "" }, { "docid": "002e608549eb3fc819df5cbc22f609e9", "score": "0.4607364", "text": "def trainNetPool(net_pool):\n    for net in net_pool:\n        net.iniciarEntrenamiento()", "title": "" }, { "docid": "5457b73c4b5ba15051b3857192523aac", "score": "0.45991924", "text": "def Backward(self, input, switch_map):\n        assert input.shape == self.out_shape\n\n        input = self.activation(input) \n\n        # reshape to fit the API of uppool\n        b, c, h, w = input.shape\n        input = input.reshape( [b*c, h, w] ) \n        \n        up_pooled_out = max_uppool_2d(input, switch_map, poolsize = self.poolsize)\n\n        # reshape to fit the API of Backward\n        up_h, up_w = up_pooled_out.shape[-2:]\n        up_pooled_out = up_pooled_out.reshape( [b, c, up_h, up_w ] )\n\n        if up_pooled_out.dtype == theano.config.floatX:\n            deconv_out = self.deconv(up_pooled_out)\n        else:\n            deconv_out = self.deconv(up_pooled_out.astype(theano.config.floatX))\n        \n        # upsample, to mimic the stride of deconv\n        b, c, x, y = deconv_out.shape\n        dx, dy = self.conv_stride\n\n        upsampled_out = np.zeros([b, c, dx * x, dy * y]) \n        upsampled_out[:,:,::dx,::dy] = deconv_out\n\n        output = self.resize( upsampled_out, self.in_shape )\n        return np.asarray(output, dtype = theano.config.floatX)", "title": "" }, { "docid": "a4854ed5d15f8ea2fb4f7cdfd8a08ccb", "score": "0.45965374", "text": "def random_mini_batches(X, Y, mini_batch_size = 64, seed = 0, Y_orig = None):\n    \n    # np.random.seed(seed) # To make your \"random\" minibatches the same as ours\n    m = X.shape[1]                  # number of training examples\n    mini_batches = []\n    \n    # Step 1: Shuffle (X, Y)\n    permutation = list(np.random.permutation(m))\n    shuffled_X = X[:, permutation]\n    shuffled_Y = Y[:, permutation].reshape((Y.shape[0],m))\n    if Y_orig is not None and Y_orig.any():\n        shuffled_Y_orig = Y_orig[:, permutation].reshape((Y_orig.shape[0],m))\n\n    # Step 2: Partition (shuffled_X, shuffled_Y). 
Minus the end case.\n    if mini_batch_size > m or mini_batch_size == 0:\n        mini_batch_size = m\n    num_complete_minibatches = math.floor(m/mini_batch_size) # number of mini batches of size mini_batch_size in your partitioning\n    for k in range(0, num_complete_minibatches):\n\n        mini_batch_X = shuffled_X[:, k * mini_batch_size : (k+1) * mini_batch_size]\n        mini_batch_Y = shuffled_Y[:, k * mini_batch_size : (k+1) * mini_batch_size]\n        if Y_orig is not None and Y_orig.any():\n            mini_batch_Y_orig = shuffled_Y_orig[:, k * mini_batch_size : (k+1) * mini_batch_size]\n        else:\n            mini_batch_Y_orig = mini_batch_Y\n\n        mini_batch = (mini_batch_X, mini_batch_Y, mini_batch_Y_orig)\n        mini_batches.append(mini_batch)\n    \n    # Handling the end case (last mini-batch < mini_batch_size)\n    if m % mini_batch_size != 0:\n\n        mini_batch_X = shuffled_X[:, mini_batch_size * num_complete_minibatches : m]\n        mini_batch_Y = shuffled_Y[:, mini_batch_size * num_complete_minibatches : m]\n        if Y_orig is not None and Y_orig.any():\n            mini_batch_Y_orig = shuffled_Y_orig[:, mini_batch_size * num_complete_minibatches : m]\n        else:\n            mini_batch_Y_orig = mini_batch_Y\n\n        mini_batch = (mini_batch_X, mini_batch_Y, mini_batch_Y_orig)\n        mini_batches.append(mini_batch)\n    \n    return mini_batches", "title": "" }, { "docid": "e672d9104bd2da6f0d871a60bda31be3", "score": "0.45951387", "text": "def forward1(self,\n                seq_repr: torch.Tensor,\n                adj: torch.LongTensor) -> List[torch.Tensor]:\n        # gcn_out = seq_repr\n        out = [seq_repr]\n        B, L, E = seq_repr.size()\n        D = self.gcn_dim\n        for layer_idx in range(self.num_blocks):  # each GCN layer\n            gcn_inp = out[-1]\n            act_sum = torch.zeros((B, L, D), dtype=seq_repr.dtype,\n                                  layout=seq_repr.layout, device=seq_repr.device)\n            for et_idx in range(self.num_all_edge_types):  # each edge type\n                in_arc = self.in_proj[layer_idx][et_idx](gcn_inp)\n                # (B, L, L) * (B, L, D) --> (B, L, D)\n                in_arc = torch.matmul(adj[:, et_idx, :, :].to(torch.float), in_arc)\n                if self.use_drop:\n                    in_arc = self.dropout(in_arc)\n                if self.gate:\n                    in_arc_gate = self.in_gate_proj[layer_idx][et_idx](gcn_inp)\n                    # (B, L, L) * (B, L, D) --> (B, L, D)\n                    in_arc_gate = torch.matmul(adj[:, et_idx, :, :].to(torch.float), in_arc_gate)\n                    in_arc_gate = self.sigmoid(in_arc_gate)\n                    in_arc = in_arc * in_arc_gate\n\n                out_arc = self.out_proj[layer_idx][et_idx](gcn_inp)\n                out_arc = torch.matmul(adj[:, et_idx, :, :].permute(0, 2, 1).to(torch.float), out_arc)\n                if self.use_drop:\n                    out_arc = self.dropout(out_arc)\n                if self.gate:\n                    out_arc_gate = self.out_gate_proj[layer_idx][et_idx](gcn_inp)\n                    out_arc_gate = torch.matmul(adj[:, et_idx, :, :].permute(0, 2, 1).to(torch.float), out_arc_gate)\n                    out_arc_gate = self.sigmoid(out_arc_gate)\n                    out_arc = out_arc * out_arc_gate\n                act_sum = act_sum + in_arc + out_arc\n            if self.residual:\n                act_sum = act_sum + gcn_inp\n            gcn_out = self.activation(act_sum)\n\n            out.append(gcn_out)\n\n        return out[-1]", "title": "" }, { "docid": "1521005c77cac79b2924f54954c9f5bb", "score": "0.4588836", "text": "def prepare_data(args, graph, target_type, train_target_nodes, valid_target_nodes, pool):\n    jobs = []\n    for batch_id in np.arange(args.n_batch):\n        p = pool.apply_async(\n            node_classification_sample,\n            args=(args, target_type, randint(), train_target_nodes, {1: True}),\n        )\n        jobs.append(p)\n    p = pool.apply_async(\n        node_classification_sample,\n        args=(args, target_type, randint(), valid_target_nodes, {1: True}),\n    )\n    jobs.append(p)\n    return jobs", "title": "" }, { "docid": "103764f32e2df99831c29f3eb44eef06", "score": "0.45885998", "text": "def forward(self, x):\n\n        pool_2_out = self.pool_2(x)\n        
pool_3_out = self.pool_3(pool_2_out)\n conv_4_out = self.conv_4(pool_3_out)\n conv_5_out = self.conv_5(conv_4_out)\n\n pool_2_out = self.skip_pool_2(pool_2_out)\n pool_3_out = self.skip_pool_3(pool_3_out)\n conv_4_out = self.skip_conv_4(conv_4_out)\n conv_5_out = self.skip_conv_5(conv_5_out)\n\n fused = torch.cat((pool_2_out, pool_3_out, conv_4_out, conv_5_out))\n fused = self.fused_features(fused)\n\n return fused", "title": "" } ]
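The random_mini_batches passage above shuffles a column-major dataset and slices it into fixed-size batches plus one smaller remainder; a minimal self-contained sketch of the same partitioning idea (names and shapes here are illustrative assumptions, not taken from the passage):

    import numpy as np

    def make_mini_batches(X, Y, batch_size=64):
        # X: (features, m), Y: (labels, m) -- one example per column, as in the passage
        m = X.shape[1]
        perm = np.random.permutation(m)
        X, Y = X[:, perm], Y[:, perm]
        # stepping by batch_size makes the final slice the remainder batch automatically
        return [(X[:, k:k + batch_size], Y[:, k:k + batch_size])
                for k in range(0, m, batch_size)]

    # usage: make_mini_batches(np.zeros((3, 10)), np.zeros((1, 10)), batch_size=4) -> 3 batches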
9d2f56353cb6e4881608d47089c161a0
Get handlers with message getters.
[ { "docid": "d006d47f0b167f698b4af69021ed3490", "score": "0.76746863", "text": "def get_message_handlers(self) -> List[Tuple[Callable[[Any], None], Callable]]:\n return [\n (self.handle_envelope, self.inbox.async_get),\n ]", "title": "" } ]
[ { "docid": "3d2089feac37be2e44e8248b9248cf1d", "score": "0.72188824", "text": "def get_handlers(self):\n return self.handlers", "title": "" }, { "docid": "e6fca13c850fa160f0c2b816c9e7625c", "score": "0.6957104", "text": "def handlers(self):\n return self._handlers", "title": "" }, { "docid": "92570f96fd9eaaf8491b0423fd85b6e8", "score": "0.6850771", "text": "def handlers(self):\n return self._handlers.values()", "title": "" }, { "docid": "4bc6f702664fe51524e26ad136fec234", "score": "0.67829186", "text": "def get_handlers(self):\n if self.safe:\n return self.get_safe_handlers()\n else:\n return self.log_handlers", "title": "" }, { "docid": "5d741a32d6966fc280958fbc6ef402db", "score": "0.67015237", "text": "def _get_handlers(self):\n routes = self._get_routes_config()", "title": "" }, { "docid": "d6a94992a6c2920e558637bdb9e2e20c", "score": "0.6670804", "text": "def get_handlers(self):\n return (self._get_handler(self.STDOUT),\n self._get_handler(self.STDERR),\n self._handlers_are_files())", "title": "" }, { "docid": "668c74d12309435f7809565e5f98b716", "score": "0.6427139", "text": "def get_message_handler(self, msg):\n return self.message_handler_map[msg.MessageType][1]", "title": "" }, { "docid": "f620913b5e85ba2b39ce19b014c61245", "score": "0.633441", "text": "def _get_api_handlers(self):\n cls = type(self)\n handlers = dict(cls.api_handlers_defaults)\n handlers.update(cls.api_handlers)\n return handlers", "title": "" }, { "docid": "a6777a5a5ba8aad6feaa9febf09716c2", "score": "0.6328172", "text": "def get_service_handlers() -> Dict[str, AbstractServiceHandler]:\n\t_ensure_service_handlers()\n\treturn _services", "title": "" }, { "docid": "9c0f4b034ff630155a32045de8e9c5f8", "score": "0.61322814", "text": "def getHandlers(self):\n return [\n ('/robotlog/ws', RobotlogEchoSocket),\n ('/robotlog/(.*)', StaticFileHandler)\n ]", "title": "" }, { "docid": "76f76150406f81cab995f58f555a86c1", "score": "0.60938364", "text": "def get_handlers(self, name):\n if name in self.event_hook:\n return self.event_hook[name]\n filtered = filter(lambda reg_name: self.match(name, reg_name), self.event_hook)\n name = next(filtered, None)\n return self.event_hook[name] if name is not None else list()", "title": "" }, { "docid": "f2de9f96d5f8bcf751115bfc0d0076ca", "score": "0.6090203", "text": "def messagesHandler(self):\n pass", "title": "" }, { "docid": "73302ffbd4af4cbd6b6b0f1ff04fcd13", "score": "0.6064352", "text": "def getErrorHandlers(self):\n return self.errorHandlers", "title": "" }, { "docid": "912462c3cd1978f53970052b4aebac25", "score": "0.6025434", "text": "def get_registry():\n return config[\"HANDLERS\"]", "title": "" }, { "docid": "e05547f20ef13e0e2a52f3ccab1006b8", "score": "0.6002314", "text": "def get_all_handlers():\n for handler in chain(features.get_link_types().values(),\n features.get_embed_types().values()):\n try:\n model = handler.get_model()\n except NotImplementedError:\n continue\n\n yield model, handler", "title": "" }, { "docid": "0a53e62f29016e718623e0a6a87b3144", "score": "0.59798974", "text": "def create_message_handler():\n\n def factory(modes=None):\n return _MessageHandler()\n\n yield factory", "title": "" }, { "docid": "db846acbad1b13bf5f940c048fb01d76", "score": "0.5969302", "text": "def _get_handler(self):\n raise NotImplementedError()", "title": "" }, { "docid": "5a7df370682a9253f015aace012ff4bd", "score": "0.5929931", "text": "def get_handlers():\n handlers = list()\n\n # Principal\n handlers.append((r'/', Index))\n\n # Login\n handlers.append((r'/inicio', 
LoginController))\n    handlers.append((r'/logout', LogoutController))\n\n    # Users\n    handlers.append((r'/manual', ManualController))\n    handlers.extend(get_routes(BitacoraController))\n    handlers.extend(get_routes(RolController))\n    handlers.extend(get_routes(UsuarioController))\n    handlers.extend(get_routes(PersonaController))\n\n    # Business\n    handlers.extend(get_routes(ClienteController))\n    handlers.extend(get_routes(EmpresaController))\n    handlers.extend(get_routes(IngresoController))\n    handlers.extend(get_routes(InventarioController))\n    handlers.extend(get_routes(ProductoController))\n    handlers.extend(get_routes(SalidaController))\n    handlers.extend(get_routes(SucursalController))\n    handlers.extend(get_routes(TraspasoController))\n\n\n\n    handlers.append((r'/resources/(.*)', StaticFileHandler, {'path': os.path.join(os.path.dirname(__file__), 'common', 'resources')}))\n\n    # Resources per submodule\n    handlers.append((r'/common/(.*)', StaticFileHandler, {'path': os.path.join(os.path.dirname(__file__), 'common', 'assets')}))\n    handlers.append((r'/main/(.*)', StaticFileHandler, {'path': os.path.join(os.path.dirname(__file__), 'main', 'assets')}))\n    handlers.append((r'/usuarios/(.*)', StaticFileHandler, {'path': os.path.join(os.path.dirname(__file__), 'sistema', 'usuarios')}))\n    handlers.append((r'/negocio/(.*)', StaticFileHandler, {'path': os.path.join(os.path.dirname(__file__), 'sistema', 'negocio')}))\n\n    return handlers", "title": "" }, { "docid": "de60505c7920b23872d93754fbaa13a4", "score": "0.5876484", "text": "def get_all_agent_handlers(self) -> List[Tuple[str, Dict]]:\n        return [(doc.id, doc.to_dict()) for doc in self.db.collection(AGENT_HANDLERS).get()]", "title": "" }, { "docid": "22740f395d20d9e2f4716eaa2111dec3", "score": "0.57956594", "text": "def get_handler(self, message, **kwargs):\n        handler = getattr(self, self.method_mapping[message.channel.name])\n        if self.channel_session_user:\n            return channel_session_user(handler)\n        elif self.channel_session:\n            return channel_session(handler)\n        else:\n            return handler", "title": "" }, { "docid": "ddfafe63729f041ae5aeaba8f4a1f5a6", "score": "0.57749367", "text": "def get_handler():\n    config = get_config(parser=ConfigParser)\n    base_class = get_base_class(config)\n\n    class MessageHandler(base_class):\n        def __init__(self, message_class=None):\n            super(MessageHandler, self).__init__(message_class=EmailMessage)\n            self.config = config\n\n    return MessageHandler", "title": "" }, { "docid": "490217efc7f2780a2beccd759c5af85e", "score": "0.5735747", "text": "def enabled_handlers(self):\n        return getattr(\n            self.agent,\n            constants.CONST_HANDLERS,\n            constants.DEFAULT_LOG_HANDLERS,\n        )", "title": "" }, { "docid": "f489013817da8a7896b314fab5dab400", "score": "0.5673094", "text": "def handlers(self):\n        tornado_routes = []\n        for route in self.routes_tuple:\n            if not route[1] in self.routes:\n                self.routes[route[1]] = {}\n            if not route[0] in self.routes[route[1]]:\n                self.routes[route[1]][route[0]] = {}\n            if len(route) > 2:\n                result = route[2]\n            else:\n                result = {'body': '', 'status': 200}\n\n            self.routes[route[1]][route[0]] = result\n\n\n        return [(route, MockedRoutesHandler, dict(handler_methods=methods)) for route, methods in self.routes.items()]", "title": "" }, { "docid": "7848ec5491630da3c7a4be1fd79a38de", "score": "0.5669896", "text": "def _dispatch_handlers(self, msg):\n\t\tdef normalize(handler):\n\t\t\t# handler might be a Handler, BoundHandler or \"sync\"\n\t\t\treturn handler.handler if isinstance(handler, BoundHandler) else handler\n\t\t# build dependency 
graph\n\t\tgraph = {handler: set() for handler in self.message_handlers}\n\t\tgraph['sync'] = set()\n\t\tfor handler in self.message_handlers:\n\t\t\tfor other in map(normalize, handler.after):\n\t\t\t\tif other in graph:\n\t\t\t\t\tgraph[handler].add(other)\n\t\t\tfor other in map(normalize, handler.before):\n\t\t\t\tif other in graph:\n\t\t\t\t\tgraph[other].add(handler)\n\t\t# check for cycles\n\t\tdef check_cycles(handler, chain=()):\n\t\t\tif handler in chain:\n\t\t\t\tchain_text = \" -> \".join(map(str, chain + (handler,)))\n\t\t\t\traise ValueError(\"Dependency cycle in handlers: {}\".format(chain_text))\n\t\t\tchain += handler,\n\t\t\tfor dep in graph[handler]:\n\t\t\t\tcheck_cycles(dep, chain)\n\t\tfor handler in graph:\n\t\t\tcheck_cycles(handler)\n\t\t# set up the greenlets\n\t\tgreenlets = {}\n\t\tdef wait_and_handle(handler):\n\t\t\tfor dep in graph[handler]:\n\t\t\t\tgreenlets[dep].join()\n\t\t\treturn handler.handle(self, msg)\n\t\tdef wait_for_sync():\n\t\t\tfor dep in graph['sync']:\n\t\t\t\tgreenlets[dep].join()\n\t\tfor handler in self.message_handlers:\n\t\t\tgreenlets[handler] = self._group.spawn(wait_and_handle, handler)\n\t\tgreenlets['sync'] = self._group.spawn(wait_for_sync)\n\t\t# wait for sync to finish\n\t\tgreenlets['sync'].get()", "title": "" }, { "docid": "25a01a066aa16592ec9a154c9f54292a", "score": "0.5653878", "text": "def get_poll_handlers() -> Dict[str, AbstractPollHandler]:\n\t_ensure_poll_handlers()\n\treturn _poll_sites", "title": "" }, { "docid": "640a4658be33b678ddb09c3d07113879", "score": "0.5642684", "text": "def tag_handlers(self):\n return self._handlers_stack", "title": "" }, { "docid": "3d260390f684343302f0046f426ba440", "score": "0.5632615", "text": "def route_message(self, message):\r\n handler = None\r\n for (regex, kallable) in self._routes:\r\n url_check = regex.match(message.path)\r\n\r\n if url_check:\r\n ### `None` will fail, so we have to use at least an empty list\r\n ### We should try to use named arguments first, and if they're\r\n ### not present fall back to positional arguments\r\n url_args = url_check.groupdict() or url_check.groups() or []\r\n\r\n if inspect.isclass(kallable):\r\n ### Handler classes must be instantiated\r\n handler = kallable(self, message)\r\n ### Attach url args to handler\r\n handler._url_args = url_args\r\n return handler\r\n else:\r\n ### Can't instantiate a function\r\n if isinstance(url_args, dict):\r\n ### if the value was optional and not included, filter\r\n ### it out so the functions default takes priority\r\n kwargs = dict((k, v) for k, v in url_args.items() if v)\r\n\r\n handler = lambda: kallable(self, message, **kwargs)\r\n else:\r\n handler = lambda: kallable(self, message, *url_args)\r\n return handler\r\n\r\n if handler is None:\r\n handler = self.base_handler(self, message)\r\n\r\n return handler", "title": "" }, { "docid": "01b8d00fc9b398d1a8f66d821855928d", "score": "0.5623177", "text": "def _get_handler(self):\n return self.conf(\"handler\")", "title": "" }, { "docid": "9871be1ef38ea4626536477bd5734da7", "score": "0.5611291", "text": "def _mapping(self):\n return [('message.received', self.on_new_message)]", "title": "" }, { "docid": "078483e9270b8e7e51927be0033f9af0", "score": "0.56107545", "text": "def get_handler(self, name):\n return self._registry.get(name)", "title": "" }, { "docid": "1eb9735b7552e8529efd0124640cb8fe", "score": "0.5565691", "text": "def get_handlers(self, app):\n return [(route, decorate_hub_redirect(handler))\n for route, handler\n in 
self.authenticator.get_handlers(app)]", "title": "" }, { "docid": "ef941f22eaae730beeabda7b333fc5a5", "score": "0.5565592", "text": "def messages(self):\n return [self]", "title": "" }, { "docid": "873f93775b10dceaa7ba758cdd8b1c44", "score": "0.5542098", "text": "def getMessages(self):\n pass", "title": "" }, { "docid": "f076dc94b41b81f75d5c08d36ff23fe6", "score": "0.553816", "text": "def handler_classes(self) -> Sequence[Type[\"RPCHandler\"]]:\n handler_classes = [\n import_string(handler_cls) for handler_cls in settings.MODERNRPC_HANDLERS\n ]\n\n if self.protocol == Protocol.ALL:\n return handler_classes\n\n return [\n cls\n for cls in handler_classes\n if cls.protocol in ensure_sequence(self.protocol)\n ]", "title": "" }, { "docid": "503be307422cc8bba314d22b28f46213", "score": "0.55162674", "text": "def get_handler(cls):\n handler = cls.trigger_inbound\n if cls.channel_session_user:\n return channel_session_user(handler)\n elif cls.channel_session:\n return channel_session(handler)\n else:\n return handler", "title": "" }, { "docid": "d80f91dfc5ce806f1fd86f5532b2d282", "score": "0.54884243", "text": "def GetMessages():\n return apis.GetMessagesModule('looker', API_VERSION_DEFAULT)", "title": "" }, { "docid": "6947565939809eedc97754a425a37fbb", "score": "0.5440942", "text": "def get_updates(self, update_handler):\n pass", "title": "" }, { "docid": "0e74c96354fd8a020ab7613eb1eb2e9e", "score": "0.5412257", "text": "def _handle_messages(self, message):\n msg_type = message.get(MSG_FIELD.TYPE, None)\n if msg_type in Network.EVENTS:\n return Network.EVENTS[msg_type](message, self._connection_handler)", "title": "" }, { "docid": "9cc83eecf581286b77d4e3f41cd06664", "score": "0.5404742", "text": "def get_handler(name):\n return _default_registry.get_handler(name)", "title": "" }, { "docid": "ae33461add396666f97e2ba1ff66512c", "score": "0.53812754", "text": "def get_handler_by_msg_type(self, msg_type):\n\n # self.log.debug(\"get_handler_by_msg_type: %s\", msg_type)\n\n handler = self.MSG_HANDLER__INVALID\n\n handler_name = \"_handle__%s\" % msg_type\n\n # self.log.debug(\"handler_name: %s\", handler_name)\n\n try:\n\n assert hasattr(self, handler_name)\n handler = getattr(self, handler_name)\n\n except AssertionError:\n\n self.log.error(\"UNKNOWN handler method %s\", handler_name)\n\n finally:\n # self.log.debug(\"handler: %s\", handler)\n return handler", "title": "" }, { "docid": "ece461b3c74488c5e999c8658d840f21", "score": "0.53518987", "text": "def create_handlers(debugging=False):\n handlers = [\n StateHandler(always, watchers),\n MailHandler(is_moved_to_ready, watchers),\n MailHandler(is_marked_blocked, everyone),\n MailHandler(is_marked_deployed, active_members_with_creator),\n FeedHandler(u'AgileZen: all', 'all', always),\n WebhookHandler(is_new) ]\n def _create_feedhandler(project_id):\n \"\"\"Bind project_id within the predicate lambda below.\"\"\"\n return FeedHandler(u'AgileZen #' + str(project_id),\n 'project-' + str(project_id),\n lambda msg: msg.project_id == project_id)\n for project_id in api.get_active_project_ids():\n handlers.append(_create_feedhandler(project_id))\n if debugging:\n handlers.append(PrintHandler()) \n return handlers", "title": "" }, { "docid": "8a58e32b1e38c431e50b6e49dbf321cc", "score": "0.5350783", "text": "def handler(self, uri, method):\n for uri_pattern, handler_class in self._uri_handlers:\n m = uri_pattern.match(uri)\n if m:\n uri_parms = m.groups()\n return handler_class, uri_parms\n raise InvalidResourceError(method, uri)", "title": "" }, { "docid": 
"7e07d3ee08b7fd9e01f35c89f2835292", "score": "0.5345786", "text": "def __getHandlerObj(self,handler_name):\n try:\n return self.__handler_instances[handler_name]\n except KeyError:\n raise HandlerException(\"Handler --%s-- not found\"%handler_name)", "title": "" }, { "docid": "3b2369409869325be96dd1835b7135d9", "score": "0.53348523", "text": "def get_link_handlers() -> Dict[str, AbstractInfoHandler]:\n\t_ensure_link_handlers()\n\treturn _link_sites", "title": "" }, { "docid": "8325086f7140bb95ca05c4b22faf32bf", "score": "0.5311519", "text": "def get_expected_handlers(self):\n return self._EXPECTED_HANDLERS", "title": "" }, { "docid": "a9f7f98e3a36af76cafa5f6e76980694", "score": "0.5277582", "text": "def get_methods(self):\n return []", "title": "" }, { "docid": "8cf8b3bd05aaea47f077ee9923996bf2", "score": "0.5257029", "text": "def _store_handlers(*handlers):\n global _handlers\n _handlers = handlers\n return DEFAULT", "title": "" }, { "docid": "cc7f04dc4942043b3e06a9456460f847", "score": "0.5247438", "text": "def getHandler(database):\n return Topics(database, 'topics')", "title": "" }, { "docid": "61b85525526cdadba186342c38f764f3", "score": "0.52436936", "text": "def handler_func(self):\n return self._handler_func", "title": "" }, { "docid": "61d80d0f6dcc6dd4e1708d9f084ad647", "score": "0.5226427", "text": "def field_handler(self):\n return self._field_handler", "title": "" }, { "docid": "1dfc55329ab171496424e033de533b69", "score": "0.5224737", "text": "def get_event_handlers(event):\n event_handlers = copy.copy(htk_setting('HTK_ALEXA_SKILL_EVENT_HANDLERS'))\n\n # add in additional event group handlers\n extra_event_handlers = htk_setting('HTK_ALEXA_SKILL_EVENT_HANDLERS_EXTRAS')\n for event_group, handlers in extra_event_handlers.items():\n event_handlers.update(handlers)\n\n return event_handlers", "title": "" }, { "docid": "6acc3b4d363c5969a53f24b2f5f09fb2", "score": "0.5221843", "text": "def get_handler(self, uid):\n for handler in self._handlers:\n if handler.uid == uid:\n return handler", "title": "" }, { "docid": "4faaa6058e3d3694bd7d90915696137b", "score": "0.52192414", "text": "def strHandlers(self):\n result = [\"handlers:\"]\n for event_name, handlers in self._handlers.items():\n result.append(\" %s : %s\" % (event_name, handlers.values()))\n return \"\\n\".join(result)", "title": "" }, { "docid": "7d39f7396997b894d73160d45da2c3b2", "score": "0.52063656", "text": "def _get_log_handlers():\n handlers = [\n logbook.StreamHandler(sys.stdout, level=logbook.INFO, bubble=True),\n ]\n if config.LOG_FILE_PATH:\n handlers.append(logbook.RotatingFileHandler(\n config.LOG_FILE_PATH, level=logbook.DEBUG, backup_count=1, max_size=5 * 1024 * 1024, bubble=True))\n return handlers", "title": "" }, { "docid": "722e77dba47d1de4e975cad37ebb81fe", "score": "0.5203304", "text": "def GetHandler(cmd, h_list=None):\n if h_list is None:\n h_list = HANDLERS\n\n repeat, cmd = SplitRepeat(cmd)\n if not cmd:\n return None\n\n for handler in h_list:\n if cmd[0] in handler.start_chars:\n return handler", "title": "" }, { "docid": "6ec9495a20965e693dc81eb39e542396", "score": "0.5198653", "text": "def method_handlers_generic_handler(service, method_handlers):\n from grpc import _utilities # pylint: disable=cyclic-import\n\n return _utilities.DictionaryGenericHandler(service, method_handlers)", "title": "" }, { "docid": "8237992cce1c8b05b4e0b0a757d3788c", "score": "0.5187035", "text": "def get_available_handlers(self):\n self.available_handlers = []\n path = os.path.join(self.dark_path, 'handlers')\n for d in 
os.listdir(path):\n            dir = os.path.join(path, d)\n            if os.path.isdir(dir):\n                for d2 in os.listdir(dir):\n                    dir2 = os.path.join(dir, d2)\n                    if os.path.isdir(dir2):\n                        if os.path.isfile(os.path.join(dir2,'handler.py')):\n                            if not os.path.isfile(os.path.join(dir2, '__init__.py')):\n                                with open(os.path.join(dir2, '__init__.py'), 'a') as outfile:\n                                    pass\n                            self.available_handlers.append('%s.%s' % (d, d2))\n        return self.available_handlers", "title": "" }, { "docid": "d234f45f1d8d2772004c6c1c43c392d5", "score": "0.5185479", "text": "def _process_received_message(self, message: communication.MESSAGE, *args, **kwargs):\n        for handler in self._handlers.get('receive', []):\n            handler(message)", "title": "" }, { "docid": "eb02e8ece095378d581ce571e870a034", "score": "0.51780516", "text": "def loadHandler( self ) : \n        \n        if not self.handler : \n\n            # initial list of modules \n            ml0 = set( sys.modules ) \n\n            self.handler = getattr( __import__( self.module , fromlist=[self.func] ) , self.func ) \n\n            # new list of modules, collected after the import \n            ml1 = set( sys.modules ) \n\n            # store list of loaded modules \n            self.modules = ml1 - ml0 \n\n        return self.handler", "title": "" }, { "docid": "c994b9f79503c08b838b4654ff893452", "score": "0.5177921", "text": "def action_handlers(self) -> Dict:\n        pass", "title": "" }, { "docid": "e52d9f43e72c65bdd2a5d07da1789651", "score": "0.51656705", "text": "def on_message_received(self):\n        return self._handler_manager.on_message_received", "title": "" }, { "docid": "c661f671801fb47bd65577615afda725", "score": "0.5155615", "text": "def functions(self) -> Dict[str, Function]:\r\n        return self._functions", "title": "" }, { "docid": "5ee8b2b3f20fbaf89a72b5d8e50e1a9b", "score": "0.51480544", "text": "def find_handlers(cls, raw_connection: object) -> Generator[Type[\"ConnectionWrapper\"], None, None]:\n\n        if cls.handles(raw_connection):\n\n            yield cls\n\n        for subclass in cls.__subclasses__():\n\n            yield from subclass.find_handlers(raw_connection)", "title": "" }, { "docid": "0dde4b781fc21f50d411d143c819cd06", "score": "0.51466256", "text": "def dispatch(self, message, **kwargs):\n        return self.get_handler(message, **kwargs)(message, **kwargs)", "title": "" }, { "docid": "2976ac3d421620a241b184da573400af", "score": "0.5139199", "text": "def _getters(cls):\n        return {}", "title": "" }, { "docid": "86f923887c8e126bb6e3fde0ce2ca209", "score": "0.5133302", "text": "def get_handler(self, action):\n        return getattr(self, action, None)", "title": "" }, { "docid": "a6294f79c3eaeef24ca90aa642b99e46", "score": "0.5110113", "text": "def lookup_method(self, instance):\n        cls = instance.__class__\n        try:\n            # Do we have a method handler defined for this type name\n            return self._handlers[cls.__name__]\n        except KeyError:\n            # No, walk the MRO.\n            for klass in cls.mro()[1:]:\n                entry = self._handlers.get(klass.__name__)\n                if entry:\n                    # Save it on this type name for faster lookup next time\n                    self._handlers[cls.__name__] = entry\n                    return entry\n            raise RuntimeError(\"No handler found for class %s\", cls.__name__)", "title": "" }, { "docid": "844ea7f9a3034e0ee1e83eaf2570becf", "score": "0.5093966", "text": "def _message_handler(self, message_code, message_text):\n        handlers = {\n            200: self._handle_public_message,\n            201: self._handle_private_message,\n            202: self._handle_client_joined_channel,\n            203: self._handle_client_left_channel,\n            204: self._handle_other_user_nick_change\n        }\n        
return handlers[message_code](message_text)", "title": "" }, { "docid": "9fa84fd02edb134b4498d0939bf26edf", "score": "0.50929695", "text": "def __call__(self, *args, **kwargs):\n for handler in self.handlers:\n #print handler\n handler(*args, **kwargs)", "title": "" }, { "docid": "549e770bd42db3d19d283ff73b464e45", "score": "0.50909895", "text": "def get_handler_by_id(self, handler_id):\n return self._handlers.get(handler_id)", "title": "" }, { "docid": "a2565ffa5a6fe0267779fde6a6f7a81a", "score": "0.5079786", "text": "def receive(self):\n return self.getter()", "title": "" }, { "docid": "b57a49499f38e1a47a677da584d07608", "score": "0.5079187", "text": "def get_getter(self, name):\n if name not in self._getters:\n raise ValueError(\"Name [%s] not found in getters!\" % name)\n return self._getters[name]", "title": "" }, { "docid": "68b415a250df49387306b7116780a05d", "score": "0.50658065", "text": "def view_handlers(self, handlers: List[Callable]):\n for handler in handlers:\n self.view_handler(handler)", "title": "" }, { "docid": "cc3463a21fec5e22fd3785f5661c8e57", "score": "0.50571877", "text": "def register_handlers(dispatcher: Dispatcher):\n dispatcher.add_handler(CommandHandler(\"start\", index))\n dispatcher.add_handler(CommandHandler(\"rates\", currency))\n dispatcher.add_handler(CommandHandler(\"exchange\", exchange))\n dispatcher.add_handler(CallbackQueryHandler(button))", "title": "" }, { "docid": "649e4896e6c1ed2823c9382830928869", "score": "0.5052593", "text": "def get_handler(self, sess):\n assert self.init\n return sess.run(self.data_iter.string_handle())", "title": "" }, { "docid": "643cfc4301b1112115a2021a1407f2df", "score": "0.5051359", "text": "def initialize_handlers(self):\n pass", "title": "" }, { "docid": "4773db36ec368d8e9db4398dd621abed", "score": "0.5048354", "text": "def route_handler(self):\n return self._route_handler", "title": "" }, { "docid": "1bc961aa1b3f79c6f7b8a317a9e1b983", "score": "0.5047103", "text": "def getMethods(self):\n return self.methods", "title": "" }, { "docid": "6f8f37be5c3d948f1b649970f267e839", "score": "0.5045074", "text": "def get_methods(self):\n return [(\"pids\", self.pids),\n (\"shells\", self.shells),\n (\"http\", self.http)]", "title": "" }, { "docid": "988da63371379b0988b9a93ff7129afb", "score": "0.5036895", "text": "def _init_handlers(self):\n for handler, port in zip(self.handlers, self.ports):\n self.add_handler(handler, port)", "title": "" }, { "docid": "c10004b6581dada52db3bf1c9c3b4b82", "score": "0.5036541", "text": "def get_handlers(searched_objects):\n searched_models = set()\n for obj in searched_objects:\n searched_models.add(obj._meta.model)\n searched_models.update(obj._meta.get_parent_list())\n\n for handler in chain(features.get_link_types().values(),\n features.get_embed_types().values()):\n try:\n model = handler.get_model()\n except NotImplementedError:\n continue\n\n if model in searched_models:\n yield model, handler", "title": "" }, { "docid": "d798adf233f214fa976e5a67f306f8ed", "score": "0.5032836", "text": "def get_functions(self):\n return self.functions.keys()", "title": "" }, { "docid": "f50592319b38e3668a754223f246893f", "score": "0.502896", "text": "def get_message_actions(self):\n return self._message_actions", "title": "" }, { "docid": "5d4654258c7daf1b3274e32ce5f133d8", "score": "0.50276375", "text": "def get_receive_method(self):", "title": "" }, { "docid": "066f38591bd0f647719b78082a1332a8", "score": "0.5023688", "text": "def _methods(self):\n return []", "title": "" }, { "docid": 
"26a5e24e0c220899632eb316bb934dad", "score": "0.5010926", "text": "def list_methods(self, prefix=\"\"):\n handlers = [\n handler_prefix + \".\" for handler_prefix in self.app.api_handlers\n if (\n handler_prefix and\n handler_prefix.startswith(prefix) and\n handler_prefix != prefix\n )\n ]\n dummy_call = Call(prefix + \"._list_methods_dummy_method\")\n handler, _ = self.edge.get_call_handler(dummy_call)\n methods = self._list_methods(handler)\n return handlers + methods", "title": "" }, { "docid": "732bd1aac5c0281b11bee16a4c83d565", "score": "0.5010044", "text": "def handle_msg(self, msg):\n type = msg['type']\n if type == 'get' or type == 'put':\n return self.handle_client(msg)\n elif type == 'heartbeat':\n return self.handle_heartbeat(msg)\n elif type == 'ready' or type == 'not ready':\n return self.handle_response(msg)\n elif type == 'election':\n return self.handle_election(msg)\n else:\n return self", "title": "" }, { "docid": "9c4177db64c9ee991f0b9bf4dfda271d", "score": "0.49952886", "text": "def init_handlers(self, config):\n\n # Order matters. The first handler to match the URL will handle the request.\n handlers = []\n handlers.extend(load_handlers('api.handlers'))\n\n # prepend base_url onto the patterns that we match\n new_handlers = []\n for handler in handlers:\n pattern = url_path_join(config['base_url'], handler[0])\n new_handler = tuple([pattern] + list(handler[1:]))\n new_handlers.append(new_handler)\n # add 404 on the end, which will catch everything that falls through\n new_handlers.append((r'(.*)', NotFoundHandler))\n return new_handlers", "title": "" }, { "docid": "9d2eba6a1b95e18eae0f835e656f7fe8", "score": "0.49911028", "text": "def getMethodDescriptors():", "title": "" }, { "docid": "54696c61a4873ad0d44e99436d5bb34b", "score": "0.49812594", "text": "def _lookup(self, tblname):\n if tblname in self._lookup_cache:\n return self._lookup_cache[tblname]\n\n handlers = []\n for (a_tblname, a_handler) in self.policy:\n if a_tblname == tblname or a_tblname == \"*\": # XXX wildcard matching XXX\n handlers.append(a_handler)\n self._lookup_cache[tblname] = handlers\n return handlers", "title": "" }, { "docid": "dfefe6be142a63a6b8548131cadffa0e", "score": "0.49773273", "text": "def dispatch(self, handlers):\n\n # Quick exit for the case where all handlers are same\n if len(handlers) == 1:\n h, = handlers\n if not isinstance(h, type):\n raise RuntimeError(\"Handler {!r} is not a type.\".format(h))\n return h\n\n # Recursively select with registered binary priority\n for i, typ in enumerate(handlers):\n\n if not isinstance(typ, type):\n raise RuntimeError(\"Handler {!r} is not a type.\".format(typ))\n\n if i == 0:\n handler = typ\n else:\n prev_handler = handler\n handler = self._dispatcher.dispatch(prev_handler, typ)\n\n if not isinstance(handler, type):\n raise RuntimeError(\n \"Dispatcher for {!r} and {!r} must return a type, but got {!r}\".format(\n prev_handler, typ, handler\n ))\n\n # return handler class\n return handler", "title": "" }, { "docid": "6a582c4c3617021cbd9d21036f07e71d", "score": "0.49772474", "text": "def _create_handlers(self):\n\n # Set up the WebServer to serve the domain models in context\n server = WebServer(\n base_url = join(os.getcwd(), self.template.base_url),\n html = self.template.html,\n context = self.context,\n trait_change_dispatch = self.trait_change_dispatch\n )\n\n return server.handlers", "title": "" }, { "docid": "494ec7238781d09fb054437a3c9abfb2", "score": "0.4976221", "text": "def _get_content_handler(self):\n pass", "title": "" }, { 
"docid": "2ef0460b5a448ca5dcf49e6f473e1aff", "score": "0.49716958", "text": "def init_handlers(self):\n super().init_handlers()\n self.handlers = self.new_prefixed + self.handlers\n self.log.debug(self.handlers)", "title": "" }, { "docid": "e56b9481dbde34e9bc58a0ee88fa3788", "score": "0.49691197", "text": "def handlers(self, handlers):\n self._handlers = handlers", "title": "" }, { "docid": "5613f9042a535762c50b261447595d29", "score": "0.49687412", "text": "def get(self):\n return get_all_funcionarios()", "title": "" }, { "docid": "251d0ae2d2343b1d6370948bc8721e1c", "score": "0.49469328", "text": "def val_handlers(self):\n pass", "title": "" }, { "docid": "e4bd63ebcdabda3128b966b93c7c0d7c", "score": "0.49412367", "text": "def generate(cls, handlers):\n\n if handlers:\n return handlers.pop(0)(cls.generate(handlers))", "title": "" }, { "docid": "262f7bfbd6087ab823247c0a6550e6af", "score": "0.49411702", "text": "def _get_log_handlers(log_file_directory_path):\n # Create log directory path, if it doesn't exist.\n if not os.path.exists(log_file_directory_path):\n os.makedirs(log_file_directory_path)\n log_file_path = os.path.join(log_file_directory_path, 'py-subs.log')\n # Create the handlers chain.\n return [logbook.NullHandler(),\n logbook.FileHandler(log_file_path, level=logbook.DEBUG, bubble=True),\n logbook.StreamHandler(sys.stdout, level=logbook.INFO, bubble=True)]", "title": "" } ]
3830d0d4336cd35387978d6134d9bd1b
Returns a given response header.
[ { "docid": "0f539e759758ad2c6dbf4862eea2a942", "score": "0.7335741", "text": "def getheader(self, name, default=None):\n return self.urllib3_response.getheader(name, default)", "title": "" } ]
[ { "docid": "4962b3e57245ea8b0c37467c6e765eb8", "score": "0.7827818", "text": "def header(self, name):\n key = name.upper()\n if key not in _RESPONSE_HEADER_DICT:\n key = name\n return self._headers.get(key)", "title": "" }, { "docid": "5d50e8a060a518468b44d559aa972cc0", "score": "0.7489284", "text": "def get_header(self, name):\n return self.headers.get(name)", "title": "" }, { "docid": "cfae5f4a9233166b29b542a80d2a84eb", "score": "0.7421516", "text": "def getHeader():\n return _HEADER", "title": "" }, { "docid": "590acfaf8a6542a81690686f022b6a87", "score": "0.7289839", "text": "def getHeader(self, name):\n return self.headers.get(name.lower(), None)", "title": "" }, { "docid": "1aa097451aba39f0fc8864659c55ff11", "score": "0.7258767", "text": "def header(self, header, default=None):\r\n return self._get_headers().get(header.upper(), default)", "title": "" }, { "docid": "d003af9a1966d6ab0167a9dfe123dded", "score": "0.71716225", "text": "def _get_header(self, header):\n if header is None:\n html = self.header()\n else:\n html = header\n return html", "title": "" }, { "docid": "06d9a1b5ff8ce19403cfac9eeac9ef22", "score": "0.6996634", "text": "def get_response_status_header(response: requests.Response) -> str:\n if hasattr(response, 'headers'):\n return response.headers.get(RESPONSE_STATUS_HEADER, '')\n return ''", "title": "" }, { "docid": "224b8f4cd8a95ed2a7d3a0369cdd2aeb", "score": "0.69709885", "text": "def getHeader(self, key):\n if key not in self.headers:\n raise Exception(\"No such key in the header\")\n else:\n return self.headers[key]", "title": "" }, { "docid": "b619e40f4beb0be48ba71b8308445d7c", "score": "0.69324154", "text": "def get_header(self, name, default=None):\n return self.headers.get(name, default)", "title": "" }, { "docid": "9ca01aae059a7ce5603953863219d433", "score": "0.6885053", "text": "def get_http_header(url, header_name):\n\n try:\n response = urllib2.urlopen(url)\n \n if response.info().get(header_name):\n return None\n \n except ConnectionError:\n print('Connection Error')\n except UnknownError: \n print('Unknown Error')", "title": "" }, { "docid": "500e66ddbd9e4495ad44ad9a8456a0bb", "score": "0.6843578", "text": "def get_header(self):\n return self._header", "title": "" }, { "docid": "b54b4aca7b5aac73a6f6cf4f91774c0d", "score": "0.68412644", "text": "def get_header(self, key, default = None):\n key = key.lower()\n for (_key, value) in self.headers:\n if key == _key.lower():\n return value\n return default", "title": "" }, { "docid": "180628b9bad73fef8e7943199ff3c42f", "score": "0.68161386", "text": "def header(self, key, default=None):\n return self._get_headers().get(key.upper(), default)", "title": "" }, { "docid": "46e92a4fa5c8a35af0101d03af608c80", "score": "0.6757666", "text": "def getheader(self, name, default=None):\n if not self.__headers.has_hey(name):\n return default\n else: self.__headers[name]", "title": "" }, { "docid": "600e0cf320b4bfb8f7ea04e11ccebbb6", "score": "0.6724961", "text": "def _get_request_header(request, header_name, default=''):\r\n if request is not None and hasattr(request, 'META') and header_name in request.META:\r\n return request.META[header_name]\r\n else:\r\n return default", "title": "" }, { "docid": "7affb04e07e7517a940a8df9556c8eac", "score": "0.67063326", "text": "def get_header(self, key, default=None):\n\n return self._request.headers[\n key] if key in self._request.headers else default", "title": "" }, { "docid": "2c4780bf3e967f6bcb9528d46a09e888", "score": "0.64925784", "text": "def _GetHeaderNameValue(header):\n i 
= header.find(':')\n if i > 0:\n return (header[:i].lower(), header[i+1:].strip())\n return None", "title": "" }, { "docid": "6c73de1e7d00b34e9105085b525701d3", "score": "0.645618", "text": "def get_header(header, pkt):\n try:\n str_pkt = str(pkt)\n\n init_header = str_pkt.index( header )\n after_header = str_pkt[ ( init_header + len(header) ) : ]\n end_header = after_header.index(const.END_LINE)\n\n val = after_header[ : end_header ]\n\n except ValueError:\n val = '-1'\n\n return val", "title": "" }, { "docid": "aaa5c464fca087d114a5c8e8a6742b5e", "score": "0.64561504", "text": "def getHeader(self):\n return self.data.header", "title": "" }, { "docid": "46d5ae80b0e771a178d1021ee66f710c", "score": "0.64383036", "text": "def get_header_value(field_name, header):\n\n # print 'field_name [%s] header [%s]' % (field_name, header)\n\n # Make sure we are only looking at the header,\n # even if the caller passes us the entire message.\n #\n pieces = header.split('\\r\\n\\r\\n', 1)\n header = pieces[0]\n\n match = re.search('^%s\\s*:\\s*([^\\s]+)\\s*$' % field_name, header,\n re.MULTILINE | re.IGNORECASE)\n\n if match:\n return match.group(1).strip()\n else:\n return '-1'", "title": "" }, { "docid": "c378fc803fff9483338152b14c3b8a95", "score": "0.64254653", "text": "def http_get_location_header(response_obj):\n return response_obj.response_object.headers['Location']", "title": "" }, { "docid": "7da6fb621f9f5d4e83a51c5d8e1922e4", "score": "0.64099556", "text": "def get_header_start(response_status):\n return '{protocol}{space}{status[0]}{space}{status[1]}'.format(\n protocol=http_protocol_version,\n space=l_s,\n status=response_status\n )", "title": "" }, { "docid": "b6a6949c625c5d534c6a14b4ca2b241f", "score": "0.6399452", "text": "def peek_header(self):\n header = None\n if self._headers:\n # returns the last element on the list\n header = self._headers[-1:]\n\n return header", "title": "" }, { "docid": "10ef2dbf5eab5f3287bd50ff86e3d4d6", "score": "0.6379365", "text": "def get_token_auth_header():\n auth = request.headers.get(\"Authorization\", None)\n if not auth:\n return \"authorization_header_missing\"\n\n parts = auth.split()\n\n if parts[0].lower() != \"bearer\":\n return \"invalid_header\"\n elif len(parts) == 1:\n return \"invalid_header\"\n elif len(parts) > 2:\n return \"invalid_header\"\n\n token = parts[1]\n return token", "title": "" }, { "docid": "13868026d1af69b2669246afbce2b71d", "score": "0.6373841", "text": "def lookup_fits_header(bucket_path):\n header = None\n request_params = dict(bucket_path=bucket_path, bucket_name=INCOMING_BUCKET)\n res = requests.post(FITS_HEADER_URL, json=request_params)\n if res.ok:\n header = res.json()['header']\n\n return header", "title": "" }, { "docid": "0bbb39610c77395d2283ffcf737bf8dd", "score": "0.63601136", "text": "def get_headers(input_header):\n if input_header:\n header = input_header\n else:\n header = create_marconi_headers()\n\n return header", "title": "" }, { "docid": "6432957e2b7119666f44ecc6882f8bd8", "score": "0.6321719", "text": "def parse_header(self, header, value, phase):\n if self.mode == 'display':\n return value\n if phase == 'request':\n if header.lower() == 'accept':\n return value if value != '*/*' else None\n elif header.lower() == 'x-csrftoken':\n if self.csrftoken:\n value = value.replace(self.csrftoken, self.doc_csrf)\n return value\n elif header.lower() == 'cookie':\n if self.csrftoken:\n value = value.replace(self.csrftoken, self.doc_csrf)\n if self.sessionid:\n value = value.replace(self.sessionid, self.doc_session)\n 
return '; '.join(sorted(value.split('; ')))\n elif header.lower() == 'authorization':\n if self.token:\n value = value.replace(self.token, self.doc_token)\n return value\n elif header.lower() in ('content-type', 'content-length'):\n return value\n elif header.lower() not in (\n 'accept-encoding', 'connection', 'user-agent', 'referer'):\n print('Unexpected request header %s: %s' % (header, value))\n return value\n else:\n if header.lower() in ('content-type', 'www-authenticate'):\n return value\n elif header.lower() == 'location':\n return value.replace(api, self.doc_base_url)\n elif header.lower() not in (\n 'allow', 'date', 'server', 'vary', 'x-frame-options',\n 'content-length'):\n print('Unexpected response header %s: %s' % (header, value))\n return value", "title": "" }, { "docid": "3a860609271b28ed6e9daa35ee01388a", "score": "0.6270088", "text": "def header(self):\n return self[0]", "title": "" }, { "docid": "3cee7d42767d07675c85a775de5df2aa", "score": "0.62125224", "text": "def header(self):\n return self._header", "title": "" }, { "docid": "3cee7d42767d07675c85a775de5df2aa", "score": "0.62125224", "text": "def header(self):\n return self._header", "title": "" }, { "docid": "3cee7d42767d07675c85a775de5df2aa", "score": "0.62125224", "text": "def header(self):\n return self._header", "title": "" }, { "docid": "890d8d0745511978b8c1ef7ca8a36eb0", "score": "0.6212034", "text": "def parse_header(self, header):\n\n m = re.search(HEADER_REGEX, header)\n if m:\n type = m.group(1)\n version = m.group(2)\n return type, version\n else:\n return None", "title": "" }, { "docid": "cf0b03843d616273daf045ac82db63a8", "score": "0.6210158", "text": "def getHeaderVal2(self, key):\n lowerKey = key.lower()\n if key in self.header.header.keys():\n return self.header.header[key]\n elif lowerKey in self.header.header.keys():\n return self.header.header[lowerKey]\n else:\n print('error: bStack.getHeaderVal() did not find key \"' + key + '\" in self.header.header. 
Available keys are:', self.header.header.keys())\n return None", "title": "" }, { "docid": "a5e575a3b3767011741fc89cd8a2bc95", "score": "0.62046266", "text": "def _read_response_header(self):\r\n length = None\r\n encoding = \"identity\"\r\n chunked = False\r\n\r\n hdr = []\r\n while True:\r\n line = self._read_line()\r\n if not line:\r\n break\r\n hdr.append(line)\r\n\r\n for line in hdr:\r\n if \"Content-Length\" in line:\r\n length = int(line[15:])\r\n if \"Content-Encoding\" in line:\r\n encoding = line[17:].strip()\r\n if \"Transfer-Encoding: chunked\" in line:\r\n chunked = True\r\n\r\n return (length, encoding, chunked)", "title": "" }, { "docid": "982cff9e39ff46b3f3b50b91c78a8227", "score": "0.617104", "text": "def get_response_headers(self, *args, **kwargs):\n if self.response_headers:\n return self._unpack_headers(self.response_headers)", "title": "" }, { "docid": "f020bafd65e84254ad6d8ed3c0e6fa11", "score": "0.6168488", "text": "def get_header(self, name, default=None, required=False):\n\n # Use try..except to optimize for the header existing in most cases\n try:\n # Don't take the time to cache beforehand, using HTTP naming.\n # This will be faster, assuming that most headers are looked\n # up only once, and not all headers will be requested.\n return self._headers[name.upper().replace('-', '_')]\n except KeyError:\n if not required:\n return default\n\n raise HTTPBadRequest('Missing header',\n 'The \"' + name + '\" header is required.')", "title": "" }, { "docid": "9ee7339d53436cdf984d57328579b4e0", "score": "0.61639225", "text": "def __getitem__(self, name):\n return self.headers[name]", "title": "" }, { "docid": "003ffd949188e31333a7c4a5683e8f76", "score": "0.61355036", "text": "def get_header(file):\n with open(file, 'r') as f:\n return f.readline()", "title": "" }, { "docid": "c602b4575ee031c25b84c2f5efc30ef9", "score": "0.61110467", "text": "def get_token_auth_header():\n auth = request.headers.get('Authorization', None)\n if not auth:\n raise AuthError({\n 'code': 'authorization_header_missing',\n 'description': 'Authorization header is expected.'\n }, 401)\n elif auth.split()[0].lower() != 'bearer':\n raise AuthError({\n 'code': 'invalid_header',\n 'description': 'Authorization header must start with \"Bearer\".'\n }, 401)\n elif len(auth.split()) == 1:\n raise AuthError({\n 'code': 'invalid_header',\n 'description': 'Authorization header must be include type and token.'\n }, 401)\n elif len(auth.split()) > 2:\n raise AuthError({\n 'code': 'invalid_header',\n 'description': 'Authorization header must be Bearer token.'\n }, 401)\n else:\n token = auth.split()[1]\n return token", "title": "" }, { "docid": "cc85428523410c1c906ec15ffb81a3ea", "score": "0.61088824", "text": "def header(self):\r\n return self.__header", "title": "" }, { "docid": "5605b5955a99ff4a39da7d55dbe24306", "score": "0.60934275", "text": "def parse_authorization_header(header):\n \n re_header = re.compile('UsernameToken\\s+Username=\"(.*)\",\\s+PasswordDigest=\"(.*)\",\\s+Created=\"(.*)\",\\s+Nonce=\"(.*)\"')\n \n match = re_header.match(header)\n if match:\n return match.groups()\n return None", "title": "" }, { "docid": "5d1c2e52935bd71b7c693a781a6a2b3c", "score": "0.60634726", "text": "def get_token_auth_header():\n auth = request.headers.get(\"Authorization\", None)\n print(auth)\n\n if not auth:\n raise AuthError({\"code\": \"authorization_header_missing\",\n \"description\":\n \"Authorization header is expected\"}, 401)\n \n parts = auth.split()\n \n if parts[0].lower() != \"bearer\":\n raise 
AuthError({\"code\": \"invalid_header\",\n \"description\":\n \"Authorization header must start with\"\n \" Bearer\"}, 401)\n elif len(parts) == 1:\n raise AuthError({\"code\": \"invalid_header\",\n \"description\": \"Token not found\"}, 401)\n elif len(parts) > 2:\n raise AuthError({\"code\": \"invalid_header\",\n \"description\":\n \"Authorization header must be\"\n \" Bearer token\"}, 401)\n\n token = parts[1]\n return token", "title": "" }, { "docid": "a5b4df87778cbea28fe145c633f71435", "score": "0.6057926", "text": "def get_header(filename):\n if not os.path.isfile(filename):\n sys.exit('ERROR: input {} does not exist'.format(filename))\n try:\n hdr = dcm.read_file(filename)\n return hdr\n except:\n sys.exit('ERROR: failed to parse {}'.format(filename))", "title": "" }, { "docid": "61811b59131fa6ae206a5a91df3057d8", "score": "0.6052161", "text": "def mail_header(self):\n return self._hdr", "title": "" }, { "docid": "658464ce4c162369bde664dc460650d9", "score": "0.6050239", "text": "def header(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"header\")", "title": "" }, { "docid": "31b9d24b66994a399ee6e246f84d339b", "score": "0.60405517", "text": "def getHeader(key):", "title": "" }, { "docid": "1837fb03b7e058700875117d9db9a421", "score": "0.60300654", "text": "def get_header(self, target: str):\n\t\ttry:\n\t\t\tif not self.check_magic(target):\n\t\t\t\tprint(\"File magic invalid.\")\n\t\t\t\treturn False\n\t\t\twith open(target, \"rb+\") as archive:\n\t\t\t\theader = archive.read(HEADER_LENGTH)\n\t\t\t\treturn self.unpack_header(header)\n\t\texcept Exception as e:\n\t\t\tprint(f\"Failed to get header for {target} - {e}\")\n\t\t\treturn None", "title": "" }, { "docid": "70ff39b96a889d75527409b31e96a62d", "score": "0.6028418", "text": "def __getitem__(self, key):\n\n return self._headers[key.lower()]", "title": "" }, { "docid": "da394d7140a4bd519f0174e39e00ba65", "score": "0.6013571", "text": "def get_auth_header(self):\n if not self.verify():\n return None\n\n auth_val = self.encode_auth_header_val()\n if not auth_val:\n return None\n\n return {'Authorization': auth_val.replace('\\n', '')}", "title": "" }, { "docid": "ff9c4e8795d38ebf3dc3944bd79a52c9", "score": "0.6006714", "text": "def get_token_auth_header():\n auth = request.headers.get('Authorization', None)\n if not auth:\n raise AuthError({\n 'code': 'authorization_header_missing',\n 'description': 'Authorization header is expected.'\n }, 401)\n\n parts = auth.split()\n if parts[0].lower() != 'bearer':\n raise AuthError({\n 'code': 'invalid_header',\n 'description': 'Authorization header must start with \"Bearer\".'\n }, 401)\n\n elif len(parts) == 1:\n raise AuthError({\n 'code': 'invalid_header',\n 'description': 'Token not found.'\n }, 401)\n\n elif len(parts) > 2:\n raise AuthError({\n 'code': 'invalid_header',\n 'description': 'Authorization header must be bearer token.'\n }, 401)\n\n token = parts[1]\n return token", "title": "" }, { "docid": "a2d68db7aab98e4e0f5bb7b021759805", "score": "0.5986861", "text": "def get_token_auth_header():\n auth = request.headers.get(\"Authorization\", None)\n if not auth:\n raise AuthError({\"code\": \"authorization_header_missing\",\n \"description\":\n \"Authorization header is expected\"}, 401)\n\n parts = auth.split()\n\n if parts[0].lower() != \"bearer\":\n raise AuthError({\"code\": \"invalid_header\",\n \"description\":\n \"Authorization header must start with\"\n \" Bearer\"}, 401)\n elif len(parts) == 1:\n raise AuthError({\"code\": \"invalid_header\",\n 
\"description\": \"Token not found\"}, 401)\n elif len(parts) > 2:\n raise AuthError({\"code\": \"invalid_header\",\n \"description\":\n \"Authorization header must be\"\n \" Bearer token\"}, 401)\n\n token = parts[1]\n return token", "title": "" }, { "docid": "81d5621fef105f9f6013d84bfb8dcfd6", "score": "0.5933432", "text": "def get_token_auth_header():\n auth = request.headers.get('Authorization', None)\n if not auth:\n raise AuthError({'code': 'authorization_header_missing',\n 'description': 'Authorization header is expected'}, 401)\n\n parts = auth.split()\n\n if parts[0].lower() != 'bearer':\n raise AuthError({'code': 'invalid_header',\n 'description': 'Authorization header must start with Bearer'}, 401)\n\n if len(parts) < 2:\n raise AuthError({'code': 'invalid_header',\n 'description': 'Token not found after Bearer'}, 401)\n\n if len(parts) > 2:\n raise AuthError({'code': 'invalid_header',\n 'description': 'Authorization header is an invalid token structure'}, 401)\n\n return parts[1]", "title": "" }, { "docid": "e07cd2bdeb31e5ef0db9fa3d33a9f193", "score": "0.5927519", "text": "def get_token_auth_header():\n # Get authorization form request header\n auth = request.headers.get('Authorization', None)\n # Check if authorization header exists\n if not auth:\n raise AuthError({\n 'code': 'authorization_header_missing',\n 'description': 'Authorization header is MISSING!'\n }, abort(401))\n # If bearer token, then first part of string = 'bearer'\n parts = auth.split()\n if parts[0].lower() != 'bearer':\n raise AuthError({\n 'code': 'invalid_header',\n 'description': 'Authorization header must start with \"Bearer\"'\n }, abort(401))\n # Authorization header string length must be 2\n elif len(parts) != 2:\n raise AuthError({\n 'code': 'invalid_header',\n 'description': 'Authorization header must be a BEARER token'\n }, abort(401))\n\n token = parts[1]\n return token", "title": "" }, { "docid": "4caa536c3f6a8a2996b61ae60f88e3a6", "score": "0.59211224", "text": "def parse_header(self):", "title": "" }, { "docid": "0b09ec53a066209585876c066192d8cf", "score": "0.5880845", "text": "def _extract_code(header):\n code = \"\"\n matcher = re.compile(\">([^\\s]*).*\")\n m = matcher.match(header)\n try:\n code = m.group(1)\n except AttributeError as e:\n raise ValueError(f\"No code in header: {header}\") from e\n return (code.lower())", "title": "" }, { "docid": "2b899ded9206d27b1e537856fab134f8", "score": "0.5859933", "text": "def get_header(filepath):\n header = None\n for i, x in enumerate(open(filepath)):\n if i == 0:\n header = x\n return(header)", "title": "" }, { "docid": "9ec1d1643da363a1f9774cc0e7a950da", "score": "0.58497316", "text": "def get_header( self ):\n\t\tkey = self.key\n\t\tvalue = self.value\n\t\tpath = self.path\n\t\texpires = self.expires.strftime( \"%a, %d-%m-%y %H:%M:%S GMT\" )\n\t\treturn ( \"Set-Cookie\", \"%(key)s=%(value)s; Path=%(path)s; Expires=%(expires)s;\" % locals() )", "title": "" }, { "docid": "307990c6c5e052805cdf56318402a702", "score": "0.58480704", "text": "def get_request_uri(header):\n\n match = re.match('^GET /([^\\s]*) HTTP/1.[10]\\s*$', header, re.MULTILINE)\n if match:\n return match.group(1)\n else:\n return None", "title": "" }, { "docid": "5ebd85ba68c4ad6ed7f45370e6279861", "score": "0.5831813", "text": "def get(self, name, failobj=None):\n name = name.lower()\n for k, v in self._headers:\n if k.lower() == name:\n return v\n return failobj", "title": "" }, { "docid": "06b5ccf2a18c004731b5e7e3ac784795", "score": "0.5820113", "text": "def 
hand_header(request):\n hh = PokerStarsHandHistory(request.instance.hand_text)\n hh.parse_header()\n return hh", "title": "" }, { "docid": "a67f55a157e59fcad78dfc553673facf", "score": "0.5808738", "text": "def process_upstream_headers(\n self, *, scope: Scope, proxy_response: aiohttp.ClientResponse\n ) -> Headerlike:\n return proxy_response.headers # type: ignore", "title": "" }, { "docid": "6559599a0015d0faf218e1eaf8f2832b", "score": "0.58025235", "text": "def get_headers(req):\n user = req.headers.get('X-User-ID', None)\n tenant = req.headers.get('X-Tenant-ID', None)\n return user, tenant", "title": "" }, { "docid": "0be8c649f1874803995d481b5ecc20e9", "score": "0.579759", "text": "def parse_header(authorization_header, exception_class=HTTPUnauthorized):\n if authorization_header:\n parts = authorization_header.split()\n\n if parts[0].lower() != 'bearer' or len(parts) == 1 or len(\n parts) > 2:\n raise exception_class(\"invalid_header\")\n\n jwt_token = parts[1]\n try:\n # Decode token\n payload = decode_token(jwt_token)\n return jwt_token, payload\n except jwt.ExpiredSignature: # pragma: no cover\n raise exception_class(\"token is expired\")\n\n except jwt.InvalidAudienceError: # pragma: no cover\n raise exception_class(\"incorrect audience\")\n\n except jwt.DecodeError: # pragma: no cover\n raise exception_class(\"token signature is invalid\")\n\n except jwt.InvalidIssuerError: # pragma: no cover\n raise exception_class(\"token issuer is invalid\")\n\n except Exception as exc: # pragma: no cover\n raise exception_class(exc)\n else: # pragma: no cover\n raise exception_class(\"Authorization header not present\")", "title": "" }, { "docid": "c28d9bc112c3f7ffab3930996a2ba45f", "score": "0.5792205", "text": "def get_reply_header_text(self):\r\n\r\n header_tuples = self.get_reply_headers()\r\n\r\n headers = [self.response(self.reply_code)]\r\n headers += [\"%s: %s\" % h for h in header_tuples]\r\n return '\\r\\n'.join(headers) + '\\r\\n\\r\\n'", "title": "" }, { "docid": "ec36ee81fb9370a1e7b70b40ed6b6112", "score": "0.5777906", "text": "def headers(self):\n return(self.__response.headers)", "title": "" }, { "docid": "a3424d9e1c2a0ec669aecb278e563fd5", "score": "0.57767", "text": "def get_header_value(self,name,headers):\n\t\tfor header in headers:\n\t\t\tif header['name'] == name:\n\t\t\t\treturn header['value']", "title": "" }, { "docid": "5ed1bb18f817538f46a5d999575a8b58", "score": "0.5776579", "text": "def getheaders(self):\n return self.urllib3_response.getheaders()", "title": "" }, { "docid": "5ed1bb18f817538f46a5d999575a8b58", "score": "0.5776579", "text": "def getheaders(self):\n return self.urllib3_response.getheaders()", "title": "" }, { "docid": "5ed1bb18f817538f46a5d999575a8b58", "score": "0.5776579", "text": "def getheaders(self):\n return self.urllib3_response.getheaders()", "title": "" }, { "docid": "6c174d82b978d998a7dad8255dac24f0", "score": "0.57720387", "text": "def extract_header(message_dict):\n header = message_dict[\"structured_text\"][\"header\"]\n\n return header", "title": "" }, { "docid": "e90314ca4324ee571e5bc14d1432bc07", "score": "0.57712615", "text": "def filter_headers(self, header):\n if header == \"Ticker symbol\":\n return \"symbol\"\n elif header == \"GICS Sector\":\n return \"sector\"\n elif header == \"Security\":\n return \"name\"\n elif header == \"GICS Sub Industry\":\n return \"industry\"\n else:\n return header", "title": "" }, { "docid": "1c1af4f3473972a897debb07b1ee2d96", "score": "0.5769977", "text": "def getLastHeader(self, headerName: str, 
default = None):\n return self._headers.get(headerName, default)", "title": "" }, { "docid": "e97acfaf3fe1e0366a4eea6e80337bc2", "score": "0.5769082", "text": "def get_request_header(conn_socket: socket.socket) -> str:\n raw_double_new_line = \"\\r\\n\\r\\n\".encode(HttpServer.FORMAT)\n raw_request_header = conn_socket.recv(HttpServer.HEADER)\n\n while raw_double_new_line not in raw_request_header:\n raw_request_header += conn_socket.recv(HttpServer.HEADER)\n\n return raw_request_header.decode(HttpServer.FORMAT)", "title": "" }, { "docid": "8fc2d1267c366eb75efeba078f4797fa", "score": "0.5768455", "text": "def get_data_for_header(req):\n try:\n user_id = req.user\n except KeyError as e:\n msg = req.get_error_msg(e)\n return send_error_response(msg)\n try:\n header_data = dict()\n lang = rt.get_state(user_id).language\n #TODO change on database access\n header_data['languages'] = common_getter.get_languages_list(pt, lang)\n header_data['user'] = common_getter.get_user_info(pt, user_id, lang)\n header_data['client'] = common_getter.get_client_info(pt, user_id, lang)\n return send_success_response(header_data)\n except Exception as e:\n msg = req.get_error_msg(e, lang=lang)\n return send_error_response(msg)", "title": "" }, { "docid": "295f8dea91fe89ae230cc7452c0075f8", "score": "0.57583654", "text": "def get_heading(self):\n return self.heading[0]", "title": "" }, { "docid": "6b4e7ec8099858ef523794e57e3f7439", "score": "0.5758109", "text": "def get_api_header(token):\n return {\n 'Authorization': 'Token ' + str(token)}", "title": "" }, { "docid": "93b3e24b8ac3fa7eb093d350993a6d56", "score": "0.5749581", "text": "def header(self, **args):\n return self.pageConfig['header'] % self.pageConfig", "title": "" }, { "docid": "d692abfd905231bee0321b5619560344", "score": "0.57381684", "text": "def get_jwt_header(self):\n if self.jwt_header:\n return self.jwt_header\n self.jwt_header = self.get_jwt_token_from_secret_file(str(self.jwtfile))\n return self.jwt_header", "title": "" }, { "docid": "027efa6e2c186fb4300a3fc61d78fef2", "score": "0.5732351", "text": "def _extract_etag(response):\n if response and response.headers:\n return response.headers.get(\"etag\")\n\n return None", "title": "" }, { "docid": "39fe291b19b46b52190341c1ed142cea", "score": "0.57322663", "text": "def extract_bearer_token(request):\n return request.headers['Authorization'].split(\" \")[-1].strip()", "title": "" }, { "docid": "94965ae1dc02355e1403e90f2205794e", "score": "0.5727842", "text": "def get_authorization_header(client, user):\n # obtain authorization token\n response = client.post(\n reverse('token-obtain'),\n data={'username': user.username, 'password': user.raw_password},\n content_type='application/json'\n )\n token = response.json()['access']\n return {'HTTP_AUTHORIZATION': f'Bearer {token}'}", "title": "" }, { "docid": "0c13cdd82f417ecff3d8c623e025de24", "score": "0.57213336", "text": "def auth_header(self):\n return self._auth_header", "title": "" }, { "docid": "1b483ebb8f729a50363a86999f7282f8", "score": "0.5704076", "text": "def get_headers(self, ):\n return self.attrs.get(self.AttributeNames.HEADERS, None)", "title": "" }, { "docid": "4b3c34d01849f9d7d3bf4c3d5e1b7569", "score": "0.5691332", "text": "def test_response_header(BASE_URL, COUNTRY_CODE):\n # make request\n result = requests.get(f'{BASE_URL}{COUNTRY_CODE}')\n assert result.headers['Content-Type'] == 'application/json'", "title": "" }, { "docid": "6055b298e3d0a0de4a700e1d6cf3e1ba", "score": "0.5682932", "text": "def _retrieve_token(request):\n auth_string 
= request.headers.get('Authorization')\n try:\n match = re.match(\"Bearer (.+)\", auth_string)\n except TypeError:\n match = None\n if match:\n return match.groups()[0]", "title": "" }, { "docid": "3942f16900693f67351396fc95fda8b2", "score": "0.5682016", "text": "def view_headers():\n\n return jsonify(get_dict('headers'))", "title": "" }, { "docid": "306d98d59aca5615a2c21c1c01472939", "score": "0.5673929", "text": "def _get_header_index(self, columnname):\n\n return self.headers.index(columnname)", "title": "" }, { "docid": "5355dbd4aeeb7cc84e70e1b63d2cf49b", "score": "0.56722826", "text": "def get_authenticate_header(self):\n pass", "title": "" }, { "docid": "f63dbd4898f6a81db6f365d10b2b68e2", "score": "0.5657514", "text": "def get(self, header_dict=dict()):\n header_dict = header_dict.copy()\n header_dict['User-Agent'] = random.sample(self.user_agents, 1)[0]\n return header_dict", "title": "" }, { "docid": "5a879f52208bac615546b161b7c8c7c2", "score": "0.56512576", "text": "def get_headers(self):\n \n return self.headers", "title": "" }, { "docid": "d2a44e58007b9ba475b91cf9d33e9029", "score": "0.56388617", "text": "def extract_from_header(header, left, right):\n return re.search(r'{}(.*?){}'.format(left, right), header).group(1)", "title": "" }, { "docid": "4bb7df2f9fcd5319c78709a5c8e434ae", "score": "0.56280214", "text": "async def jwt_header(\n Authorization: Optional[str] = Header(None),\n) -> Optional[RawAuth]:\n if not Authorization:\n return None\n\n parts = Authorization.split()\n if parts[0].lower() != \"bearer\":\n log.debug(\"Authorization header Failed, lacked bearer\")\n return None\n if len(parts) != 2:\n log.debug(\"Authorization header Failed, not 2 parts\")\n return None\n else:\n log.debug(\"Got header:Authorization with a JWT\")\n log.debug(\"jwt_header(): %s\", Authorization)\n return RawAuth(\n rawjwt=parts[1],\n rawheader=Authorization,\n via=\"header\",\n key=\"Authorization\",\n )", "title": "" }, { "docid": "fc0b06215930a9642612313ba6a39f47", "score": "0.56257826", "text": "def header_statement(stmt, accel):\n if not isinstance(stmt, CommentNode): None\n if not stmt.value.startswith('#$'): None\n\n header = stmt.value[2:].lstrip()\n if not directive.startswith('header'): None\n\n return stmt.value", "title": "" }, { "docid": "b368249141b22b8671abbde54ba959c3", "score": "0.56085163", "text": "def getheader(header_text, default=\"ascii\"):\n # Borrowed from: http://ginstrom.com/scribbles/2007/11/19/parsing-multilingual-email-with-python/\n\n headers = email.Header.decode_header(header_text)\n header_sections = [unicode(text, charset or default)\n for text, charset in headers]\n return u\" \".join(header_sections)", "title": "" }, { "docid": "a9a6a29fe013bae30ac8120070464667", "score": "0.5600225", "text": "def read_header(file_path):\n with open(file_path, 'r') as f:\n header = f.readline()\n return header.strip()", "title": "" }, { "docid": "7286881a98f31f59abe8df9911ce49d4", "score": "0.55964553", "text": "def get_file_name_from_resposne(r):\n if not r: \n return None\n return get_file_name_from_cd(r.headers.get())", "title": "" }, { "docid": "e4bc366615fc5574c588f95913a9d0e9", "score": "0.55924976", "text": "def decode_header(byte_iter):\n try:\n return MMSDecoder.decode_mms_header(byte_iter)\n except wsp_pdu.DecodeError:\n return wsp_pdu.Decoder.decode_header(byte_iter)", "title": "" }, { "docid": "7abb0a71eb3739fdae7a2c7f166b839f", "score": "0.5587723", "text": "def authentication_header():\n with open(KEY_FILE, \"r\") as file:\n header = json.load(file)\n 
return header", "title": "" } ]
dbaad6bac4211be8b5d715909a0ae04e
Collects and formats population data for France by commune (source: INSEE). Reads the input and output file locations from the `source_config.py` file.
[ { "docid": "47ba6c20e1c1ca5e6cd3d37a1c03495d", "score": "0.660819", "text": "def make_population_commune():\n print('>> Handling \"INSEE / population / commune\" data for year :')\n latest_year = str(max(\n [int(item) for item in source_config.population_data_raw_file[\n 'commune'].keys()]))\n raw_pop_file = source_config.population_data_raw_file['commune'][latest_year]\n pop_processed_annee_file = source_config.population_processed_file[\n 'commune'][latest_year]['annee']\n pop_processed_dept_file = source_config.population_processed_file[\n 'commune'][latest_year]['region']\n pop_data = pd.read_excel(raw_pop_file, skiprows=5)\n pop_output = [\n ['code_geo', 'region', 'departement', 'libelle_geo',\n 'type_geo', 'annee', 'population']]\n stub_cols = ['CODGEO', 'REG', 'DEP', 'LIBGEO']\n for col in [col for col in pop_data.columns if col not in stub_cols]:\n if col.startswith('PMUN'):\n year = 2000 + int(col[-2:])\n elif col.startswith('PSDC'):\n year = 1900 + int(col[-2:])\n elif col.startswith('PTOT'):\n if len(col) == 6:\n year = 1900 + int(col[-2:])\n else:\n year = int(col[-4:])\n print('\\t- {}'.format(year))\n selected_df = pop_data[stub_cols + [col]]\n for index, row in selected_df.iterrows():\n pop_output.append(\n [row['CODGEO'], row['REG'], row['DEP'], row['LIBGEO'],\n 'commune', year, row[col]])\n pop_formatted_data = pd.DataFrame(pop_output[1:], columns=pop_output[0])\n pop_formatted_data['annee'] = pop_formatted_data.annee.astype(str)\n print('>> Writing yearly data...')\n yearly_dfs = {}\n for annee in pop_formatted_data['annee'].unique():\n yearly_dfs[annee] = pop_formatted_data[\n pop_formatted_data.annee == annee]\n write_excel_file_sheets(yearly_dfs, output_file=pop_processed_annee_file)\n print('>> Writing data by departement...')\n communes_dfs = {}\n for departement in pop_formatted_data['departement'].unique():\n communes_dfs[departement] = pop_formatted_data[\n pop_formatted_data.departement == departement]\n write_excel_file_sheets(communes_dfs, output_file=pop_processed_dept_file)", "title": "" } ]
[ { "docid": "7982c373c8c406cf15bdf6e8007ff6d2", "score": "0.56587607", "text": "def extract_data(file):\n countries = [\n \"Brunei Darussalam\",\n \"Cambodia\",\n \"Indonesia\",\n \"Lao People's Democratic Republic\",\n \"Malaysia\",\n \"Myanmar\",\n \"Philippines\",\n \"Singapore\",\n \"Thailand\",\n \"Viet Nam\",\n ]\n\n population_data = {\n \"gTitle\": \"ASEAN Countries Population For Year 2014\",\n \"xLabels\": [\n \"Brunei\",\n \"Combodia\",\n \"Indonesia\",\n \"Laos\",\n \"Malaysia\",\n \"Myanmar\",\n \"Philippines\",\n \"Singapore\",\n \"Thailand\",\n \"Vietnam\",\n ],\n \"xText\": \"Countries\",\n \"yText\": \"Population in millions\",\n }\n population = []\n with open(file, mode=\"r\") as csv_file:\n csv_reader = csv.DictReader(csv_file)\n\n for row in csv_reader:\n if row[\"Region\"] in countries and row[\"Year\"] == \"2014\":\n value = int(float(row[\"Population\"]) / 1000)\n population.append(value)\n\n population_data[\"data\"] = population\n\n return population_data", "title": "" }, { "docid": "910956475442c4c952004ced05f48e6f", "score": "0.5367511", "text": "def load(self):\n\n data = data_loader(settings.REGULATIONS_DATA_PATH.format(self.source_file))\n urls = list(data[\"UK Reg\"])\n for url in urls:\n if url is not nan:\n self.append_url_title(url)\n\n data_writer(\n settings.REGULATIONS_DATA_PATH.format(self.output_file), self.documents\n )", "title": "" }, { "docid": "ebece9976d655df5c82714cc0215edff", "score": "0.53563803", "text": "def main_pr1():\n\t# these were the cities from class:\n\t# Origins = ['Pittsburgh,PA','Boston,MA','Seattle,WA'] # starts\n\t# Dests = ['Claremont,CA','Atlanta,GA'] # goals\n\t#\n\t# Origins are rows...\n\t# Dests are columns...\n\tpass", "title": "" }, { "docid": "6f2aa9397dab50de08dfff9f820d4655", "score": "0.5331167", "text": "def parse_csv_and_return_data(source_urls, csv_file_path, country_table, figure_column_name):\n\n # Define all table columns as lists\n global figure\n list_date = [\"Date\"]\n list_country_origin = [\"country_origin_id\"]\n list_country_destination = [\"country_destination_id\"]\n list_figures = [figure_column_name]\n\n # Iterate all incoming source urls\n for source_url in source_urls:\n\n with open(source_url, \"r\") as file:\n reader = csv.reader(file)\n # Skip the first row\n next(reader)\n\n country_destination_row = next(reader) # gets the destination country row\n\n # Read date from file\n date = source_url[-8:-4] +\"-01-01\"\n\n if figure_column_name == \"migration\":\n column_end = -2\n if figure_column_name == \"remittance\":\n column_end = -1\n\n # Parse and store names of destination countries excluding first cell and World total\n country_destination_names = []\n for element in country_destination_row[1:column_end]:\n country_destination_names.append(element)\n\n\n for row in reader:\n # read all rows until \"World\" or \"TOTAL\" or \"Unidentified*\"\n if row[0] == \"World\" or row[0] == \"TOTAL\" or row[0] == \"Unidentified*\": break\n\n # Reade country name and then retrieve id from country_table\n country_origin_name = row[0]\n country_table_index = country_table[1].index(country_origin_name)\n country_origin_name_id = country_table[0][country_table_index]\n\n index_country_destination = 0\n for element in row[1:column_end]:\n # add country_destination_id\n country_destination_name = country_destination_names[index_country_destination]\n try:\n country_table_index = country_table[1].index(country_destination_name)\n except ValueError:\n continue\n country_destination_name_id = 
country_table[0][country_table_index]\n list_country_destination.append(country_destination_name_id)\n # add date\n list_date.append(date)\n # add country_origin_id\n list_country_origin.append(country_origin_name_id)\n # add migration figure\n figure = element.strip().replace(\",\",\"\").replace(\"*\",\"\").replace(\"N/A\", \"\")\n list_figures.append(figure)\n\n index_country_destination+=1\n\n final_table = [list_date, list_country_origin, list_country_destination, list_figures]\n\n # Export migration(country_origin_id, country_destination_id, migration) table into csv format\n with open(csv_file_path + figure_column_name + \".csv\", \"w\", newline=\"\") as file:\n writer = csv.writer(file, delimiter=\";\")\n for i in range(len(final_table[0])):\n writer.writerow([x[i] for x in final_table])\n\n return final_table", "title": "" }, { "docid": "5a9f4ca8bb83e00b76b4ca64e0890317", "score": "0.5277028", "text": "def parsing_data():\n\n countries_list = []\n\n with open(INPUT_CSV, 'r') as f:\n # Start reading, skip first line\n reader = csv.reader(f)\n next(f)\n for row in reader:\n # Add relevant info to list per country\n if len(row) > 1:\n country = row[0].strip()\n # Check country name for commas and such and fix\n if ',' in country:\n for i in range(len(country) - 3):\n if country[i] == ',':\n country = (country[(i + 1):] + ' ' +\n country[:(i)]).strip()\n # Get other relevant variables\n region = row[1].strip()\n inf_mortality = isolate_digit(row[7])\n pop_density = isolate_digit(row[4])\n gdp = isolate_digit(row[8])\n\n # Adding list to list\n country_list = [country, region, pop_density, inf_mortality,\n gdp]\n countries_list.append(country_list)\n\n with open(OUTPUT_CSV, 'w') as g:\n save_csv(g, countries_list)", "title": "" }, { "docid": "9fc3f1e13bcf223c1bf235345800b240", "score": "0.52317905", "text": "def read_population_file(year, variant=\"Medium\"):\n population = {}\n\n print(f\"Reading population data for {year}, {variant} scenario\")\n with resources.open_text(\n \"data\", \"WPP2019_TotalPopulationBySex.csv\"\n ) as fid:\n rows = csv.DictReader(fid)\n\n # Read data, filter the correct year\n for row in rows:\n if (\n int(row[\"LocID\"]) < 900\n and row[\"Time\"] == year\n and row[\"Variant\"] == variant\n ):\n pop = round(float(row[\"PopTotal\"]) * 1000)\n population[row[\"Location\"]] = pop\n\n return population", "title": "" }, { "docid": "316ab62722ffb8648dabe4a32c664e44", "score": "0.5224759", "text": "def get_source_data(self):\n\n self.journals_s = []\n self.stored_sql_s = []\n self.codes_s = []\n self.categories_s = []\n self.code_text_s = []\n self.annotations_s = []\n self.code_image_s = []\n self.code_av_s = []\n self.cases_s = []\n self.case_text_s = []\n self.attribute_types_s = []\n self.attributes_s = []\n cur_s = self.conn_s.cursor()\n # Database version must be v5 or higher\n cur_s.execute(\"select databaseversion from project\")\n version = cur_s.fetchone()\n if version[0] in (\"v1\", \"v2\", \"v3\", \"v4\"):\n self.summary_msg += _(\"Need to update the source project database.\") + \"\\n\"\n self.summary_msg += _(\"Please open the source project using QualCoder. Then close the project.\") + \"\\n\"\n self.summary_msg += _(\"This will update the database schema. 
Then try merging again.\")\n self.summary_msg += _(\"Project not merged\") + \"\\n\"\n return False\n # Journal data\n sql_journal = \"select name, jentry, date, owner from journal\"\n cur_s.execute(sql_journal)\n res_journals = cur_s.fetchall()\n for i in res_journals:\n src = {\"name\": i[0], \"jentry\": i[1], \"date\": i[2], \"owner\": i[3]}\n self.journals_s.append(src)\n # Stored sql data\n sql_stored_sql = \"select title, description, grouper, ssql from stored_sql\"\n cur_s.execute(sql_stored_sql)\n res_stored_sqls = cur_s.fetchall()\n for i in res_stored_sqls:\n src = {\"title\": i[0], \"description\": i[1], \"grouper\": i[2], \"ssql\": i[3]}\n self.stored_sql_s.append(src)\n # Source data\n sql_source = \"select id, name, fulltext,mediapath,memo,owner,date,av_text_id from source\"\n cur_s.execute(sql_source)\n res_source = cur_s.fetchall()\n # Later update av_text_id\n for i in res_source:\n src = {\"id\": i[0], \"newid\": -1, \"name\": i[1], \"fulltext\": i[2], \"mediapath\": i[3], \"memo\": i[4],\n \"owner\": i[5], \"date\": i[6], \"av_text_id\": i[7], \"av_text_filename\": \"\"}\n self.source_s.append(src)\n # The av_text_id is not enough to recreate linkages. Need the referenced text file name.\n for i in self.source_s:\n if i['av_text_id'] is not None:\n cur_s.execute(\"select name from source where id=?\", [i['av_text_id']])\n res = cur_s.fetchone()\n if res is not None:\n i['av_text_filename'] = res[0]\n # Category data\n sql_codecats = \"select catid, supercatid, name, memo, owner, date from code_cat\"\n cur_s.execute(sql_codecats)\n res_codecats = cur_s.fetchall()\n for i in res_codecats:\n ccat = {\"catid\": i[0], \"supercatid\": i[1], \"supercatname\": None,\n \"name\": i[2], \"memo\": i[3], \"owner\": i[4], \"date\": i[5], }\n self.categories_s.append(ccat)\n # Remove categories from the source list, that are already present in the destination database\n cur_d = self.app.conn.cursor()\n cur_d.execute(\"select name from code_cat\")\n res_dest_catnames = cur_d.fetchall()\n dest_cat_names_list = [r[0] for r in res_dest_catnames]\n '''for r in res_dest_catnames:\n dest_cat_names_list.append(r[0])'''\n temp_source_cats = []\n for cat in self.categories_s:\n if cat['name'] not in dest_cat_names_list:\n temp_source_cats.append(cat)\n self.categories_s = temp_source_cats\n # Add reference to linked supercat using category name\n for cat in self.categories_s:\n cur_s.execute(\"select name from code_cat where catid=?\", [cat['supercatid']])\n res = cur_s.fetchone()\n if res is not None:\n cat['supercatname'] = res[0]\n # Code data\n sql_codenames = \"select cid, name, memo, owner, date, color, catid from code_name\"\n cur_s.execute(sql_codenames)\n res_codes = cur_s.fetchall()\n for i in res_codes:\n code_s = {\"cid\": i[0], \"newcid\": -1, \"name\": i[1], \"memo\": i[2], \"owner\": i[3], \"date\": i[4], \"color\": i[5],\n \"catid\": i[6], \"catname\": None}\n self.codes_s.append(code_s)\n # Get and fill category name if code is in a category\n for code_s in self.codes_s:\n cur_s.execute(\"select name from code_cat where catid=?\", [code_s['catid']])\n res = cur_s.fetchone()\n if res is not None:\n code_s['catname'] = res[0]\n # Code text data\n sql_codetext = \"select cid, fid, seltext, pos0, pos1, owner, date, memo, important from code_text\"\n cur_s.execute(sql_codetext)\n res_codetext = cur_s.fetchall()\n for i in res_codetext:\n ct = {\"cid\": i[0], \"newcid\": -1, \"fid\": i[1], \"newfid\": -1, \"seltext\": i[2], \"pos0\": i[3], \"pos1\": i[4],\n \"owner\": i[5], \"date\": 
i[6], \"memo\": i[7], \"important\": i[8]}\n self.code_text_s.append(ct)\n # Text annotations data\n sql_annotations = \"select fid, pos0, pos1, memo, owner, date from annotation\"\n cur_s.execute(sql_annotations)\n res_annot = cur_s.fetchall()\n for i in res_annot:\n an = {\"fid\": i[0], \"newfid\": -1, \"pos0\": i[1], \"pos1\": i[2], \"memo\": i[3], \"owner\": i[4], \"date\": i[5]}\n self.annotations_s.append(an)\n # Code image data\n sql_code_img = \"select cid, id, x1, y1, width, height, memo, date, owner, important from code_image\"\n cur_s.execute(sql_code_img)\n res_code_img = cur_s.fetchall()\n for i in res_code_img:\n cimg = {\"cid\": i[0], \"newcid\": -1, \"fid\": i[1], \"newfid\": -1, \"x1\": i[2], \"y1\": i[3],\n \"width\": i[4], \"height\": i[5], \"memo\": i[6], \"date\": i[7], \"owner\": i[8], \"important\": i[9]}\n self.code_image_s.append(cimg)\n # Code AV data\n sql_code_av = \"select cid, id, pos0, pos1, owner, date, memo, important from code_av\"\n cur_s.execute(sql_code_av)\n res_code_av = cur_s.fetchall()\n for i in res_code_av:\n c_av = {\"cid\": i[0], \"newcid\": -1, \"fid\": i[1], \"newfid\": -1, \"pos0\": i[2], \"pos1\": i[3],\n \"owner\": i[4], \"date\": i[5], \"memo\": i[6], \"important\": i[7]}\n self.code_av_s.append(c_av)\n # Case data\n sql_cases = \"select caseid, name, memo, owner, date from cases\"\n cur_s.execute(sql_cases)\n res_cases = cur_s.fetchall()\n for i in res_cases:\n c = {\"caseid\": i[0], \"newcaseid\": -1, \"name\": i[1], \"memo\": i[2], \"owner\": i[3], \"date\": i[4]}\n self.cases_s.append(c)\n sql_case_text = \"select caseid, fid, pos0, pos1 from case_text\"\n cur_s.execute(sql_case_text)\n res_case_text = cur_s.fetchall()\n for i in res_case_text:\n c = {\"caseid\": i[0], \"newcaseid\": -1, \"fid\": i[1], \"newfid\": -1, \"pos0\": i[2], \"pos1\": i[3]}\n self.case_text_s.append(c)\n # Attribute type data\n sql_attr_type = \"select name, memo, date, owner, caseOrFile, valuetype from attribute_type\"\n cur_s.execute(sql_attr_type)\n res_attr_type_s = cur_s.fetchall()\n keys = 'name', 'memo', 'date', 'owner', 'caseOrFile', 'valuetype'\n temp_attribute_types_s = []\n for row in res_attr_type_s:\n temp_attribute_types_s.append(dict(zip(keys, row)))\n # Remove matching attribute type names\n cur_d = self.app.conn.cursor()\n cur_d.execute(\"select name from attribute_type\")\n res_attr_name_dest = cur_d.fetchall()\n attribute_names_dest = [r[0] for r in res_attr_name_dest]\n '''for r in res_attr_name_dest:\n attribute_names_dest.append(r[0])'''\n self.attribute_types_s = []\n for r in temp_attribute_types_s:\n if r['name'] not in attribute_names_dest:\n self.attribute_types_s.append(r)\n # Attribute data\n sql_attributes = \"select name, attr_type, value, id, date ,owner from attribute\"\n cur_s.execute(sql_attributes)\n res_attributes = cur_s.fetchall()\n for i in res_attributes:\n attribute = {\"name\": i[0], \"attr_type\": i[1], \"value\": i[2], \"id\": i[3], \"newid\": -1, \"date\": i[4],\n \"owner\": i[5]}\n self.attributes_s.append(attribute)\n return True", "title": "" }, { "docid": "bfc8ca2f4191daa43801ca1690faded9", "score": "0.521376", "text": "def get_population_data(params):\n try:\n data = []\n # Get institution filters - TNCI - charitable\n report_region = params['report_region']\n org_unit = params['org_unit']\n org_type = params['org_type']\n inst_type = params['institution_type']\n if org_type == 'ALL':\n si_list = ['TNRH', 'TNRB', 'TNRR', 'TNRS', 'TNAP']\n inst_list = ['TNRC'] if inst_type == 'TNCI' else si_list\n else:\n 
inst_list = [str(inst_type)]\n # Get all org units under this category\n if report_region == 4:\n org_list = [int(org_unit)]\n else:\n org_qs = RegOrgUnit.objects.filter(\n is_void=False, org_unit_type_id__in=inst_list)\n if report_region in [2, 3]:\n sub_ids = params['sub_county_id']\n orgs_geos = RegOrgUnitGeography.objects.filter(\n is_void=False, area_id__in=sub_ids)\n orgs_geo = orgs_geos.values_list('org_unit_id', flat=True)\n uniq_orgs = list(set(orgs_geo))\n org_qs = org_qs.filter(org_unit_type_id__in=uniq_orgs)\n orgs_list = org_qs.values_list('id', flat=True)\n # This is a hack since Danet put this field as char and not int\n org_list = [str(o_list) for o_list in orgs_list]\n # Filter by date only first\n start_date = params['start_date']\n end_date = params['end_date']\n ip_queryset = OVCPlacement.objects.filter(\n residential_institution_name__in=org_list,\n is_void=False, admission_date__range=(start_date, end_date))\n place_ids = []\n for cl in ip_queryset:\n item = {}\n item['cat'] = cl.admission_type\n item['sex'] = cl.person.sex_id\n item['age'] = cl.person.years\n # For generating summaries\n item['kid'] = cl.person.id\n item['cid'] = cl.placement_id\n place_ids.append(cl.placement_id)\n data.append(item)\n # Last period population data\n dis_lists = OVCDischargeFollowUp.objects.filter(\n is_void=False, date_of_discharge__lt=start_date,\n placement_id_id__in=place_ids)\n dis_list = dis_lists.values_list('placement_id_id', flat=True)\n old_queryset = OVCPlacement.objects.filter(\n residential_institution_name__in=org_list,\n is_void=False, admission_date__lt=start_date)\n old_queryset = old_queryset.exclude(placement_id__in=dis_list)\n old_data = []\n for ol in old_queryset:\n oitem = {}\n oitem['cat'] = ol.admission_type\n oitem['sex'] = ol.person.sex_id\n oitem['age'] = ol.person.years\n oitem['kid'] = ol.person.id\n oitem['cid'] = ol.placement_id\n old_data.append(oitem)\n # All discharges\n dis_queryset = OVCDischargeFollowUp.objects.filter(\n is_void=False, date_of_discharge__range=(start_date, end_date),\n placement_id_id__in=place_ids)\n dis_data = []\n for dl in dis_queryset:\n ditem = {}\n ditem['cat'] = dl.type_of_discharge\n ditem['sex'] = dl.person.sex_id\n ditem['age'] = dl.person.years\n ditem['kid'] = dl.person.id\n ditem['cid'] = dl.placement_id_id\n dis_data.append(ditem)\n # Get all deaths within this period\n death_qs = OVCAdverseEventsFollowUp.objects.filter(\n is_void=False, adverse_condition_description='AEDE',\n adverse_event_date__range=(start_date, end_date),\n placement_id_id__in=place_ids)\n death_data = []\n for ds in death_qs:\n eitem = {}\n eitem['cat'] = ds.adverse_condition_description\n eitem['sex'] = ds.person.sex_id\n eitem['age'] = ds.person.years\n eitem['kid'] = ds.person.id\n eitem['cid'] = ds.placement_id_id\n death_data.append(eitem)\n raw_data = data_from_results(data)\n raw_old = data_from_results(old_data)\n raw_dis = data_from_results(dis_data)\n raw_death = data_from_results(death_data)\n raw_vals = {'data': raw_data, 'odata': raw_old,\n 'ddata': raw_dis, 'death': raw_death}\n return raw_vals\n except Exception, e:\n print 'Get institution data error - %s' % (str(e))\n raise e", "title": "" }, { "docid": "4cb64683cd47aae99995968bbcae99c9", "score": "0.5181293", "text": "def load_reference_data(self, config, collection):\n self.initialize_functions_dict(\n config.get_functions_file(collection)\n )\n self.initialize_proteins_dict(\n config.get_protein_list_file(collection)\n )", "title": "" }, { "docid": 
"bd403e7fb7ede9f796fad9b37066032e", "score": "0.5169473", "text": "def read_population_data(self, file):\n with open(file, encoding=\"utf-8-sig\") as f:\n reader = csv.DictReader(f)\n for row in reader:\n self.population[row[\"Country\"]] = row[\"Population\"]", "title": "" }, { "docid": "fcfe0cc671f4d8d206704861080a2283", "score": "0.50875056", "text": "def get_population_urban_features_filename(city_ref_file, data_source):\n\t# Folder exists?\n\timport os\n\tif not(os.path.isdir(storage_folder + \"/\" + data_source)): \n\t\tos.makedirs(storage_folder + \"/\" + data_source)\n\treturn storage_folder + \"/\" + data_source + \"/\" + city_ref_file + \"_urban_features.\" + geo_format", "title": "" }, { "docid": "e4fd044d636cb1589067fb02cb601b68", "score": "0.5064202", "text": "def get_population():\n return pd.read_csv(find_package_file(\"samples/population.csv\")).set_index(\n \"Country\"\n )[\"SP.POP.TOTL\"]", "title": "" }, { "docid": "288c2963e864651a70cc6a9af0574f4c", "score": "0.5050471", "text": "def run_source(kind, pl, data_in, most_suitable, n_plant_raster, discount_rate, result):\n print(\n \"Financial parameters: \"\n f'setup_costs={data_in[\"setup_costs\"]} '\n f'yearly_cost={data_in[\"tot_cost_year\"]} '\n f'plant_life={data_in[\"financing_years\"]}'\n )\n pl.financial = plant.Financial(\n investement_cost=int(data_in[\"setup_costs\"] * pl.peak_power),\n yearly_cost=data_in[\"tot_cost_year\"],\n plant_life=data_in[\"financing_years\"],\n )\n\n if most_suitable.max() > 0:\n result[\"indicator\"].extend(\n ro.get_indicators(kind, pl, most_suitable, n_plant_raster, discount_rate)\n )\n print(f'indicator={json.dumps(result[\"indicator\"])}')\n\n if pl.prof is not None:\n # default profile\n tot_profile = pl.prof[\"electricity\"].values * pl.n_plants\n default_profile, unit, con = ru.best_unit(\n tot_profile, \"kW\", no_data=0, fstat=np.median, powershift=0\n )\n print(\n \"Horuly profile \"\n f\"tot_profile={tot_profile} \"\n f\"default_profile={default_profile} \"\n f\"unit={unit} \"\n f\"con={con}\"\n )\n\n graph = ro.line(\n x=ro.reducelabels(pl.prof.index.strftime(\"%d-%b %H:%M\")),\n y_labels=[\"{} {} profile [{}]\".format(kind, pl.resolution[1], unit)],\n y_values=[default_profile],\n unit=unit,\n xLabel=pl.resolution[0],\n yLabel=\"{} {} profile [{}]\".format(kind, pl.resolution[1], unit),\n )\n\n # monthly profile of energy production\n\n df_month = pl.prof.groupby(pd.Grouper(freq=\"M\")).sum()\n df_month[\"output\"] = df_month[\"electricity\"] * pl.n_plants\n monthly_profile, unit, con = ru.best_unit(\n df_month[\"output\"].values,\n \"kWh\",\n no_data=0,\n fstat=np.median,\n powershift=0,\n )\n print(f\"monthly_profile={monthly_profile} \" f\"unit={unit} \" f\"con={con}\")\n\n graph_month = ro.line(\n x=df_month.index.strftime(\"%b\"),\n y_labels=[\n \"\"\"\"{} monthly energy\n production [{}]\"\"\".format(\n kind, unit\n )\n ],\n y_values=[monthly_profile],\n unit=unit,\n xLabel=\"Months\",\n yLabel=\"{} monthly profile [{}]\".format(kind, unit),\n )\n\n graphics = [graph, graph_month]\n else:\n graphics = []\n\n result[\"graphics\"] = graphics\n print(\"Computed correctly!\")\n print(result)\n return result", "title": "" }, { "docid": "e40da8c8834a874f2398c4864c831d9e", "score": "0.50308037", "text": "def extract_census_fields(self, years=[], sep=None, save_csv=True):\n\t\tif len(years) < 1:\n\t\t\tprint()\n\t\t\tprint(\"No year list passed, using ALL_YEARS from config file\")\n\t\t\tyears = self.config_data[\"ALL_YEARS\"]\n\n\t\tif sep is None:\n\t\t\tsep = 
self.config_data[\"SEP\"]\n\n\t\tif sep == \"|\":\n\t\t\tfile_ending = \"txt\"\n\t\telse:\n\t\t\tfile_ending = \"csv\"\n\n\t\tdata_path = self.config_data[\"CENSUS_PATH\"] #set path to data files\n\n\n\t\tfield_dict_2003 = self.config_data[\"extract_fields_2003_on\"] #fields to extract from file\n\t\tfield_dict_2002 = self.config_data[\"extract_fields_2002_prior\"] #fields to extract from file\n\n\t\tfield_names_2003 = list(field_dict_2003.keys())\n\t\tfield_names_2002 = list(field_dict_2002.keys())\n\n\t\tfield_nums_one_idx_2003 = list(field_dict_2003.values())\n\t\tfield_nums_one_idx_2002 = list(field_dict_2002.values())\n\n\t\tfield_nums_2003 = [int(num) - 1 for num in field_nums_one_idx_2003] #adjust for non-0 indexing in FFIEC file dictionary\n\t\tfield_nums_2002 = [int(num) - 1 for num in field_nums_one_idx_2002] #adjust for non-0 indexing in FFIEC file dictionary\n\n\t\treturn_dict = {} #for returning year keyed dataframes of census data\n\t\tfor year in years:\n\t\t\t#set file name\n\t\t\tprint()\n\t\t\tprint(\"processing data for {year}\".format(year=year))\n\t\t\t#data are loaded as objects to preserve integrity of geographic identifiers with leading 0s\n\t\t\tif int(year) >= 2012:\n\t\t\t\tprint(\"using CSV data file\")\n\t\t\t\tcensus_data = pd.read_csv(data_path + \"census_data_{year}.csv\".format(year=year), \n\t\t\t\t\t\t\t\t\t\t usecols=field_nums_2003, \n\t\t\t\t\t\t\t\t\t\t header=None, \n\t\t\t\t\t\t\t\t\t\t dtype=object,\n\t\t\t\t\t\t\t\t\t\t sep=\",\") #csv is the base format after extraction, don't change this unless you really mean it\n\n\t\t\t\tcensus_data = census_data[field_nums_2003]\n\t\t\t\tcensus_data.columns = field_names_2003\n\n\t\t\telse:\n\t\t\t\t#load fixed width spec for old FFIEC census data (only verified on 2006 year)\n\t\t\t\tprint(\"using fixed width data file\")\n\t\t\t\tif int(year) >= 2003:\n\t\t\t\t\t#set fixed width format spec\n\t\t\t\t\tfwf_spec = pd.read_csv(self.config_data[\"ffiec_census_2006_fwf_spec\"])\n\n\t\t\t\telse:\n\t\t\t\t\t#set fixed width format spec\n\t\t\t\t\tfwf_spec = pd.read_csv(self.config_data[\"ffiec_census_2002_fwf_spec\"])\t\n\n\t\t\t\t\t\t\t\t\t\n\t\t\t\tcensus_data = pd.read_fwf(data_path + \"census_data_{year}.dat\".format(year=year), \n\t\t\t\t\t\t\t\t\t\t widths=fwf_spec[\"Length\"], \n\t\t\t\t\t\t\t\t\t\t header=None, \n\t\t\t\t\t\t\t\t\t\t dtype=object)\n\n\t\t\t\tif int(year) >= 2003:\n\t\t\t\t\t#remove fields not in extract dictionary\n\t\t\t\t\tcensus_data = census_data[field_nums_2003]\n\t\t\t\t\t#set column names\n\t\t\t\t\tcensus_data.columns = field_names_2003\n\n\t\t\t\telse:\n\t\t\t\t\t#remove fields not in extract dictionary\n\t\t\t\t\tcensus_data = census_data[field_nums_2002]\n\t\t\t\t\t#set column names\n\t\t\t\t\tcensus_data.columns = field_names_2002\n\n\t\t\tif save_csv:\n\t\t\t\tcensus_data.to_csv(self.config_data[\"OUT_PATH\"] + \"census_data_extract_{year}.txt\".format(year=year), \n\t\t\t\t\t\t\t\t index=False,\n\t\t\t\t\t\t\t\t sep=\"|\")\n\t\t\t\tcensus_data.to_csv(self.config_data[\"OUT_PATH\"] + \"census_data_extract_{year}.csv\".format(year=year), \n\t\t\t\t\t\t\t\t index=False,\n\t\t\t\t\t\t\t\t sep=\",\")\n\t\t\treturn_dict[year] = census_data #add data extract to return dictionary\n\n\t\tif self.config_data[\"DEBUG\"]:\n\t\t\tprint()\n\t\t\tprint(\"field names to extract\")\n\t\t\tprint(field_names_2002)\n\t\t\tprint(field_names_2003)\n\t\t\tprint()\n\t\t\tprint(\"field numbers from file schema (not 0 
adjusted\")\n\t\t\tprint(field_nums_one_idx_2003)\n\t\t\tprint(field_nums_one_idx_2002)\t\t\t\n\n\t\treturn return_dict", "title": "" }, { "docid": "1e99b57371456e7028ca853a7bf07ba0", "score": "0.50267905", "text": "def _ParseCities(self, datafile):\r\n count_under = 0\r\n count_over = 0\r\n us_postal_places = [d.name for d in self._us_postal]\r\n self._data = []\r\n with open(datafile, 'r') as f:\r\n for line in f.readlines():\r\n fields = line.split('\\t')\r\n population = int(fields[14])\r\n if fields[8] == 'US':\r\n population *= options.options.us_city_boost\r\n if population < options.options.pop_filter:\r\n continue\r\n geo_id = fields[0]\r\n place = self._CleanName(fields[1].decode('utf-8'))\r\n cc = fields[8]\r\n lat = float(fields[4])\r\n lon = float(fields[5])\r\n pop = int(fields[14])\r\n\r\n # Attempt to find the place in the us postal database if cc='US'.\r\n state, county = (None, None)\r\n if cc == 'US':\r\n left_index = bisect_left(us_postal_places, place)\r\n right_index = bisect_right(us_postal_places, place)\r\n if left_index != len(self._us_postal) and right_index != len(self._us_postal):\r\n best_distance = None\r\n for i in range(left_index, right_index):\r\n best_index = -1\r\n distance = s2.DistanceBetweenLocations(lat, lon, self._us_postal[i].lat, self._us_postal[i].lon)\r\n if best_distance == None or distance < best_distance:\r\n state, county = (self._us_postal[i].state, self._us_postal[i].county)\r\n best_distance = distance\r\n best_index = i\r\n if distance > 1000:\r\n count_over += 1\r\n else:\r\n count_under += 1\r\n\r\n datum = GeoDatum(geo_id, place, lat, lon, cc, state, county, pop)\r\n self._data.append(datum)\r\n # Look for ascii name different from utf-8 name.\r\n if options.options.ascii_names and fields[1] != fields[2]:\r\n self._data.append(datum._replace(name=self._CleanName(fields[2].decode('utf-8'))))\r\n # Process alternate names if enabled.\r\n if options.options.alt_names:\r\n for alt_name in [self._CleanName(x.decode('utf-8')) for x in fields[3].split(',') if x]:\r\n self._data.append(datum._replace(name=alt_name))\r\n\r\n logging.info('%d US cities matched within 1km, %d didn\\'t' % (count_under, count_over))\r\n self._data = sorted(self._data, key=attrgetter('name'), cmp=locale.strcoll)\r\n logging.info('parsed and sorted %d places from world cities database' % len(self._data))", "title": "" }, { "docid": "f5ba3580934bdb39edca3585d7c978bd", "score": "0.5020366", "text": "def load_POP(region, unit=1.E3):\n fname = region+'_POP.dat'\n POP_data = np.genfromtxt(fname, skip_header=1, usecols=(0,1), unpack=True,\n dtype=[('Year','i'),('POP','f')])\n Year, POP = POP_data['Year'], unit*POP_data['POP']\n # POP normalized to its initial entry\n P = POP/POP[0]\n # Annual growth rate in %\n gr = np.zeros(len(POP) - 1)\n for i in range(len(gr)): \n gr[i] = 100.*(POP[i+1]/POP[i] - 1) \n\n return Year, POP, P, gr", "title": "" }, { "docid": "115d61a870141bc18c55f574585f2c20", "score": "0.49399337", "text": "def process_pop_displacement(filepath: str) -> Dict[str, List[float]]:\r\n dataset_dict = {}\r\n with open(filepath) as file:\r\n reader = csv.reader(file)\r\n next(reader)\r\n for row in reader:\r\n country_code = row[0]\r\n percent_pop_displacement = [float(row[i]) for i in range(2, 7)]\r\n dataset_dict[country_code] = percent_pop_displacement\r\n\r\n return dataset_dict", "title": "" }, { "docid": "ddb2faf6b18cdabad63542e687260f7e", "score": "0.49009374", "text": "def netl_eia_parse(*, source, year, **_):\n # load the csv file\n DATA_FILE = 
f\"NETL-EIA_powerplants_water_withdraw_consume_data_\" \\\n f\"{year}.csv\"\n df_load = pd.read_csv(f\"{externaldatapath}{DATA_FILE}\",\n index_col=0, low_memory=False)\n\n # subset df\n df = df_load[['Year', 'Month', '860 Cooling Type 1',\n 'Generator Primary Technology',\n 'Water Consumption Intensity Adjusted (gal/MWh)',\n 'Water Withdrawal Intensity Adjusted (gal/MWh)',\n 'Total net generation (MWh)',\n 'Total water discharge (million gallons) calc',\n 'Water Source Name', 'Water Type', 'County', 'State_y'\n ]].copy(deep=True)\n\n # multiply to get total water rather than rate so can sum to national level\n cols_to_multiply = ['Water Consumption Intensity Adjusted (gal/MWh)',\n 'Water Withdrawal Intensity Adjusted (gal/MWh)']\n for c in cols_to_multiply:\n df[c] = df[c] * df['Total net generation (MWh)']\n # strip 'intensity' and '/MWh' from col name\n df = df.rename(columns={\n c: c.replace('Intensity Adjusted ', '').replace('/MWh', '')})\n\n # aggregate to annual\n df = df.drop(columns=['Month', 'Total net generation (MWh)'])\n df2 = df.groupby(\n ['Year', '860 Cooling Type 1', 'Generator Primary Technology',\n 'Water Source Name', 'Water Type', 'County', 'State_y']).agg(\n {'Water Consumption (gal)': 'sum', 'Water Withdrawal (gal)': 'sum',\n 'Total water discharge (million gallons) calc': 'sum'}).reset_index()\n # drop 'calc' from column name\n df2 = df2.rename(columns={'Total water discharge (million gallons) calc':\n 'Total water discharge (million gallons)'})\n # drop rows where no water withdrawal data\n df3 = df2[df2['Water Withdrawal (gal)'] != 0].reset_index(drop=True)\n\n # make column lower case\n df3['Water Source Name'] = \\\n df3['Water Source Name'].apply(lambda x: x.lower())\n\n ground = 'wells|well|ground|gw|aquifer'\n surface = 'river|lake|reservoir|ocean|canal|creek|pond|bay|neosho|' \\\n 'stanton|gulf|pool|waterway|trinity|harbor|mississippi|' \\\n 'channel|laguna|sound|water way|coastal water authority|' \\\n 'folsom south'\n public_supply = 'municipal|muncpl|municipality|potw|city|muncipality|' \\\n 'municiple|city|west kern|wheeler ridge|sayreville'\n reclaimed = 'water treatment|chemicals|nwtp'\n storm = 'storm water'\n\n # assign compartments\n df3['Compartment'] = ''\n df3['Compartment'] = np.where(\n df3[\"Water Type\"] == 'Reclaimed', \"Reclaimed\", df3['Compartment'])\n df3['Compartment'] = np.where(\n (df3['Compartment'] == '') & (df3['Water Source Name'].str.contains(\n ground)), \"Ground\", df3['Compartment'])\n df3['Compartment'] = np.where(\n (df3['Compartment'] == '') & (df3['Water Source Name'].str.contains(\n surface)), \"Surface\", df3['Compartment'])\n df3['Compartment'] = np.where(\n (df3['Compartment'] == '') & (df3['Water Source Name'].str.contains(\n public_supply)), \"Public Supply\", df3['Compartment'])\n df3['Compartment'] = np.where(\n (df3['Compartment'] == '') & (df3['Water Source Name'].str.contains(\n reclaimed)), \"Reclaimed\", df3['Compartment'])\n df3['Compartment'] = np.where(\n (df3['Compartment'] == '') & (df3['Water Source Name'].str.contains(\n storm)), \"Stormwater\", df3['Compartment'])\n df3['Compartment'] = np.where(df3[\"Water Source Name\"] == 'lake wells',\n \"Surface\", df3['Compartment'])\n\n # assign fips\n fips = get_county_FIPS()\n us_abb = pd.DataFrame(us_state_abbrev.items(),\n columns=['State', 'State_y'])\n fips = fips.merge(us_abb, left_on='State', right_on='State')\n df3 = df3.merge(fips, left_on=['State_y', 'County'],\n right_on=['State_y', 'County'])\n\n # melt df\n df4 = pd.melt(df3,\n id_vars=['Year', 
'860 Cooling Type 1',\n                          'Generator Primary Technology', 'Water Type',\n                          'County', 'State_y', 'Water Source Name', 'State',\n                          'FIPS', 'Compartment'], var_name='FlowName')\n\n    # Assign ACB and APB columns\n    df4['ActivityConsumedBy'] = np.where(df4['FlowName'].isin(\n        ['Water Consumption (gal)', 'Water Withdrawal (gal)']),\n        df4['Generator Primary Technology'], None)\n    df4['ActivityProducedBy'] = np.where(df4['ActivityConsumedBy'].isnull(),\n                                         df4['Generator Primary Technology'],\n                                         None)\n\n    # split flowname col into flowname and unit\n    df4['Unit'] = df4['FlowName'].str.split('(').str[1]\n    df4['Unit'] = df4['Unit'].apply(lambda x: x.replace(\")\", \"\")).str.strip()\n    df4['FlowName'] = df4['FlowName'].str.split('(').str[0]\n    df4['FlowName'] = df4['FlowName'].str.strip()\n    # update water flownames\n    df4['FlowName'] = np.where(df4['FlowName'].str.contains('Water|water'),\n                               df4['Water Type'] + \" \" + df4['FlowName'],\n                               df4['FlowName'])\n    df4['FlowName'] = df4['FlowName'].apply(lambda x: x.title())\n\n    # modify compartment if consumptive\n    df4['Compartment'] = np.where(df4[\"FlowName\"].str.contains('Consumption'),\n                                  \"Air\", df4['Compartment'])\n\n    df4['Class'] = np.where(df4['FlowName'].str.contains('Water|discharge'),\n                            \"Water\", \"Energy\")\n    df4['SourceName'] = source\n    df4['DataReliability'] = 1\n    df4['DataCollection'] = 5\n    df4['FlowType'] = \"ELEMENTARY_FLOW\"\n    df4['Description'] = 'Cooling Type: ' + df4['860 Cooling Type 1']\n    df4 = df4.rename(columns={\"value\": \"FlowAmount\", \"FIPS\": \"Location\"})\n    df4 = df4.drop(\n        columns=['860 Cooling Type 1', 'Generator Primary Technology',\n                 'Water Type', 'County', 'State', 'State_y',\n                 'Water Source Name'])\n    df4 = assign_fips_location_system(df4, str(year))\n\n    return df4", "title": "" }, { "docid": "a9d7101142d8ae147261fa267b829484", "score": "0.48945856", "text": "def main(output_fpath):\n    logger = logging.getLogger(__name__)\n\n    wiki_page = 'List_of_United_States_cities_by_population'\n    logger.info('Downloading Wikipedia Article: ' + wiki_page)\n\n    (fetch_wiki(wiki_page)\n     .pipe(clean)\n     .pipe(add_features)\n     .to_csv(output_fpath, index=False))", "title": "" }, { "docid": "f4e03b3eda5779f9ca38c8ed2f490e4e", "score": "0.48902237", "text": "def main(configfile_name):\n    # # # get the configfile as a command-line parameter\n    #configfile_name = sys.argv[1]\n    # first read the configfile in\n    config = configparser.ConfigParser()\n    config.read(configfile_name)\n    print(\"{0} : Processing dataset {1}\".format(dt.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), config.get('Source file', 'id')))\n\n    # first read the data in\n    reader = dfh.CSVimporter()\n    # read generic settings from the ini file\n    # configfile_name = 'test.ini'\n    # configfile_name = 'C11.ini'\n\n\n    reader.read_file_options_from_file(configfile_name)\n    if not os.path.exists(reader.result_dir):\n        os.makedirs(reader.result_dir)\n    #reader.fault_columns = [8,9,10]\n    #reader.replace_faults = True\n    # set filename separately\n    #reader.filename = '../data/full_mean_dataset.csv'\n    # read data\n    reader.read_data()\n    data = reader.full_data\n    headers = reader.headers\n\n    # print(headers)\n    # create a new instance of the AEP loss counter\n    aepc = aep.AEPcounter()\n\n    # TODO:\n    # figure out a better way to do this.\n    if reader.replace_faults:\n        aepc.fault_dict = reader.fault_dict\n    # read data structure from file\n    aepc.set_data_options_from_file(configfile_name)\n    # read binning options from file\n    aepc.set_binning_options_from_file(configfile_name)\n    # read filter settings from file\n    
aepc.set_filtering_options_from_file(configfile_name)\n    # read options related to IPS; if the \"Icing\" section does not exist, set IPS to False\n    aepc.set_ips_options_from_file(configfile_name)\n    # aepc.starttimestamp = dt.datetime(2015, 1, 1, 0, 0, 0)\n    # aepc.stoptimestamp = dt.datetime(2015, 10, 1, 0, 0, 0)\n    # calculate air density correction based on site height using the formula from the spec\n    temperature_corrected_data = aepc.air_density_correction(data)\n    # temperature_corrected_data = data.copy()\n    # filter the corrected data based on state variable values\n    #state_filtered_data = aepc.state_filter_data(temperature_corrected_data)\n    time_limited_data = aepc.time_filter_data(temperature_corrected_data)\n    state_filtered_data = aepc.state_filter_data(time_limited_data)\n\n    # filter the data based on power level:\n    # remove datapoints where output power is below 0.01 * aepc.rated_power\n    #power_level_filtered_data = aepc.power_level_filter(state_filtered_data, 0.01)\n    power_level_filtered_data = aepc.power_level_filter(state_filtered_data)\n    # create power curves. This bins the data according to wind speed and direction and does some\n    # filtering and interpolation to fill over gaps in the source data.\n\n    # only use the part of the data where temperature is above 3 degrees Celsius for the power curve;\n    # use the full dataset for the reference and the time-limited data for the loss calculation\n    s_reference_data = aepc.state_filter_data(temperature_corrected_data)\n    d_reference_data = aepc.temperature_filter_data(s_reference_data)\n    pd_reference_data = aepc.power_level_filter(d_reference_data)\n    reference_data = aepc.diff_filter(pd_reference_data)\n    pc = aepc.count_power_curves(reference_data)\n    # rfw.write_power_curve_file('../results/power_curve.txt', pc, aepc)\n    # save data sizes into a list, in order: original, filtered, reference\n    data_sizes = [len(data), len(state_filtered_data), len(reference_data)]\n\n    # find stoppages as defined in the specification\n    # find power drops and flag them\n    if aepc.stop_filter_type == 0:\n        stops = aepc.find_icing_related_stops(state_filtered_data, pc)\n        pow_alms1 = aepc.power_alarms(power_level_filtered_data, pc)\n        status_timings = None\n        status_stops = None\n    elif (aepc.stop_filter_type == 2) or (aepc.stop_filter_type == 1):\n        status_stops = aepc.status_code_stops(time_limited_data, pc)\n        stops = aepc.find_icing_related_stops(state_filtered_data, pc)\n        pow_alms1 = aepc.power_alarms(power_level_filtered_data, pc)\n        status_timings = aepc.power_loss_during_alarm(status_stops)\n    else:\n        stops = None\n        status_stops = None\n        status_timings = None\n        pow_alms1 = aepc.power_alarms(power_level_filtered_data, pc)\n    if aepc.heated_site:\n        ips_on_flags = aepc.status_code_stops(time_limited_data, pc, filter_type='ips')\n        ips_timings = aepc.power_loss_during_alarm(ips_on_flags, ips_alarm=True)\n    else:\n        ips_on_flags = None\n        ips_timings = None\n    if aepc.ice_detection:\n        ice_detected = aepc.status_code_stops(time_limited_data, pc, filter_type='icing')\n        ice_timings = aepc.power_loss_during_alarm(ice_detected)\n    else:\n        ice_detected = None\n        ice_timings = None\n    # find over-production incidents and flag them\n    pow_alms2 = aepc.power_alarms(power_level_filtered_data, pc, over=True)\n    # find start and stop times of alarms in the structure containing the power drop flags\n    alarm_timings = aepc.power_loss_during_alarm(pow_alms1)\n    stop_timings = aepc.power_loss_during_alarm(stops)\n    over_timings = aepc.power_loss_during_alarm(pow_alms2)\n\n    # re-do the reference dataset\n    #new_ref = 
aepc.increase_reference_dataset(time_limited_data, stop_timings, alarm_timings, over_timings)\n #new_pc = aepc.count_power_curves(new_ref)\n\n rfw = dfh.Result_file_writer()\n rfw.set_output_file_options(configfile_name)\n\n if rfw.summaryfile_write:\n summary_status, summary_filename, summary_error = rfw.summary_statistics(aepc, time_limited_data, reference_data, pc, alarm_timings, stop_timings, over_timings, status_timings, ice_timings, ips_timings, data_sizes)\n if summary_status:\n print(\"{0} : Summary written successfully into: {1}\".format(dt.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), summary_filename))\n else:\n print(\"{0} : Problem writing summary: {1}\".format(dt.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), summary_error))\n\n if rfw.power_curve_write:\n power_curve_status, pc_filename, pc_error = rfw.write_power_curve(aepc,pc)\n if power_curve_status:\n print('{0} : Power curve written successfully into: {1}'.format(dt.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), pc_filename))\n else:\n print('{0} : Problem writing power curve: {1}'.format(dt.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), pc_error))\n\n if rfw.icing_events_write:\n # TODO: write these to one file\n prod_loss_trunk = '_losses.csv'\n stops_trunk = '_stops.csv'\n status_trunk = '_status.csv'\n ips_trunk = '_ips.csv'\n icing_trunk = '_ice_det.csv'\n losses_filename = aepc.result_dir + aepc.id + prod_loss_trunk\n stops_filename = aepc.result_dir + aepc.id + stops_trunk\n status_filename = aepc.result_dir + aepc.id + status_trunk\n ips_filename = aepc.result_dir + aepc.id + ips_trunk\n icing_filename = aepc.result_dir + aepc.id + icing_trunk\n loss_status, loss_write_error = rfw.write_alarm_timings(losses_filename, alarm_timings)\n if loss_status:\n print('{0} : Icing loss statistics written successfully into: {1}'.format(dt.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), losses_filename))\n else:\n print('{0} : Error writing icing loss statistics: {1}'.format(dt.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), loss_write_error))\n stop_status, stop_write_error = rfw.write_alarm_timings(stops_filename, stop_timings)\n if stop_status:\n print('{0} : Icing stops statistics written successfully into: {1}'.format(dt.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), stops_filename))\n else:\n print('{0} : Error writing icing stop statistics: {1}'.format(dt.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), stop_write_error))\n if aepc.status_stop_index[0] > 0:\n status_status, status_write_error = rfw.write_alarm_timings(status_filename, status_timings)\n if status_status:\n print('{0} : Status Code statistics written successfully into: {1}'.format(dt.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), status_filename))\n else:\n print('{0} : Error writing Status Code statistics: {1}'.format(dt.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), status_write_error))\n if aepc.heated_site:\n ips_status, ips_write_error = rfw.write_alarm_timings(ips_filename, ips_timings)\n if ips_status:\n print('{0} : Status Code statistics written successfully into: {1}'.format(dt.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), ips_filename))\n else:\n print('{0} : Error writing Status Code statistics: {1}'.format(dt.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), ips_write_error))\n if aepc.ice_detection:\n icing_status, icing_write_error = rfw.write_alarm_timings(icing_filename, ice_timings)\n if icing_status:\n print('{0} : Status Code statistics written successfully into: {1}'.format(dt.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), icing_filename))\n else:\n 
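                    # a failed write is only reported here; processing continues so the remaining outputs are still produced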
print('{0} : Error writing Status Code statistics: {1}'.format(dt.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), icing_write_error))\n\n        #TODO: make the ice detector and IPS optional. Currently the code inserts dummy values for IPS; not a clean solution.\n        monthly_stat_status, stat_filename, stat_write_error = rfw.write_monthly_stats(time_limited_data, pc, aepc, pow_alms1, stops, status_stops, ips_on_flags, ice_detected)\n        if monthly_stat_status:\n            print('{0} : Monthly icing loss timeseries written into: {1}'.format(dt.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), stat_filename))\n        else:\n            print('{0} : Error writing loss timeseries: {1}'.format(dt.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), stat_write_error))\n\n    if rfw.alarm_time_series_file_write:\n        # write out the results\n        combined_ts = aepc.combine_timeseries(pow_alms1, stops, pow_alms2)\n        alarm_timeseries_filename = aepc.result_dir + aepc.id + '_alarms.csv'\n        ts_write_status, ts_write_error = rfw.write_alarm_file(alarm_timeseries_filename, combined_ts)\n        if ts_write_status:\n            print('{0} : Time series written successfully into: {1}'.format(dt.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), alarm_timeseries_filename))\n        else:\n            print('{0} : Error writing time series file: {1}'.format(dt.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), ts_write_error))\n\n    if rfw.filtered_raw_data_write:\n        filtered_data_filename = aepc.result_dir + aepc.id + '_filtered.csv'\n        new_data = rfw.insert_fault_codes(aepc.time_filter_data(temperature_corrected_data), aepc, reader)\n        raw_write_status, raw_write_error = rfw.write_time_series_file(filtered_data_filename, new_data, headers, aepc, pc)\n        if raw_write_status:\n            print('{0} : Filtered data written successfully to: {1}'.format(dt.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), filtered_data_filename))\n        else:\n            print('{0} : Error writing raw data: {1}'.format(dt.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), raw_write_error))\n\n    if rfw.pc_plot_picture:\n        if aepc.heated_site:\n            rfw.generate_standard_plots(temperature_corrected_data, pc, aepc, pow_alms1, pow_alms2, stops, data_sizes,\n                                        alarm_timings, over_timings, stop_timings, ips_on_flags, True)\n        else:\n            rfw.generate_standard_plots(temperature_corrected_data, pc, aepc, pow_alms1, pow_alms2, stops, data_sizes,\n                                        alarm_timings, over_timings, stop_timings, None, True)", "title": "" }, { "docid": "a8bef5e2aa61d86c8d235c7dc589802d", "score": "0.4889916", "text": "def process_source(self, sourcefile, limit=-1):\n        header_names = ['place_id', 'name', 'feat_class', 'feat_code', 'adm1',\n                        'cc', 'FIPS_cc', 'source', 'name_bias', 'id_bias', 'name_type', 'lat', 'lon']\n        with open(sourcefile, \"r\", encoding=\"UTF-8\") as fh:\n            df = get_csv_reader(fh, delim=\",\", columns=header_names)\n            self.purge()\n            for row in df:\n                if row[\"place_id\"] == \"place_id\":\n                    continue\n                self.rowcount += 1\n                pl = as_place(row, source=\"db\")\n                pl.id = GENERATED_BLOCK + self.rowcount\n                pl.search_only = 0\n                pl.name_group = \"\"\n                pl.adm2 = \"\"\n                pl.name_script = SCRIPT_CODES.get(\"LATIN\")\n                yield pl", "title": "" }, { "docid": "a374f1fcb540b676e73d063808d3501d", "score": "0.4887667", "text": "def on_but_srcs(self):\n        self.exportLocalPars()\n        txt = '\\n' + 50*'-' + '\\nSources from DB:\\n' \\\n              + cp.blsp.txt_of_sources_in_run()\n        logger.info(txt, __name__)", "title": "" }, { "docid": "6b4b9b96542d13172877937f585cc952", "score": "0.48608214", "text": "def initialize_country(self, name : str, population : int , yearly_change: float, net_change : int, density : int, land_area : int, migrants, fertilisation_rate 
: float, med_age : int, urban_pop : float, world_share : float):\n self.name = str.lower(name)\n self.pop_total = population\n self.yearly_change = float(yearly_change) if yearly_change is not None else None \n self.net_change = net_change\n self.pop_density = density\n self.land_area = land_area\n self.migrant_variation = migrants\n self.fertilisation_rate = float(fertilisation_rate) if fertilisation_rate is not None else None \n self.middle_age = int(med_age) if med_age is not None else None \n self.urban_pop = float(urban_pop) if urban_pop is not None else None \n self.world_share = float(world_share) if world_share is not None else None", "title": "" }, { "docid": "471428ac1030c2f77488f01f98d4cc4a", "score": "0.48595542", "text": "def __init__(self, source_file):\n\n if not os.path.isfile(source_file):\n print(\"Oops.\")\n print(\"File '\" + source_file + \"' not found\")\n sys.exit(1)\n\n self.families = {}\n self.individuals = {}\n\n self.families, self.individuals = ged_parser.build_lists_from_file(\n source_file)", "title": "" }, { "docid": "81ee08447fbc0a7b1b9c04440bcd2a3e", "score": "0.48352608", "text": "def get_cities():\n\n data = \"\"\n with urllib.request.urlopen('https://gist.githubusercontent.com/Miserlou/11500b2345d3fe850c92/raw/e36859a9eef58c231865429ade1c142a2b75f16e/gistfile1.txt') as text:\n data = text.read().decode('utf-8')\n\n if not data:\n print(\"No data read from URL, closing\")\n return []\n\n data = data.split('\\n')\n\n #list of dictionaries of cities\n cities = []\n for line in data:\n #only take lines that have text\n if line:\n column_data = line.split(',')\n #check for the right number of columns\n if len(column_data) == 5:\n #check that the first column is the rank\n if column_data[0].isnumeric():\n cities.append({RANK:column_data[0],\n CITY:column_data[1],\n STATE:column_data[2],\n POPULATION:column_data[3]})\n return cities", "title": "" }, { "docid": "eb5435194aa33caecb94e110d3a43c78", "score": "0.48342243", "text": "def update():\n filename = 'http://download.ip2location.com/lite/IP2LOCATION-LITE-DB1.CSV.ZIP'\n if len(sys.argv) > 1:\n filename = sys.argv[1]\n if filename.startswith(('http://', 'https://')):\n file = io.BytesIO(urllib.request.urlopen(filename).read())\n else:\n file = open(filename, 'rb')\n if filename.lower().endswith('.zip'):\n text = zipfile.ZipFile(file).read('IP2LOCATION-LITE-DB1.CSV')\n else:\n text = file.read()\n # generate geoip_db.go\n ips, geo = [], []\n for line in io.BytesIO(text):\n parts = line.strip().decode().split(',')\n ip = parts[0].strip('\"')\n country = parts[2].strip('\"')\n if country == '-':\n country = 'ZZ'\n ips.append(ip)\n geo.append(country)\n with open('geoip_db.go', 'wb') as file:\n file.write(('''package geoip\n\nvar ips = []uint32{%s}\nvar geo = []byte(\"%s\")\n''' % (','.join(ips), ''.join(geo))).encode())", "title": "" }, { "docid": "98db6c7e97bb66c0eb211f57a1bce937", "score": "0.48316938", "text": "def load_companies():\n\n print(\"Company\")\n\n # opening seed file with the csv library and csv reader. 
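    # The 0-based column indexes read below (1, 44, 49, 76) are assumed to match the\n    # production sheet's layout; a hypothetical lookup table would make this explicit:\n    #   COLS = {'utility': 1, 'contractor': 44, 'pv_manuf': 49, 'invert_manuf': 76}\n    #   utility_abbreviation = row[COLS['utility']]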
\n    with open('seed_production/WorkingDataSet_2-19-2020.csv') as csv_file:\n    # below line will load 'test' seed file\n    # with open('seed_test/programs_test_csv.csv') as csv_file:\n        csv_reader = csv.reader(csv_file, delimiter=',')\n        line_count = 0\n        \n        utility_list = []\n        contractor_list = []\n        pv_manuf_list = []\n        invert_manuf_list = []\n\n        for row in csv_reader:\n            if line_count == 0:\n                # print(f'Column names are {\", \".join(row)}')\n                line_count += 1\n            else:\n                utility_abbreviation = row[1]\n                contractor_company = row[44]\n                pv_manuf_company = row[49]\n                invert_manuf_company = row[76]\n\n                # print functions for debugging:\n                # print(f' LINE COUNT: {line_count}')\n                # print(f'{utility_abbreviation}')\n                # print(f'{contractor_company}')\n                # print(f'{pv_manuf_company}')\n                # print(f'{invert_manuf_company}')\n\n                # append companies to the appropriate lists:\n                if utility_abbreviation not in utility_list:\n                    utility_list.append(utility_abbreviation)\n\n                if contractor_company not in contractor_list:\n                    contractor_list.append(contractor_company)\n\n                if pv_manuf_company not in pv_manuf_list:\n                    pv_manuf_list.append(pv_manuf_company)\n\n                if invert_manuf_company not in invert_manuf_list:\n                    invert_manuf_list.append(invert_manuf_company)\n\n                # increase the line count by 1\n                line_count += 1\n\n        # print functions for debugging:\n        # print(f'utility list: {utility_list}')\n        # print(f'contractor_list: {contractor_list}')\n        # print(f'pv_manuf_list: {pv_manuf_list}')\n        # print(f'invert_manuf_list: {invert_manuf_list}')\n\n        # print(f'Processed {line_count} lines.')\n\n    for utility in utility_list:\n        # convert company abbreviations into company names\n        name = get_utility_name(utility)\n\n        # create instances of the company class to add to the database\n        company = Company(name=name,\n                          company_type='Utility and Energy Producer')\n        # print(company)\n\n        # Add to the session to the database - NEED TO ADD\n        db.session.add(company)\n\n    for contractor in contractor_list:\n        # create instances of the company class to add to the database\n        company = Company(name=contractor,\n                          company_type='Solar Contractor')\n        # print(company)\n\n        # Add to the session to the database - NEED TO ADD\n        db.session.add(company)\n\n    for pv_manuf in pv_manuf_list:\n        # create instances of the company class to add to the database\n        company = Company(name=pv_manuf,\n                          company_type='photovoltaic manufacturer')\n\n        # Add to the session to the database - NEED TO ADD\n        db.session.add(company)\n\n    for invert_manuf in invert_manuf_list:\n        # create instances of the company class to add to the database\n        company = Company(name=invert_manuf,\n                          company_type='inverter manufacturer')\n        # print(company)\n\n        # Add to the session to the database - NEED TO ADD\n        db.session.add(company)\n\n    # Commit our work so it saves to the database\n    db.session.commit()\n    print(f'Company line_count: {line_count}')", "title": "" }, { "docid": "376cf5147ef07974ed104ce2831768dc", "score": "0.48311025", "text": "def make_confiance_menages():\n    print('>> Handling \"INSEE Confiance Menages\" data for year...')\n    raw_cols = [\n        'date',\n        'confiance_menage_synthetique',\n        'niveau_vie_passe',\n        'niveau_vie_perspective',\n        'chomage_perspective',\n        'prix_passe',\n        'prix_perspective',\n        'opportunite_achat_important',\n        'opportunite_epargne',\n        'epargne_capa_actuelle',\n        'finance_pers_passe',\n        'finance_pers_perspective',\n        'epargne_capa_perspective'\n    ]\n    latest_year = str(max(\n        [int(item) for item in source_config.confiance_data_url[\n            'menage'].keys()]))\n    latest_month = max([int(item) for item in 
source_config.confiance_data_url[\n        'menage'][latest_year].keys()])\n    raw_confiance_file = source_config.confiance_data_raw_file['menage'][latest_year][latest_month]\n    confiance_raw = pd.read_excel(raw_confiance_file, skiprows=5)\n    confiance_raw.columns = raw_cols\n    confiance_processed_file_prefix = source_config.confiance_data_processed_file[\n        'menage'][latest_year][latest_month]\n    res = defaultdict(list)\n    for col_name in raw_cols[1:]:\n        for index, row in confiance_raw.iterrows():\n            date_year = pd.to_datetime(row['date']).year\n            date_month = pd.to_datetime(row['date']).month\n            res[col_name].append([date_year, date_month, col_name, row[col_name]])\n        output_data = pd.DataFrame(res[col_name], columns=['year', 'month', 'indicator_type', 'indicator_value'])\n        output_file = os.path.join(confiance_processed_file_prefix, 'insee_confiance_{}.xls'.format(col_name))\n        write_excel_file(output_data, output_file=output_file)", "title": "" }, { "docid": "e775d858fbda9ec16d6cef311ac8ea10", "score": "0.48274285", "text": "def load_BDEW_style_profiles(source_file, weather_data, cfg, houses_dict,\n                             energy_type):\n    energy_types_annual = {'Q_TWW_TT': 'Q_TWW_a', 'W_TT': 'W_a'}\n    energy_type_annual_str = energy_types_annual[energy_type]\n\n    settings = cfg['settings']\n    # For the 'noarch' conda build, access the file as a pkg resource object\n    with pkg_resources.resource_stream('lpagg', source_file) as resource:\n        source_df = pd.read_excel(resource, sheet_name=None,\n                                  skiprows=[0], header=[0, 1], index_col=[0],\n                                  skipfooter=1,\n                                  )\n    weather_daily = (weather_data.resample('D', label='right', closed='right')\n                     .mean(numeric_only=True))\n    # print(weather_daily)\n\n    houses_list = settings['houses_list_BDEW']\n    multiindex = pd.MultiIndex.from_product([houses_list, [energy_type]],\n                                            names=['house', 'energy'])\n    ret_profiles = pd.DataFrame(index=weather_data.index,\n                                columns=multiindex)\n    if len(houses_list) == 0:  # Skip\n        return ret_profiles\n\n    for house_name in houses_list:\n        if pd.isna(houses_dict[house_name][energy_type_annual_str]):\n            continue\n        elif houses_dict[house_name][energy_type_annual_str] == 0:\n            continue\n\n        house_type = houses_dict[house_name]['house_type']\n        if house_type not in source_df.keys():\n            # Only use 'H0G', 'G0G', 'G1G', ...\n            logger.warning('house_type \"'+str(house_type)+'\" not found in '\n                           'profile sources: '+str(source_df.keys()))\n            continue\n\n        # Create the yearly profile for the current house\n        profile_year = pd.Series(dtype='object')\n        for date in weather_daily.index:\n            weekday = weather_data.loc[date]['weekday_BDEW']\n            season = weather_data.loc[date]['season_BDEW']\n            # Important: In order to identify the weekday of the resampled days,\n            # we labeled them 'right'. 
From now on we need the label 'left',\n            # so we subtract '1 day' from each date:\n            date -= pd.Timedelta('1 day')\n\n            source_profile = source_df[house_type][season, weekday]\n\n            # Combine date and time stamps\n            profile_daily = source_profile.copy()\n            index_new = []\n            for time_idx in source_profile.index:\n                try:\n                    # Turn time stamp into a time difference (delta)\n                    delta = pd.to_timedelta(str(time_idx))\n                    if delta == pd.to_timedelta(0):\n                        # Delta of zero must be a full day\n                        delta = pd.to_timedelta('24 h')\n                except Exception:\n                    # The last entry of each profile ('0:00') is sometimes\n                    # stored as a full date (1900-1-1 00:00) in Excel\n                    delta = pd.to_timedelta('24 h')\n\n                # Create full time stamp of date and time for the new index\n                datetime_idx = date + delta\n                index_new.append(datetime_idx)\n\n            profile_daily.index = index_new\n\n            # Append to yearly profile\n            profile_year = pd.concat([profile_year, profile_daily])\n\n        # Convert unit from 'W' to 'kWh'\n        freq = pd.infer_freq(profile_year.index)\n        freq = pd.to_timedelta(to_offset(freq))\n        profile_year *= freq / (pd.Timedelta('1 hours') * 1000)  # W to kWh\n\n        # Resample to the desired frequency (time interval)\n        profile_year = lpagg.misc.resample_energy(\n            profile_year, settings['interpolation_freq'])\n\n        # Store in DataFrame that will be returned\n        ret_profiles[house_name, energy_type] = profile_year\n\n    return ret_profiles", "title": "" }, { "docid": "562101817a292e96133f44962acd920d", "score": "0.48134565", "text": "def main():\n    data_file = _allZip\n    if not os.path.exists(_allZip):\n        data_file = download_all_countries()\n\n    read_geonames(data_file)", "title": "" }, { "docid": "aadca3b74d1273c2037a4cc862cd7394", "score": "0.48007455", "text": "def extract_file(self):\n\n\t\t# file manager\n\t\tos.chdir(self.source_directory)\n\t\tfichier = open(self.file, \"r\")\n\t\traw_data = fichier.readlines()\n\t\tfichier.close()\n\n\t\t# source manager\n\t\tif self.source == \"abcbourse\":\n\t\t\tfor i, _ in enumerate(raw_data):\n\t\t\t\traw_data[i] = raw_data[i].replace(\"\\n\", \"\")\n\t\t\t\traw_data[i] = raw_data[i].replace(\";\", \",\")\n\t\t\t\traw_data[i] = raw_data[i].split(\",\")\n\n\t\t\tself.size = len(raw_data)\n\t\t\tself.name = raw_data[0][0]\n\n\t\t\tfor i in range(self.size):\n\n\t\t\t\tfor j in range(2, len(raw_data[i])-2, 2):\n\t\t\t\t\traw_data[i][j] = int(raw_data[i][j]) + (int(raw_data[i][j+1])/100)\n\t\t\t\tfor j in [9, 7, 5, 3, 0]:\n\t\t\t\t\tdel raw_data[i][j]\n\t\t\t\traw_data[i].insert(0, i+1)\n\n\t\t\tfor i in range(self.size):\n\t\t\t\traw_data[i][len(raw_data[i])-1] = int(raw_data[i][len(raw_data[i])-1]) + 1\n\n\t\t# implement this later\n\t\telif self.source == \"yahoo_finance\":\n\t\t\traise ValueError\n\t\telif self.source == \"Google_finance\":\n\t\t\traise ValueError\n\t\telse:\n\t\t\traise ValueError\n\n\t\tself.raw_data = raw_data", "title": "" }, { "docid": "6780f572df4d09dd52ffe5be3739a496", "score": "0.47997156", "text": "def main(country_iso3, admin_level, suffix, config_file=\"config.yml\"):\n    parameters = parse_yaml(config_file)[country_iso3]\n    country = parameters[\"country_name\"]\n\n    FEWS_PROCESSED_FOLDER = f\"{country}/Data/FewsNetProcessed/\"\n    GIPC_PROCESSED_FOLDER = f\"{country}/Data/GlobalIPCProcessed/\"\n    processed_fews_path = (\n        f\"{FEWS_PROCESSED_FOLDER}{country}_fewsnet_admin{admin_level}{suffix}.csv\"\n    )\n    processed_globalipc_path = (\n        f\"{GIPC_PROCESSED_FOLDER}{country}_globalipc_ADMIN{admin_level}{suffix}.csv\"\n    )\n\n    RESULT_FOLDER = f\"{country}/Data/IPC_trigger/\"\n    
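    # make sure the output directory exists before any results are written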
Path(RESULT_FOLDER).mkdir(parents=True, exist_ok=True)\n\n # 3p = IPC level 3 or higher, 2m = IPC level 2 or lower\n ipc_cols = [\n f\"{period}_{i}\"\n for period in [\"CS\", \"ML1\", \"ML2\"]\n for i in [1, 2, 3, 4, 5, \"3p\", \"2m\"]\n ] + [\n f\"perc_{period}_{i}\"\n for period in [\"CS\", \"ML1\", \"ML2\"]\n for i in [1, 2, 3, 4, 5, \"3p\", \"2m\"]\n ]\n pop_cols = [\n f\"pop_{period}\" for period in [\"CS\", \"ML1\", \"ML2\", f\"ADMIN{admin_level}\"]\n ]\n\n # TODO: implement ADMIN0 in preprocess scripts, and select it here as well\n adm_cols = [f\"ADMIN{a}\" for a in range(1, int(admin_level) + 1)]\n\n # initialize dataframes such that can later check if they are filled with data\n df_fewss = None\n df_gipcs = None\n\n if os.path.exists(processed_fews_path):\n df_fews = pd.read_csv(processed_fews_path, index_col=0)\n # TODO: adjust column names in process_fewsnet.py instead\n df_fews = df_fews.rename(\n columns={\n parameters[\"shp_adm1c\"]: \"ADMIN1\",\n parameters[\"shp_adm2c\"]: \"ADMIN2\",\n \"adjusted_population\": f\"pop_ADMIN{admin_level}\",\n }\n )\n df_fews = add_columns(df_fews, \"FewsNet\")\n df_fewss = df_fews[[\"date\", \"Source\"] + adm_cols + pop_cols + ipc_cols]\n\n if os.path.exists(processed_globalipc_path):\n df_gipc = pd.read_csv(processed_globalipc_path, index_col=0)\n df_gipc = add_columns(df_gipc, \"GlobalIPC\")\n df_gipcs = df_gipc[[\"date\", \"Source\"] + adm_cols + pop_cols + ipc_cols]\n\n if df_fewss is not None and df_gipcs is not None:\n df_comb = pd.concat([df_fewss, df_gipcs])\n df_comb_trig = compute_trigger(df_comb)\n\n elif df_fewss is not None:\n df_comb = df_fewss\n df_comb_trig = compute_trigger(df_comb)\n logger.warning(\"No Global IPC data found\")\n\n elif df_gipcs is not None:\n df_comb = df_gipcs\n df_comb_trig = compute_trigger(df_comb)\n logger.warning(\"No FewsNet data found\")\n\n else:\n df_comb_trig = pd.DataFrame()\n logger.warning(\"No data found\")\n\n df_comb_trig.to_csv(\n f\"{RESULT_FOLDER}trigger_results_admin{admin_level}{suffix}.csv\", index=False\n )", "title": "" }, { "docid": "7abec95812260656cac05e8ed3b835f5", "score": "0.4781446", "text": "def collect_data_points():\n files = []\n for (dirpath, dirnames, filenames) in walk('./input/'):\n files.extend(filenames)\n if 'README.md' in files:\n files.remove('README.md')\n \n with open('./input/' + files[0]) as f:\n lines = list(reader(f, delimiter=';'))\n\n occupation_dict = {}\n occupation_count = {}\n state_count = {}\n certified_count = 0\n indices = populate_indices(lines[0])\n \n for line in lines[1:]:\n if line[indices['status']] != 'CERTIFIED':\n continue \n \n soc = clean_soc(line[indices['soc']]) \n job_name = line[indices['job_name']]\n job_state = line[indices['job_state']].upper()\n if not match('\\d{2}-\\d{4}', soc) or not match('^[A-Z]{2}$', job_state):\n continue\n \n certified_count += 1\n \n if soc not in occupation_dict:\n occupation_dict[soc] = job_name\n occupation_count[soc] = 0\n elif len(occupation_dict[soc]) == 0:\n occupation_dict[soc] = job_name\n occupation_count[soc] += 1\n \n if job_state not in state_count:\n state_count[job_state] = 0\n state_count[job_state] += 1\n return {\n 'occupation_dict' : occupation_dict,\n 'occupation_count': occupation_count,\n 'state_count' : state_count,\n 'certified_count': certified_count\n }", "title": "" }, { "docid": "5808d58158fb963b9ea4261344547727", "score": "0.47798878", "text": "def getdata() -> List:\n res = list()\n\n # get CO2 data\n res.extend(co2_read_csv('data/annual-co-emissions-by-region.csv',\n 
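                              # the second argument is presumably a country filter for the project's co2_read_csv helper (here: 'Brazil')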
'Brazil'))\n\n # get deforestation data\n res.extend(deforestation_read_csv('data/deforestation.csv', 'Estimated Natural Forest Cover'))\n\n # get precipitation data\n res.extend(precipitation_read_hdf('data/3B43_rainfall', (0.553222, -65.162917),\n (-4.070444, -52.109639)))\n\n return res", "title": "" }, { "docid": "4996698a6abe843de0813b3e979d75a6", "score": "0.47666872", "text": "def collect_data(cfg):\n # use different function according to env type\n if cfg.type == \"gym\":\n return collect_env_data_gym(cfg)\n elif cfg.type == \"custom\":\n return collect_env_data_custom(cfg)\n else:\n raise NotImplementedError", "title": "" }, { "docid": "45553c90c9e6cb4b7af276f7e3b52b7b", "score": "0.47597235", "text": "def test_city_country_population(self):\n formatted_location = get_city_country('seoul', 'korea', 9_967_677)\n self.assertEqual(\n formatted_location, 'Seoul, Korea - Population 9967677')", "title": "" }, { "docid": "dad096f616496a937f51da04509c0aea", "score": "0.4751114", "text": "def getGenInfo(self, gen_data):\n\t\t# Make sure values in Gen MW column are float values, not strings\n\t\tgen_data['Gen MW'] = pd.to_numeric(gen_data['Gen MW'], errors='ignore')\n\t\t\n\t\t# Get generators in each area listed in SYSTEM_AREA_NAMES, if specified\n\t\ttry:\n\t\t\t# Get possible area names\n\t\t\tall_area_names = set(gen_data['Area Name'])\n\t\t\t\n\t\t\t# Create list of area names that should be kept based on user specification and \n\t\t\t# those available in gens.csv\n\t\t\tkeep_area_names = [area_name for area_name in all_area_names if area_name in SYSTEM_AREA_NAMES]\n\t\t\t\n\t\t\t# Get generator information for only the areas listed in keep_area_names\n\t\t\t# Provided user has specified correct areas to be considered\n\t\t\tif len(keep_area_names):\n\t\t\t\tgens = gen_data[gen_data['Area Name'].isin(keep_area_names)]\n\t\t\telse:\n\t\t\t\t# Deal with the case where the area name(s) listed in SYSTEM_AREA_NAMES do not\n\t\t\t\t# match any of the areas listed in the gens.csv file.\n\t\t\t\tgens = gen_data.copy()\n\t\t\t\tprint('\\n-----------------------------WARNING MESSAGE-----------------------------')\n\t\t\t\tprint('The program assumed you wanted to include generator data for\\n'\n\t\t\t\t\t 'all areas in the gens.csv file, because it could not find the\\n'\n\t\t\t\t\t 'area name(s) you listed. Make sure you have specified the correct\\n' \n\t\t\t\t\t 'area(s) you want to use in the userdef.py file for the variable,\\n'\n\t\t\t\t\t 'SYSTEM_AREA_NAMES, and then run the program again.')\n\t\t\t\tprint('-----------------------------WARNING MESSAGE-----------------------------\\n')\n\t\t\t\n\t\texcept NameError:\n\t\t\t# Exception to deal with the case that the user accidentally deleted the variable\n\t\t\t# SYSTEM_AREA_NAMES from the userdef.py file.\n\t\t\tgens = gen_data.copy()\n\t\t\tprint('\\n-----------------------------WARNING MESSAGE-----------------------------')\n\t\t\tprint('The program assumed you wanted to include generator data for\\n'\n\t\t\t\t 'all areas in the gens.csv file, because it could not find your\\n'\n\t\t\t\t 'definition of the variable SYSTEM_AREA_NAMES in the userdef.py\\n'\n\t\t\t\t 'It is possible you accidentally deleted it. So, create a new\\n'\n\t\t\t\t 'one in the USER SPECIFICATIONS section and include the areas you\\n'\n\t\t\t\t 'want, and run the program again.\\n'\n\t\t\t\t \"Eg. 
SYSTEM_AREA_NAMES = ['MY AREA NAME']\")\n\t\t\tprint('-----------------------------WARNING MESSAGE-----------------------------\\n')\n\n\t\t\n\t\tdef getGenBuses(gen_data):\n\t\t\t\"\"\"\n\t\t\tReturn the substation name and the bus numbers that reside in it, as a data frame\n\t\t\t\n\t\t\tParameters\n\t\t\t----------\n\t\t\tgen_data:\tdataframe with generator information including, bus number, \n\t\t\t\t\t\tgenerating amount, substation name, area name\n\t\t\t\n\t\t\tOutput\n\t\t\t------\n\t\t\tDataframe indexed by substation name and column with a list of all \n\t\t\tthe generator buses at each substation\n\t\t\t\"\"\"\n\t\t\t# Initialize dictionary to store the bus numbers for each substation\n\t\t\tbuses_by_sub = {}\n\t\t\n\t\t\t# Loop through each unique substation\n\t\t\tfor sub in gen_data['Sub Name'].unique():\n\t\t\n\t\t\t\t# As long as the substation name is a string (eliminate any 'nan' float values)\n\t\t\t\tif isinstance(sub, str):\n\t\t\t\n\t\t\t\t\t# Extract data for that specific substation\n\t\t\t\t\tsub_data = gen_data[gen_data['Sub Name'] == sub]\n\t\t\t\n\t\t\t\t\t# Initialize empty list to store bus numbers for the current substation\n\t\t\t\t\tbus_list = []\n\t\t\t\n\t\t\t\t\t# Loop through all the buses in each substation\n\t\t\t\t\tfor bus_num in sub_data['Bus Number']:\n\t\t\t\t\t\tbus_list.append(str(bus_num))\n\t\t\t\n\t\t\t\t\t# Save bus list to full dictionary\n\t\t\t\t\tbuses_by_sub[sub] = ', '.join(bus_list)\n\n\t\t\t# Make dictionary of bus numbers at each substaion into a dataframe\n\t\t\tbuses_by_sub = pd.DataFrame.from_dict(buses_by_sub, orient='index')\n\t\t\tbuses_by_sub.sort_index(axis=0, inplace=True)\n\t\t\tbuses_by_sub.columns = ['Bus Numbers']\n\t\t\t\n\t\t\treturn buses_by_sub\n\n\t\t# Count number of generators and total gen MW, save bus numbers and area name at each substation\n\t\tnum_gens = gens['Sub Name'].value_counts()\n\t\tnum_gens.name = 'Num Gens'\n\t\tmw_cap = gens.groupby(['Sub Name'])['Gen MW'].sum()\n\t\tmw_cap.name = 'Total Gen MW'\n\t\tbus_nums = getGenBuses(gens)\n\t\tarea_names = gens[['Sub Name', 'Area Name']].drop_duplicates().dropna()\n\t\tarea_names.set_index('Sub Name', inplace=True)\n\n\t\t# Merge all data into one dataframe\n\t\tgen_data_final = pd.merge(area_names, pd.DataFrame(bus_nums), left_index=True, right_index=True)\n\t\tgen_data_final = pd.merge(gen_data_final, pd.DataFrame(mw_cap), left_index=True, right_index=True)\n\t\tgen_data_final = pd.merge(gen_data_final, pd.DataFrame(num_gens), left_index=True, right_index=True) \n\t\tgen_data_final = gen_data_final[['Area Name', 'Total Gen MW', \n\t\t\t\t\t\t\t\t\t\t 'Num Gens', 'Bus Numbers']].sort_values(by='Total Gen MW', ascending=False)\n\t\t\n\t\treturn gen_data_final", "title": "" }, { "docid": "763d069d954faa4546da905072fec052", "score": "0.4745015", "text": "def _parse_org_name_to_country_map(\n f: Iterator[str]) -> Dict[str, Tuple[str, str]]:\n # pyformat: disable\n # pyformat: enable\n line = next(f)\n\n while line != ORG_TO_COUNTRY_HEADER:\n # Throw away starter comment lines\n line = next(f)\n\n org_name_to_country_map: Dict[str, Tuple[str, str]] = {}\n\n while line != AS_TO_ORG_HEADER:\n org_id, changed_date, org_name, country, source = line.split(\"|\")\n org_name_to_country_map[org_id] = (org_name, country)\n\n line = next(f)\n\n return org_name_to_country_map", "title": "" }, { "docid": "9d825abedd3e18813a369fbfaf6b9878", "score": "0.473997", "text": "def build_input_database():\n # Load input database, where we will store the data.\n db_name = 
get_new_database_name()\n database = Database(db_name)\n\n # Load Excel sheets into the database.\n excel_glob = os.path.join(constants.EXCEL_PATH, \"*.xlsx\")\n excel_sheets = glob.glob(excel_glob)\n for file_path in excel_sheets:\n filename = os.path.basename(file_path)\n header_row = HEADERS_LOOKUP[filename] if filename in HEADERS_LOOKUP else 0\n data_title = OUTPUT_NAME[filename] if filename in OUTPUT_NAME else filename\n file_df = pd.read_excel(\n pd.ExcelFile(file_path),\n header=header_row,\n index_col=1,\n sheet_name=TAB_OF_INTEREST[filename],\n )\n print(\"Reading '%s' tab of '%s' file\" % (TAB_OF_INTEREST[filename], filename))\n file_df.to_sql(data_title, con=database.engine, if_exists=\"replace\")\n\n # Load CSV files into the database\n csv_glob = os.path.join(constants.EXCEL_PATH, \"*.csv\")\n csv_sheets = glob.glob(csv_glob)\n for file_path in csv_sheets:\n file_title = os.path.basename(file_path).split(\".\")[0]\n file_df = pd.read_csv(file_path)\n print(\"Reading '%s' file\" % (file_path))\n\n file_df.to_sql(file_title, con=database.engine, if_exists=\"replace\")\n\n # Add mapped ISO3 code tables that only contain the UN country code\n table_names = [\"crude_birth_rate\", \"absolute_deaths\", \"total_population\"]\n for table_name in table_names:\n print(\"Creating country code mapped database for\", table_name)\n # Create dictionary structure to map from un three numeric digit codes to iso3 three alphabetical digit codes.\n map_df = database.db_query(table_name=\"un_iso3_map\")[\n [\"Location code\", \"ISO3 Alpha-code\"]\n ].dropna()\n table_df = database.db_query(table_name=table_name)\n table_with_iso = pd.merge(\n table_df, map_df, left_on=\"Country code\", right_on=\"Location code\"\n )\n # Rename columns to avoid using spaces.\n table_with_iso.rename(columns={\"ISO3 Alpha-code\": \"iso3\"}, inplace=True)\n # Remove index column to avoid creating duplicates.\n if \"Index\" in table_with_iso.columns:\n table_with_iso = table_with_iso.drop(columns=[\"Index\"])\n\n # Create a new 'mapped' database structure\n table_with_iso.to_sql(table_name + \"_mapped\", con=database.engine, if_exists=\"replace\")\n\n return database", "title": "" }, { "docid": "a6ee67b4b2d9cee5503ad2994eaff1ed", "score": "0.47326326", "text": "def get_equity_company_profile(equity, language='english'):\n\n available_sources = {\n 'english': 'Investing',\n 'en': 'Investing',\n 'spanish': 'Bolsa de Madrid',\n 'es': 'Bolsa de Madrid',\n }\n\n if not equity:\n raise ValueError(\"ERR#013: equity parameter is mandatory and must be a valid equity name.\")\n\n if language.lower() not in available_sources.keys():\n raise ValueError(\"ERR#014: the specified language is not valid, it can just be either spanish (es) or english (en).\")\n\n selected_source = available_sources[language.lower()]\n\n resource_package = __name__\n resource_path = '/'.join(('resources', 'es', 'equities.csv'))\n if pkg_resources.resource_exists(resource_package, resource_path):\n equities = pd.read_csv(pkg_resources.resource_filename(resource_package, resource_path))\n else:\n equities = pd.DataFrame(ts.get_equity_names())\n\n if equities is None:\n raise IOError(\"ERR#001: equities object not found or unable to retrieve.\")\n\n if unidecode.unidecode(equity.lower()) not in [unidecode.unidecode(value.lower()) for value in equities['name'].tolist()]:\n raise RuntimeError(\"ERR#018: equity \" + equity.lower() + \" not found, check if it is correct.\")\n\n company_profile = {\n 'url': None,\n 'desc': None\n }\n\n for row in 
equities.itertuples():\n if unidecode.unidecode(row.name.lower()) == unidecode.unidecode(equity.lower()):\n if selected_source == 'Bolsa de Madrid':\n url = \"http://www.bolsamadrid.es/esp/aspx/Empresas/FichaValor.aspx?ISIN=\" + row.isin\n\n company_profile['url'] = url\n\n head = {\n \"User-Agent\": ua.get_random(),\n \"X-Requested-With\": \"XMLHttpRequest\",\n \"Accept\": \"text/html\",\n \"Accept-Encoding\": \"gzip, deflate, br\",\n \"Connection\": \"keep-alive\",\n }\n\n req = requests.get(url, headers=head, timeout=5)\n\n if req.status_code != 200:\n raise ConnectionError(\"ERR#015: error \" + str(req.status_code) + \", try again later.\")\n\n root_ = fromstring(req.text)\n\n path_ = root_.xpath(\".//td[contains(@class, 'Perfil')]\")\n\n if path_:\n company_profile['desc'] = str(path_[0].text_content())\n\n return company_profile\n else:\n return company_profile\n elif selected_source == 'Investing':\n url = \"https://www.investing.com/equities/\" + row.tag + \"-company-profile\"\n\n company_profile['url'] = url\n\n head = {\n \"User-Agent\": ua.get_random(),\n \"X-Requested-With\": \"XMLHttpRequest\",\n \"Accept\": \"text/html\",\n \"Accept-Encoding\": \"gzip, deflate, br\",\n \"Connection\": \"keep-alive\",\n }\n\n req = requests.get(url, headers=head, timeout=5)\n\n if req.status_code != 200:\n raise ConnectionError(\"ERR#015: error \" + str(req.status_code) + \", try again later.\")\n\n root_ = fromstring(req.text)\n\n path_ = root_.xpath(\".//*[@id=\\\"profile-fullStory-showhide\\\"]\")\n\n if path_:\n company_profile['desc'] = str(path_[0].text_content())\n\n return company_profile\n else:\n return company_profile", "title": "" }, { "docid": "4334cba1e5938ba7efe67e788dd78650", "score": "0.47238487", "text": "def censuscode(\n in_file, out_file,\n lookup_streets, lookup_nums, geo_level=\"blkgrp\",\n record_id=\"record_id\", zip_code=\"zip_code\", address=\"address\", street=\"street\", street_num=\"street_num\"\n):\n\n info = Log(__name__, \"censuscode\").info\n info(\"Censuscoding\", in_file)\n\n addresses = pd.read_csv(\n in_file,\n low_memory=False,\n usecols=[record_id, zip_code, address]\n )\n N = [len(addresses)]\n\n info(\"Parsing street name and number from address field\")\n parsed = pd.DataFrame(addresses[address].str.upper().str.extract(\"([0-9A-Z ]+)\", expand=False).fillna(\"\").apply(split_address).tolist())\n if \"StreetNamePreDirectional\" in parsed.columns:\n addresses[street] = np.where(parsed.StreetNamePreDirectional.notnull(), parsed.StreetNamePreDirectional + \" \" + parsed.StreetName, parsed.StreetName)\n else:\n addresses[street] = parsed.StreetName\n addresses[street_num] = np.where(parsed.AddressNumber.str.isdigit(), parsed.AddressNumber, np.nan)\n\n with open(out_file + \".log\", \"w\") as log:\n\n info(\"Loading lookup files\")\n streets = lookup_streets.drop_duplicates([\"street\", \"zip\"])\n print(len(streets), \"distinct street names\", file=log)\n nums = lookup_nums.drop_duplicates([\"street_num\", \"street\", \"zip\"])\n print(len(nums), \"distinct street name/numbers\", file=log)\n\n info(\"Building range look-up for street nums\")\n num_lookup = {}\n for index, group in nums.groupby([\"street\", \"zip\"]):\n group = group.sort_values(\"street_num\")\n num_lookup[index] = (group[\"street_num\"].values, group[geo_level].values)\n print(len(num_lookup), \"look-ups for street number ranges\", file=log)\n\n info(\"Filtering records with non-missing zip codes\")\n addresses = addresses[addresses[zip_code].notnull()]\n N.append(len(addresses))\n 
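        # N records the row count after each filtering stage; the closing match-rate line is computed from these checkpoints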
print(N[-1], \"records with non-missing zip codes\", file=log)\n\n info(\"Filtering records with valid integer zip codes\")\n if addresses[zip_code].dtype == \"O\":\n addresses[zip_code] = addresses[zip_code].str.extract(\"(\\d+)\", expand=False)\n addresses = addresses[addresses[zip_code].notnull()]\n addresses[zip_code] = addresses[zip_code].astype(int)\n addresses = addresses[addresses[zip_code].isin(streets.zip.unique())]\n N.append(len(addresses))\n print(N[-1], \"records with valid integer zip codes\", file=log)\n\n info(\"Filtering records with valid street names\")\n addresses[street] = addresses[street].str.upper().str.extract(\"([0-9A-Z ]+)\", expand=False)\n addresses = addresses[addresses[street].notnull()]\n N.append(len(addresses))\n print(N[-1], \"records with valid street names\", file=log)\n\n info(\"Merge 1 on distinct street name\")\n addresses = addresses.merge(streets,\n how=\"left\",\n left_on=[street, zip_code],\n right_on=[\"street\", \"zip\"],\n validate=\"many_to_one\")\n assert len(addresses) == N[-1]\n merged = addresses[geo_level].notnull()\n addresses.loc[merged, [record_id, zip_code, geo_level]].to_csv(out_file, float_format=\"%.0f\", index=False)\n print(\"merged\", merged.sum(), \"records on distinct street name\", file=log)\n\n # Remove merged addresses.\n addresses = addresses[~merged]\n del addresses[geo_level]\n N.append(len(addresses))\n print(N[-1], \"records remaining\", file=log)\n\n # Keep records with valid integer street nums.\n if addresses[street_num].dtype == \"O\":\n addresses[street_num] = addresses[street_num].str.extract(\"(\\d+)\", expand=False)\n addresses = addresses[addresses[street_num].notnull()]\n addresses[street_num] = addresses[street_num].astype(int)\n N.append(len(addresses))\n print(N[-1], \"records with valid integer street nums\", file=log)\n\n info(\"Merge 2 on distinct street name/num\")\n addresses = addresses.merge(nums,\n how=\"left\",\n left_on=[street_num, street, zip_code],\n right_on=[\"street_num\", \"street\", \"zip\"],\n validate=\"many_to_one\")\n assert len(addresses) == N[-1]\n merged = addresses[geo_level].notnull()\n addresses.loc[merged, [record_id, zip_code, geo_level]].to_csv(out_file, float_format=\"%.0f\", index=False, mode=\"a\", header=False)\n print(\"merged\", merged.sum(), \"records on distinct street name/num\", file=log)\n\n # Remove merged addresses.\n addresses = addresses[~merged]\n del addresses[geo_level]\n N.append(len(addresses))\n print(N[-1], \"records remaining\", file=log)\n\n info(\"Merge 3 with street number range search\")\n merged = []\n for _, row in addresses.iterrows():\n l = num_lookup.get((row[street], row[zip_code]))\n if l is not None:\n i = np.searchsorted(l[0], row[street_num], side=\"right\")\n merged.append((row[record_id], row[zip_code], l[1][max(0, i-1)]))\n print(\"merged\", len(merged), \"records on nearest street name/num\", file=log)\n with open(out_file, \"a\") as f:\n for row in merged:\n print(*row, sep=\",\", file=f)\n N.append(N[-1] - len(merged))\n print(N[-1], \"records remain unmerged\", file=log)\n print(\"overall match rate: {:.1f}%\".format(100.0 * (N[0] - (N[0] - N[2]) - N[-1]) / N[0]), file=log)\n\n info(\"Done.\")", "title": "" }, { "docid": "c54955f560276c19c1f1a118655f67ec", "score": "0.47205225", "text": "def main(varfile, nsites, poplists, outfile):\n npop = len(poplists)\n with open(outfile, 'w') as f:\n f.write(f'[loci]={nsites}\\n\\n[populations]={npop}\\n\\n')\n for npop, popfile in enumerate(poplists, 1):\n print(f'pop{npop}: {popfile}')\n 
f.write(f'[pop]={npop}\\n')\n samples = [x.strip() for x in open(popfile)]\n for line in produce_record(varfile, samples, nsites):\n f.write(line)\n f.write('\\n')", "title": "" }, { "docid": "211b30e851854da30129b7d1f44f7728", "score": "0.4715706", "text": "def test_projects_sources_read(self):\n pass", "title": "" }, { "docid": "7994bb1593acde8e26b07d79e5a757f7", "score": "0.46965274", "text": "def populate_mapping(args):\n run_path = os.path.normpath(os.path.expanduser(args.run_path))\n # TODO - handle '.' in reference\n ref = run_path.split('/')[-1].split('.')[0]\n infile = glob.glob(run_path+'/nway-SNPs_INDELS-comparison/*.any.withref')\n assert len(infile) == 1\n infile = infile[0]\n ref = os.path.join(run_path+'/', ref+'/reference.gbk')\n with database.make_connection() as connection:\n parsed, stats = core.nesoni_report_to_JSON(core.nway_reportify(infile))\n # Insert all variants\n chunks = misc.chunk_list(parsed, BLOCKS)\n for chunk in chunks:\n r.table('determined_variants').insert(chunk).run(connection)\n print \"Mapping statistics\"\n print \"Strain,Variants\"\n for sid, count in stats.items():\n print \"%s,%s\" % (sid, count)\n strain_JSON = {\"StrainID\": sid,\n \"VarCount\": count,\n \"id\": sid}\n r.table('strains_under_investigation').insert(strain_JSON).run(connection)\n # Now, do the reference\n ref, ref_meta = core.reference_genome_features_to_JSON(ref)\n # Do we already have a reference stored as current?\n need_update = True\n try:\n r.table('references').get(\"current_reference\").has_fields(\"reference_id\").run(connection)\n except RqlRuntimeError:\n need_update = False\n if need_update:\n stored = r.table('references').get(\"current_reference\").pluck(\"reference_id\", \"revision\").run(connection)\n # Do we need to update...\n if (stored[\"reference_id\"] != ref[\"id\"] or\n stored[\"revision\"] != ref[\"revision\"]):\n r.table('references').insert(ref).run(connection)\n r.table('references').get(\"current_reference\").update({\"reference_id\": ref[\"id\"], \"revision\": ref[\"revision\"]}).run(connection)\n r.table('reference_features').insert(ref_meta).run(connection)\n else:\n print (\"Current stored reference and reference for this run \"\n \"are the same \\n Not doing anything\")\n else:\n r.table('references').insert(ref).run(connection)\n r.table('references').insert({\"id\": \"current_reference\", \"reference_id\": ref[\"id\"], \"revision\": ref[\"revision\"]}).run(connection)\n r.table('reference_features').insert(ref_meta).run(connection)\n # This is for adding the coverage to the strains_under_investigation\n strains = r.table('strains_under_investigation').pluck(\"StrainID\").run(connection)\n cur_ref = r.table('references').get('current_reference').run(connection)\n ref = cur_ref[\"reference_id\"]+\"_\"+str(cur_ref[\"revision\"])\n for e in strains:\n # open the userplot of the current reference & strain\n not_called = mapping.get_N_char_positions(run_path, e['StrainID'])\n ranges = misc.get_intervals(not_called)\n r.table('strains_under_investigation').get(e['StrainID']).update({\"reference\": ref, \"coverage\": json.dumps(ranges)}).run(connection)", "title": "" }, { "docid": "834c0ea56772c2927afe0ab736b1bbff", "score": "0.4694539", "text": "def main():\n\n usgs_data_list = dataGrabber()\n if usgs_data_list:\n dataParser(usgs_data_list)", "title": "" }, { "docid": "c9bf4dba37965804738a06d4a73ab98f", "score": "0.46934667", "text": "def extract_data_sources_from_config(graphic_config):\n if DATA_SOURCES in graphic_config:\n data_source_dict = 
graphic_config[DATA_SOURCES]\n data_sources = {data_source_dict[MAIN_DATA_SOURCE].get(DATA_SOURCE_TYPE)}\n additional_data_source_list = data_source_dict.get(ADDITIONAL_DATA_SOURCES, [])\n for additional_data_source_dict in additional_data_source_list:\n data_sources.add(additional_data_source_dict.get(DATA_SOURCE_TYPE))\n return list(data_sources)\n else:\n return []", "title": "" }, { "docid": "f4c0fa37401dbc89cc4acd75a83daffd", "score": "0.46868148", "text": "def setup_data(es_with_collector):\n country_uk = constants.Country.united_kingdom.value.id\n country_us = constants.Country.united_states.value.id\n country_anguilla = constants.Country.anguilla.value.id\n uk_region = constants.UKRegion.south_east.value.id\n CompanyFactory(\n name='abc defg ltd',\n trading_names=['helm', 'nop'],\n address_1='1 Fake Lane',\n address_town='Downtown',\n address_country_id=country_uk,\n registered_address_country_id=country_uk,\n uk_region_id=uk_region,\n )\n CompanyFactory(\n name='abc defg us ltd',\n trading_names=['helm', 'nop', 'qrs'],\n address_1='1 Fake Lane',\n address_town='Downtown',\n address_country_id=country_us,\n registered_address_country_id=country_us,\n )\n CompanyFactory(\n name='archived',\n trading_names=[],\n address_1='Main Lane',\n address_town='Somewhere',\n address_country_id=country_anguilla,\n registered_address_country_id=country_anguilla,\n archived=True,\n )\n es_with_collector.flush_and_refresh()", "title": "" }, { "docid": "c3131c675810853e8bd4409df6c95e61", "score": "0.46844223", "text": "def read_data(source: str):\n if source == 'cora':\n return load_cora()\n elif source == 'finefoods':\n return load_finefoods()\n raise NotImplementedError", "title": "" }, { "docid": "3eabbaefe542d67f8fb84f71fd751875", "score": "0.46801597", "text": "def regionsWithData(**kwargs):\n #parser = createParser()\n #args = parser.parse_args(sysargs)\n if __name__ != \"__main__\":\n kwargs = argprase_kwargs(kwargs, parseArguments)\n args = argparse.Namespace(**kwargs)\n compressed_input = False\n #if args.vcfname is None:\n # instream = sys.stdin\n #elif args.vcfname[-3:] == '.gz':\n # compressed_input = True\n # instream = gzip.open(args.vcfname,'r')\n # else:\n # instream = open(args.vcfname,'r')\n\n\n if args.vcfname is None:\n vcfname = '-'\n else:\n vcfname = args.vcfname\n\n popmodel = None\n if args.popname is not None:\n popmodel = read_single_model(args.popname,args.modelname)\n\n vcf_in = VcfReader(vcfname,\n popmodel=popmodel,\n index=args.tabix_index)\n\n if args.outname is not None:\n outstream = open(args.outname,'w')\n else:\n outstream = sys.stdout\n\n #sample_count = 20\n #missing_data_count = [0 for i in range(sample_count+1)]\n sample_count = 0\n indel_count = 0\n\n #missing_data_per_indiv = [0 for i in range(sample_count)]\n\n prev_miss_pos = 0\n prev_full_pos = 0\n prev_pos = 0\n prev_chrom = ''\n\n in_section = False\n #May be skipping first site, fine here but issue elsewhere\n for record in vcf_in.reader:\n missing_for_site = False\n missing_at_site = 0\n #for i in range(9,len(la)):\n for i in range(len(record.samples)):\n #geno = la[i]\n #if '.' 
in geno:\n if record.samples[i].alleles[0] in [None,'N']:\n missing_at_site += 1\n if missing_at_site > args.missing_count:\n missing_for_site = True\n break\n #cur_pos = int(la[1])\n cur_pos = record.pos\n #if prev_pos == 0 or prev_chrom != la[0]:\n if prev_pos == 0 or prev_chrom != record.chrom:\n if prev_pos != 0:\n start_pos = ((prev_miss_pos+prev_full_pos+1)//2 if args.extend else prev_full_pos)\n end_pos = prev_pos\n outstream.write(outputLine(prev_chrom,start_pos,end_pos,args))\n prev_miss_pos = cur_pos\n prev_full_pos = cur_pos\n #prev_chrom = la[0]\n prev_chrom = record.chrom\n if not missing_for_site:\n in_section = True\n prev_pos = cur_pos\n continue\n if missing_for_site:\n if in_section:\n in_section = False\n if args.extend:\n start_pos = (prev_miss_pos+prev_full_pos+1)//2\n end_pos = (prev_pos+cur_pos)//2\n else:\n start_pos = prev_full_pos\n end_pos = prev_pos\n diff = end_pos - start_pos\n if diff > args.window_size:\n outstream.write(outputLine(record.chrom,start_pos,end_pos,args))\n else:\n if not in_section:\n prev_miss_pos = prev_pos\n prev_full_pos = cur_pos\n in_section = True\n prev_pos = cur_pos\n #line = getl(instream,compressed_input)\n if in_section:\n if args.extend:\n start_pos = (prev_miss_pos+prev_full_pos+1)//2\n else:\n start_pos = prev_full_pos\n end_pos = prev_pos\n outstream.write(outputLine(prev_chrom,start_pos,end_pos,args))\n try:\n outstream.close()\n except:\n pass", "title": "" }, { "docid": "f1cc426ac3b16f4165d51029ff68f3c1", "score": "0.46760735", "text": "def main():\n (options, args) = process_options()\n if len(args) == 0:\n input_filename = '../data/facts_2008_09_nz.csv'\n else:\n input_filename = args[0]\n\n if not os.path.isdir('../data/desc'):\n os.makedirs('../data/desc')\n options.verbose = True\n read_coins_csv(input_filename, '2008_09', options.verbose)\n input_filename = '../data/facts_2009_10_nz.csv'\n read_coins_csv(input_filename, '2009_10', options.verbose)", "title": "" }, { "docid": "0b10a9bfe11b118b20a085182ebe7e90", "score": "0.46746534", "text": "def get_countrydict():\n\n countrydict = countryinfo('Test')\n\n file = os.path.join(analysisdir,'country_dictionary.dat')\n \n try:\n countrydict = cPickle.load(open(file, 'rb'))\n except: \n db = dbf.Dbf(os.path.join(analysisdir,'GRIDCTRY.DBF'))\n\n countrydict = {}\n for n, rec in enumerate(db):\n code = rec['COUNTRY']\n gridid = str(int(rec['GRID']))\n\n if code in ['Czech Republic', 'Slovakia']:\n rec = fix_eu(rec)\n\n rate_in_gr = rec['RATE_IN_GR'] * 1.e-2\n\n i = int(gridid[-3::])\n j = int(gridid[0:-3])\n lat = -91 + j + 0.5\n lon = -181 + i + 0.5\n if code in countrydict: \n a = countrydict[code]\n else:\n a = countryinfo(code)\n\n\n shared_border = False\n shared_water = False\n if rec['COVER_ID'] == 0.0:\n shared_border = False\n shared_water = True\n if rec['COVER_ID'] >= 2.0:\n shared_border = True\n if rec['COVER_ID'] >= 10.0:\n shared_water = True\n\n a.add_gridinfo(i - 1, j - 1, rate_in_gr, shared_border, shared_water)\n\n countrydict[code] = a\n\n db.close()\n\n cPickle.dump(countrydict, open(file, 'wb'), -1)\n\n return countrydict", "title": "" }, { "docid": "418b9b227d6e2e61464247acba356401", "score": "0.46730128", "text": "def add_authors_location_inplace(self, G: nx.Graph): \n trees = {}\n print('Compiling addresses')\n for n in tqdm(G):\n G.nodes[n]['data']['countries'] = []\n G.nodes[n]['data']['coordinates'] = []\n addresses = []\n if ('data' in G.nodes[n]) and ('Affiliation' in G.nodes[n]['data']) and (G.nodes[n]['data']['Affiliation'] is not None) and 
(len(G.nodes[n]['data']['Affiliation']) > 0):\n # aff = G.nodes[n]['data']['Affiliation'].replace('(Reprint Author)', '').replace(\n # \".,\", ',').replace(\"'\", '') # O'lastname / Jesusm. / Redone. mess sentence identification\n # doc = nlp(aff)\n # add = ''\n for add in G.nodes[n]['data']['Affiliation']:\n # special cases: NY 10012 USA / LOS ANGELES,CA.\n vals = [x.strip(' \\n.') for x in add.split(',')]\n # ,CA.\n if (len(vals[-1]) == 2) and vals[-1].upper() in _US:\n addresses.append(\n [titlecase(vals[-2]), vals[-1], 'United States'])\n elif (vals[-1].upper().endswith(' USA')):\n addresses.append(\n [titlecase(vals[-2]), vals[-1].split(' ')[0], 'United States'])\n else:\n if len(vals) > 3: # name, dept, etc\n addresses.append([titlecase(x) for x in vals[-3:]])\n else:\n addresses.append([titlecase(x) for x in vals])\n\n for vals in addresses:\n G.nodes[n]['data']['countries'].append(vals[-1]) \n v = [x.lower() for x in vals]\n if len(v) < 3:\n v = ['', ]*(3-len(v)) + v\n # not really city / state / country, but it is easier to\n # code with names\n # there is probably a shorter way of doing this using defaultdicts\n country = _find(trees, v[-1])\n if country is None:\n country = v[-1]\n trees[country] = {}\n\n state = _find(trees[country], v[-2])\n if state is None:\n state = v[-2]\n trees[country][state] = {}\n\n city = _find(trees[country][state], v[-3])\n if city is None:\n city = v[-3]\n trees[country][state][city] = []\n\n trees[country][state][city].append(n)\n\n \n for country in trees:\n if not trees[country]:\n continue\n\n cached = _find(self._parts_by_country, country)\n if (cached is None) and (self._gmaps is not None):\n i = 0\n j = 0\n geo = []\n while not geo:\n sample_state = list(trees[country].keys())[i]\n sample_city = list(trees[country][sample_state])[j]\n parts = [sample_city, sample_state, country]\n geo = self._google(parts)\n j += 1\n if j == len(trees[country][sample_state].keys()):\n j = 0\n i += 1\n if i == len(trees[country]):\n print('Could not find \"{0}\" as a country, please check the affiliation field.'.format(country))\n print('(It happens when a author has a dot at the end of a long abreviation)')\n print(trees[country])\n break\n if geo is not None:\n self._parts_by_country[country] = _count_useful_parts(parts, geo)\n self._save_state()\n\n print('Getting coordinates')\n for country in tqdm(trees):\n cached = _find(self._parts_by_country, country)\n if cached is None:\n continue\n\n to_use = self._parts_by_country[cached]\n\n for state in trees[country]:\n for city in trees[country][state]:\n parts = [city, state, country][-to_use:]\n parts = ['', ]*(3-len(parts)) + parts\n geo = self._cache_search(parts)\n if (geo is None) and (self._gmaps is not None):\n geo = self._google(parts)\n if not geo:\n continue\n self._cache_add(parts, geo)\n\n if geo is not None:\n for n in trees[country][state][city]:\n G.nodes[n]['data']['coordinates'].append(\n [geo['geometry']['location']['lng'], geo['geometry']['location']['lat']])\n\n return(G)", "title": "" }, { "docid": "500f8ee0a225e6bfce9b06eaf4f27156", "score": "0.46726024", "text": "def extract(self):\n for source in self.sources:\n if not self.args.source or source in self.args.source.split(\",\"):\n config = self.sources[source]\n if self.sources[source][\"type\"] == \"api\":\n self.extracted[source] = self.extract_via_api(source, config)\n elif self.sources[source][\"type\"] == \"gcs\":\n self.extracted[source] = self.extract_via_gcs(source, config)\n elif self.sources[source][\"type\"] == \"bq\":\n 
self.extracted[source] = self.extract_via_bq(source, config)\n elif self.sources[source][\"type\"] == \"file\":\n self.extracted[source] = self.extract_via_fs(source, config)\n elif self.sources[source][\"type\"] == \"const\":\n self.extracted[source] = self.extract_via_const(source, config)", "title": "" }, { "docid": "1275fa994f665b9508652009b2f8fe3d", "score": "0.4671499", "text": "def create_config(srcdir, fmtclass, exclude=set(), filters=[]):\n formatter = fmtclass()\n for community, data in get_communities_data(srcdir, exclude):\n try:\n domains = data['domains']\n nameservers = data['nameservers']\n except (TypeError, KeyError):\n continue\n\n formatter.add_comment(\"\\n%s\\n\" % community)\n\n servers = filter(lambda d: all(f(d) for f in filters), nameservers)\n servers = list(servers)\n\n if len(domains) == 0:\n formatter.add_comment(\"No valid domains found\")\n elif len(servers) == 0:\n formatter.add_comment(\"No valid servers found\")\n else:\n formatter.add_data(domains, servers)\n\n print(formatter.finalize())", "title": "" }, { "docid": "52df9915a88ca63bffe13cd1b1b50ff8", "score": "0.46703088", "text": "def add_population_data(df, drop=False):\n\n data_path = os.path.join('data', 'Additional_Context_Data_Global.csv')\n\n more_df = pd.read_csv(data_path)\n if drop:\n more_df.dropna(inplace=True)\n new_df = more_df.merge(df,\n how='left',\n left_on=['CountryName', 'CountryCode'],\n right_on=['CountryName', 'CountryCode']\n )\n return new_df", "title": "" }, { "docid": "561f6dbccdf00793fb6e26e4d9f84624", "score": "0.4666822", "text": "def main(source, dem_group):\n df = gen_migration_data(source)\n df = standardize_names(df)\n generate_map(df, dem_group)", "title": "" }, { "docid": "45be85579db4ed9f36761ced9917f133", "score": "0.4666384", "text": "def main():\n\n user_choice = input('Name of input file: ')\n print()\n read_file = readfile(user_choice)\n\n print(\"The original list of cities is: \")\n print_list(read_file)", "title": "" }, { "docid": "eadeb3da4b465a5ba2a2aa2e6e135866", "score": "0.46622217", "text": "def GEO_0005 (Place):\n CountryCode, PlaceId = CountryCode_PlaceID(Place)\n return_emits = []\n if CountryCode == \"CAN\": # Target results for just this one country\n # Get category\n CategoryNameList = []\n CategoryNames = Place.findall(ns+\"CategoryName\")\n for Category in CategoryNames:\n Name = Category.find(ns+\"Text\").text\n CategoryNameList.append(Name)\n try:\n FirstCategory = CategoryNameList[0]\n except:\n FirstCategory = \"None\"\n # Get POIName, linkPvid and lat/longs\n BaseTextList = Place.findall(ns+\"BaseText\")\n BaseText = BaseTextList[0] # The first name\n POIName = BaseText.text\n LocationList = Place.findall(ns+\"Location\")\n for Location in LocationList:\n Link = Location.find(ns+\"Link\")\n LinkAttributes = Link.attrib\n if 'linkPvid' in LinkAttributes.keys():\n LinkPvid = LinkAttributes['linkPvid']\n # Get additional info\n AdditionalDataList = Location.findall(ns+\"AdditionalData\")\n for AdditionalData in AdditionalDataList:\n AdditionalDataAttribute = AdditionalData.attrib\n #print \"AA:\", AdditionalDataAttribute\n try:\n \tif AdditionalDataAttribute['key'] == \"LocationType\":\n \t\tLocationType = AdditionalData.text\n except:\n \tLocationType = 'None'\n try:\n \tif AdditionalDataAttribute['key'] ==\"MatchLevel\":\n \t\tMatchLevel = AdditionalData.text\n except:\n \tMatchLevel = 'None'\n\n GeoPositionList = Location.findall(ns+\"GeoPosition\")\n for GeoPosition in GeoPositionList:\n GeoPositionAttributes = GeoPosition.attrib\n if 
'type' in GeoPositionAttributes.keys():\n Routing_Display = GeoPositionAttributes['type']\n Longitude = GeoPosition.find(ns+\"Longitude\")\n Latitude = GeoPosition.find(ns+\"Latitude\")\n if Routing_Display == \"ROUTING\":\n try:\n Long = str(Longitude.text)\n LAT = str(Latitude.text)\n except:\n Long = \"\"\n LAT = \"\"\n try:\n # ModID|CountryCode|PlaceId|Primary= True or False|ROUTING|LAT|LONG\n POIName = POIName.encode('UTF-8') # convert the unicode to bytestrings for the return\n emit_string = 'ModID|'+CountryCode+'|'+PlaceId+'|'+LinkPvid+'|'+FirstCategory+'|'+POIName+'|'+LAT+'|'+Long+'|'+LocationType+'|'+MatchLevel\n return_emits.append(emit_string)\n except:\n pass\n return return_emits", "title": "" }, { "docid": "be5e09f1828ef03ae75b95a0240895f2", "score": "0.46612445", "text": "def load_data(filter_lang):\n\n repos = pd.read_csv('/Users/ianshen/Documents/github/clinical-opensource-projects/data/repos.csv')\n\n # preprocess the repos' language column\n repos.loc[repos['language'].isin(['CSS','HTML']), 'language'] = 'CSS/HTML'\n repos.loc[repos['language'].isin(['TypeScript']), 'language'] = 'JavaScript'\n if filter_lang:\n repos = repos.loc[repos.language.notnull()] # repos with no language(s) indicated are removed\n\n return repos", "title": "" }, { "docid": "91629888415d4701fc814221fdeab3e7", "score": "0.46570152", "text": "def create_source_dict(config_sheet_reader, all_files, imp_plan_source_file_path):\n func_config_sheet_reader = config_sheet_reader\n func_all_files = all_files\n func_imp_plan_source_file_path = imp_plan_source_file_path\n dict_src_data = {}\n for config_sheet_reader_item in func_config_sheet_reader:\n df_appended = []\n for imp_plan_file_reader_item in func_all_files:\n file_path = func_imp_plan_source_file_path + imp_plan_file_reader_item\n data = pd.read_excel(\n file_path, sheet_name=CONFIG[config_sheet_reader_item]['SheetName'],\n skiprows=int(CONFIG[config_sheet_reader_item]['skiprows']),\n nrows=int(CONFIG[config_sheet_reader_item]['nrows']),\n usecols=CONFIG[config_sheet_reader_item]['parse_cols']\n )\n column_names = config_string_converter(\n CONFIG[config_sheet_reader_item]['columns']\n )\n imp_plan_df = pd.DataFrame(\n data, columns=column_names\n )\n imp_plan_df = imp_plan_df[\n imp_plan_df[\n CONFIG[config_sheet_reader_item]['nullCheck']\n ].notnull()]\n# #Section to be removed once the migration of the template is done\n# if config_sheet_reader_item in (\n# 'Readsheet ProjectList2019',\n# 'Readsheet ProjectList2020'\n# ):\n# imp_plan_df = migration_related_changes(\n# imp_plan_df,\n# CONFIG[config_sheet_reader_item]['nullCheck'],\n# CONFIG[config_sheet_reader_item]['RankCheck']\n# )\n# #Section to be removed once the migration of the template is done\n imp_plan_df['Source.Name'] = imp_plan_file_reader_item\n df_appended.append(imp_plan_df)\n df_appended = pd.concat(df_appended)\n dict_src_data[DFNAMES[int(CONFIG[config_sheet_reader_item]['DFNum'])]] = df_appended\n del imp_plan_df\n del df_appended\n return dict_src_data", "title": "" }, { "docid": "0f1ad2f24c23fc25117fbc90ef24e864", "score": "0.4651635", "text": "def main(path_to_input_file: str, module_name: str) -> None:\n records = read_registry(path_to_input_file)\n print(f\"{len(records)} IBAN specs read.\")\n iban_registry: Dict[str, IBANSpec] = {}\n for record in records:\n for country_code, spec in record_2_specs(record):\n iban_registry[country_code] = spec\n if module_name:\n input_file_name = os.path.basename(path_to_input_file)\n fn_sections = input_file_name.split('_')\n if 
len(fn_sections) == 6 and fn_sections[2] == \"Release\":\n release = fn_sections[3]\n published = ' '.join((fn_sections[4], fn_sections[5]))\n else:\n release = None\n published = None\n py_file_name = os.path.join(pkg_dir,\n os.path.extsep.join((module_name, 'py')))\n with open(py_file_name, mode='w', encoding=\"utf-8\") as py_file:\n write_module_header(py_file, input_file_name, release, published)\n write_code(py_file, iban_registry)\n print(f\"{len(iban_registry)} registry entries written.\")\n else:\n pprint(iban_registry)", "title": "" }, { "docid": "dd299fa90d482da84b98437b092118ca", "score": "0.46460104", "text": "def get_population_extract_filename(city_ref_file, data_source):\n\t# Folder exists?\n\timport os\n\tif not(os.path.isdir(storage_folder + \"/\" + data_source)): \n\t\tos.makedirs(storage_folder + \"/\" + data_source)\n\treturn storage_folder + \"/\" + data_source + \"/\" + city_ref_file + \"_population.shp\"", "title": "" }, { "docid": "2ccbca167bcc3cddbbd8d4519eff7560", "score": "0.4645847", "text": "def load(source = None,\n map_code = None,\n date_min = None,\n date_max = None,\n ):\n \n if source == global_var.data_source_production_eco2mix:\n df = eco2mix.load(map_code = map_code,\n date_min = date_min,\n date_max = date_max,\n )\n \n elif source == global_var.data_source_production_rte:\n df = rte.load()\n \n elif source == global_var.data_source_production_entsoe:\n df = entsoe.load(map_code = map_code)\n \n else: \n raise ValueError\n\n assert set(df.columns) == {global_var.commodity,\n global_var.geography_map_code,\n global_var.production_dt_utc,\n global_var.production_nature,\n global_var.production_power_mw,\n global_var.production_source,\n global_var.unit_name,\n }\n\n # Sort\n dg = df.reindex(sorted(df.columns), axis = 1)\n dg = dg.set_index(global_var.production_dt_utc)\n dg = dg.sort_index()\n dg[global_var.production_power_gw] = dg[global_var.production_power_mw]/1e3\n\n # Filter\n dh = dg.loc[ pd.Series(True, index = dg.index)\n & ((dg.index >= date_min) if bool(date_min) else True)\n & ((dg.index < date_max) if bool(date_max) else True)\n ]\n\n # Checks\n assert dh.shape[0] > 0\n assert not dh.reset_index()[[global_var.production_dt_utc,global_var.unit_name]].duplicated().sum()\n\n return dh", "title": "" }, { "docid": "37a491f290abba61bc54bf065fb23d55", "score": "0.4645283", "text": "def get_plugin_configs(self):\n yield super(Site, self).get_plugin_configs()\n yield ('countries', 'country_code', 'BE')\n yield ('b2c', 'import_statements_path', self.project_dir.child('sepa_in'))", "title": "" }, { "docid": "6f173950c9bb9b3e3d21f8013fd4a0f2", "score": "0.46422124", "text": "def get_source_data_info():\n\n # Establish connection to the database\n db = DatabaseConnection(path_config='db_config_data.yaml')\n\n # Iterate through sources listed in sources.json\n sources = json_load('../data/sources.json')\n result = []\n for source in sources:\n # Obtain schema with the last update\n try:\n schema = db.get_latest_schema('source_' + source['name'])\n except Exception as exception:\n print('[WARNING] %s' % (exception))\n continue\n\n # Store information to be returned\n result.append({\n 'description': source['description'],\n 'name': source['name'],\n 'schema': schema,\n 'tables': _get_tables_and_columns_in_schema(db, schema),\n 'update': _datetimestr_from_schema(schema),\n })\n\n # Close database connection and return the result\n db.close()\n return result", "title": "" }, { "docid": "e793cf23a5103e25b8538fdd3383641f", "score": "0.4631709", "text": "def 
populate_table_geo_headings(census_db, table_name, source_file):\n\n src_file = open(source_file, 'r', encoding='cp1252')\n\n field_holders = get_fields_holder(src_file)\n\n src_file.seek(0, os.SEEK_SET)\n\n log_rec_nos = []\n\n sql_statement = 'INSERT INTO ' + table_name + ' VALUES (' + field_holders + ');'\n sql_values = []\n record_count = 0\n for record in src_file:\n record_count += 1\n values = record.replace('\\r\\n', '').split('|')\n # Only inserting records from LA County\n if values[14] == '037':\n sql_values.append(values)\n log_rec_nos.append(values[7])\n if record_count % 1000 == 0:\n print(\"Populating {} with {} records\".format(str(record_count), str(len(sql_values))))\n execute_many_sql(census_db, sql_statement, sql_values)\n\n if record_count % 10000 == 0:\n census_db.commit()\n\n # One last execute to catch the remaining records\n execute_many_sql(census_db, sql_statement, sql_values)\n census_db.commit()\n\n return log_rec_nos", "title": "" }, { "docid": "a13a2c8d73ef78f6ee38c99eb0c1075b", "score": "0.4629007", "text": "def _init_source_data(self):\n pass", "title": "" }, { "docid": "6db12a3d6c57831f2cc805461f59ef27", "score": "0.46279642", "text": "def read_hasselback_file(fname, aff_map, degree_incl=('PHD',),\n rank_excl=('Retir', 'Emer', 'Deces', 'Visit')):\n df = pd.read_csv(fname)\n df = df[df['degree'].isin(degree_incl)]\n df[\"rank\"] = df[\"rank\"].str.replace(\"$\", \"\", regex=False)\n df = df[~df['rank'].isin(rank_excl)]\n df = df[~df[\"rank\"].fillna(\"\").str.startswith(\"V\")] # Visiting positions\n df = df[~df[\"annotation\"].fillna(\"\").str.startswith(\"visiting from\")]\n # Use correct listing information\n df[\"category\"] = fname.stem[0]\n df[\"listing\"] = df[\"listing\"].fillna(fname.stem[1:]).str[:4]\n # Aggregate entries\n df['degree_year'] = df['degree_year'].apply(complete_year)\n df['school'] = df['school'].replace(aff_map)\n id_cols = ['name', 'school', 'degree_year']\n df['ID'] = df[id_cols].fillna(\"\").apply(lambda l: \";\".join(l), axis=1)\n df['ID'] = df['ID'].str.replace(\";;\", \";\").str.strip(\";\")\n df['institution'] = df['institution'].apply(lambda x: aff_map.get(x))\n df = df.dropna(subset=[\"institution\"]).set_index('ID').drop(columns='degree_year')\n return df", "title": "" }, { "docid": "ec49c7fc8138634b80acffd54790de5d", "score": "0.46271798", "text": "def pre_execute(self):\n logging.warn('%s is still experimental', self.__class__.__name__)\n sitecol = readinput.get_site_collection(self.oqparam)\n self.datastore['sitecol'] = self.sitecol = sitecol\n self.csm = get_composite_source_model(self.oqparam)\n self.gsims_by_grp = {grp.id: self.csm.info.get_gsims(grp.id)\n for sm in self.csm.source_models\n for grp in sm.src_groups}\n self.rup_data = {}", "title": "" }, { "docid": "3dd0b94d5dc31768ab0a0a3c81a77c5d", "score": "0.46220294", "text": "def generate_country_table(source_urls, csv_file_path):\n\n country_id = [\"id\"]\n country_name = []\n\n # Add all countries found in all files and then deduplicate the file for double entries\n for url in source_urls:\n with open(url, \"r\") as opened_file:\n # Skip the first two rows\n reader = csv.reader(opened_file)\n next(reader)\n next(reader)\n\n for row in reader:\n # read all rows until \"World\" or \"TOTAL\" or \"Unidentified*\"\n if row[0] == \"World\" or row[0] == \"TOTAL\" or row[0] == \"Unidentified*\": break\n country_name.append(row[0])\n\n deduped_country_name = list(set(country_name))\n deduped_country_name.sort()\n deduped_country_name.insert(0,\"country\")\n 
country_id.extend(list(range(1, len(deduped_country_name))))\n\n country_table = [country_id, deduped_country_name]\n\n # Export country(country_id, country_name) table into csv format\n with open(csv_file_path + \"country.csv\", \"w\", newline=\"\") as file:\n writer = csv.writer(file, delimiter=\";\")\n for iter in range(len(country_table[0])):\n writer.writerow([x[iter] for x in country_table])\n\n return country_table", "title": "" }, { "docid": "328f07c6da3c75fcab004daace218cef", "score": "0.4620989", "text": "def process_file(file_path, cp_data, cfg):\n\n file_name = os.path.basename(file_path)\n if cfg[FILE_TYPE] is None:\n file_type = file_name.split('.')[-1]\n else:\n file_type = cfg[FILE_TYPE]\n\n # begin building up com file output\n if cfg[GAUSS_KEYWORDS] is None:\n com_data = [[DEF_GAUSS_KEYWORDS]]\n else:\n com_data = [prep_string(cfg[GAUSS_KEYWORDS])]\n\n if cfg[GAUSS_DESCRIP] is None:\n descrip = \"From '{}'\\n\".format(file_name)\n else:\n descrip = cfg[GAUSS_DESCRIP] + \"\\n\"\n\n charge = cfg[TOT_CHARGE]\n mult = cfg[TOT_MULT]\n com_data += [[descrip], ['{} {}'.format(charge, mult)]]\n\n # prepared list so that values can be entered in the correct location independent of whether read in order\n ring_xyz = [[np.nan] * 3] * 6\n if file_type == 'pdb':\n process_pdb(cfg, com_data, ring_xyz, file_path)\n elif file_type == 'sdf':\n process_sdf(cfg, com_data, ring_xyz, file_path)\n else:\n raise InvalidDataError(\"Found file format '{}'. This program currently reads only pdb and sdf file formats. \"\n \"If the file is one of these types, but has a different extension, specify the type \"\n \"with the '-t' command-line argument.\".format(file_type))\n\n # make sure to add empty line before footer\n com_data.append(['\\n' + prep_string(cfg[GAUSS_FOOTER])])\n com_file = create_out_fname(file_path, base_dir=cfg[OUT_DIR], ext='.com')\n list_to_file(com_data, com_file)\n\n if cfg[PRINT_CP_INPUT]:\n # gather header + 18 floats (6*xyz) for each row of cp input\n raw_cp_data = file_name + ' '\n for ring_atom in ring_xyz:\n raw_cp_data += ' '.join(['{:6.3f} '.format(num) for num in ring_atom])\n if 'nan' in raw_cp_data:\n warning('Did not find the expected six ring atoms. '\n 'For cp_params input, skipping file: {}'.format(file_path))\n else:\n cp_data.append(raw_cp_data)", "title": "" }, { "docid": "fb17c7da554b523fd81a45dd3d65e946", "score": "0.4616035", "text": "def parse(reader):\n\n # create dictionary to fill with countries and their stats\n data = {}\n\n # read the file, if a country has missing or invalid data,\n # do not include the country in the dictionary\n for row in reader:\n country = row['Country'].strip(' ')\n region = row['Region'].strip(' ')\n\n pop = row['Pop. Density (per sq. mi.)']\n if pop == 'unknown':\n continue\n else:\n pop = float(pop.replace(',', '.'))\n\n mort = row['Infant mortality (per 1000 births)']\n if mort:\n mort = float(mort.replace(',', '.'))\n else:\n continue\n\n gdp = row['GDP ($ per capita) dollars']\n if gdp == 'unknown' or gdp == '' \\\n or int(gdp.strip(' dollars')) > GDPMAX:\n continue\n else:\n gdp = int(gdp.strip(' dollars'))\n\n # create dictionary input for each country\n data[country] = {'Region': region, \"Pop. Density (per sq. 
mi.)\": pop,\n \"Infant mortality (per 1000 births)\": mort,\n \"GDP ($ per capita) dollars\": gdp}\n\n return data", "title": "" }, { "docid": "348496f0702476730c712b1eaf267828", "score": "0.46150127", "text": "def load_intermediate_data(conn, in_prefix_pre, in_prefix_proj, input_prefix_fs, input_prefix_pp, input_type, logfile):\n\n ertac_lib.load_csv_into_table(None, os.path.join(os.path.relpath(sys.path[0]), 'states.csv'), 'states', conn, ertac_tables.states_columns, logfile)\n # This section will reject any input rows that are missing required fields,\n # have unreadable data, or violate key constraints, because it is impossible\n # to store that data in the database tables.\n ertac_lib.load_csv_into_table(in_prefix_proj, 'calc_updated_uaf_v2.csv', 'calc_updated_uaf', conn, ertac_tables.calc_uaf_columns, logfile)\n conn.execute(\"\"\"DELETE FROM calc_updated_uaf WHERE camd_by_hourly_data_type = 'Non-EGU'\"\"\")\n ertac_lib.load_csv_into_table(in_prefix_pre, 'calc_input_variables_v2.csv', 'calc_input_variables', conn, ertac_tables.input_variable_columns, logfile) \n ertac_lib.load_csv_into_table(input_prefix_fs , 'ertac_pusp_info_file.csv', 'ertac_pusp_info_file', conn, pusp_info_file_columns, logfile)\n ertac_lib.load_csv_into_table(input_prefix_fs , 'ertac_base_year_rates_and_additional_controls.csv', 'ertac_base_year_rates_and_additional_controls', conn, additional_control_emission_columns, logfile)\n ertac_lib.load_csv_into_table(input_prefix_fs , 'ertac_additional_variables.csv', 'ertac_additional_variables', conn, additional_variables_columns, logfile)\n ertac_lib.load_csv_into_table(input_prefix_fs , 'ertac_rpo_listing.csv', 'ertac_rpo_listing', conn, rpo_columns, logfile)\n \n if input_prefix_pp is not None:\n ertac_lib.load_csv_into_table(input_prefix_pp, 'annual_unit_summary.csv', 'annual_summary', conn, annual_summary_columns, logfile)\n if input_type == 'ERTAC':\n ertac_lib.load_csv_into_table(in_prefix_proj, 'hourly_diagnostic_file_v2.csv', 'hourly_diagnostic_file', conn, ertac_reports.hourly_diagnostic_file, logfile)\n else:\n ertac_lib.load_csv_into_table(in_prefix_pre, 'calc_hourly_base.csv', 'calc_hourly_base', conn, ertac_tables.calc_hourly_columns, logfile)\n \n if ertac_tables.fuel_set != ertac_tables.default_fuel_set:\n logging.info(\"Default fuel set overwritten. Using: \" + str(ertac_tables.fuel_set))\n print(file=logfile)\n print(\"Default fuel set overwritten. Using: \" + str(ertac_tables.fuel_set), file=logfile) \n \n if ertac_tables.state_set != ertac_tables.default_state_set:\n logging.info(\"Default state set overwritten. Using: \" + str(ertac_tables.state_set))\n print(file=logfile)\n print(\"Default state set overwritten. 
Using: \" + str(ertac_tables.state_set), file=logfile)", "title": "" }, { "docid": "30a149484841593f5f6dc6c20107d7ea", "score": "0.46135226", "text": "def initialize_proteins_dict(self, infile):\n print('Loading ', infile)\n with open(infile, 'r') as file_handle:\n for line in file_handle:\n if line.startswith('#'):\n continue # skip header and comments\n else:\n line = line.rstrip('\\n\\r')\n line_tokens = line.split('\\t')\n if len(line_tokens) > 3:\n self.proteins_dict[line_tokens[0]]['taxid'] = line_tokens[1]\n self.proteins_dict[line_tokens[0]]['function'] = line_tokens[-1]\n self.proteins_dict[line_tokens[0]]['source'] = line_tokens[-2]\n print(len(self.proteins_dict), ' reference proteins found')", "title": "" }, { "docid": "f3731ec98365cb372931eb003c7006a8", "score": "0.46129557", "text": "def data():\n return IOUtilities.read_data(\"../data/ex1data1.txt\", ['Populations', 'Profit'])", "title": "" }, { "docid": "21c631510014295dfbcd700c1320f31a", "score": "0.4608976", "text": "def setupData(self) -> None: \r\n \r\n print(\"Importing data...\")\r\n t = time.perf_counter()\r\n self.datU = data.UsaData().data\r\n self.datC = data.CanadaData().data\r\n print(\"Done in\", time.perf_counter() - t)", "title": "" }, { "docid": "ade2f1e36918d9d2856afbb6ab82a5af", "score": "0.46012604", "text": "def get_population_estimate(year: str, city: str):\n pop_est = \"B01001_001E\" # according to 2019 variable list\n total_population = (\n base_url(year) + f\"?get=NAME,{pop_est}&for=place:{city}&in=state:{STATE}\"\n )\n try:\n r = requests.get(total_population)\n print(r.json())\n # [['NAME', 'B01001_001E', 'state', 'place'], ['Palm Springs city, California', '47897', '06', '55254']]\n except:\n print(total_population)\n print(\"Connection refused by the server..\")", "title": "" }, { "docid": "be1d51ade52403554a655296eff7e7b7", "score": "0.45959482", "text": "def load_sources(self, config_file) :\n\n\t\tself.sources = list()\n\t\twith open(config_file) as f :\n\t\t\tfor line in f :\n\t\t\t\tif \"volume_spheroid\" in line :\n\t\t\t\t\ttab = line.split(\" \")\n\t\t\t\t\tx1 = float(tab[2])*self.coef_x \n\t\t\t\t\tx2 = float(tab[3])*self.coef_y \n\t\t\t\t\tself.sources += [(int(round(x1)), int(round(x2)))]", "title": "" }, { "docid": "55cfffe3bcea438a63304773eee55b7c", "score": "0.45950368", "text": "def main():\r\n # Define variables\r\n intro = \"\\nThis program is made to help in researching about \\n\" \\\r\n \"Corruption in Developing countries in comparison with Ukraine \\n\"\r\n note = \"The list of all available countries is in config.py \\n\" \\\r\n \"Some information about specific countries or specific years \\n\" \\\r\n \"May be missing\"\r\n\r\n # Print start information\r\n print(intro)\r\n print('NOTE \\n')\r\n print(note)\r\n\r\n # Create CPI table\r\n global cpi_table\r\n cpi_table = GetData().get_cpi_table()\r\n\r\n # Make screenplay\r\n def screenplay():\r\n repeat = None\r\n choice = '\\nWould you like to know something more (y/n)? 
'\r\n mode = first_choose()\r\n sort = second_choose()\r\n country = third_choose(int(mode))\r\n year = fourth_choose(country, sort)\r\n if year:\r\n get_data(country, sort, year)\r\n\r\n while repeat != 'y' and repeat != 'n':\r\n repeat = input(choice)\r\n\r\n if repeat == 'y':\r\n screenplay()\r\n\r\n # Start\r\n screenplay()", "title": "" }, { "docid": "be3e68924f334f1d08f68cd3d4a8dbc4", "score": "0.45920566", "text": "def read_events(self):\n files = glob.glob(os.path.join(self.raw_folder, \"*\"))\n cameoCode = config.CAMEO_CODE\n text2code = {}\n for code, subset in cameoCode.items():\n for k in subset:\n text2code[k.lower().replace(\",\",\"\")] = str(subset[k])[:3]\n\n for f in files:\n with open(f) as icews_f:\n keys = icews_f.readline().strip().split(\"\\t\")[:-2]\n for line in icews_f:\n infos = line.strip().split(\"\\t\")\n event = {keys[i]:infos[i] for i in range(len(keys))}\n country = event.get(\"Country\", \"\")\n if self.country_list.get(country, 0) == 0: # country not in interested\n continue\n target_city = self.capital_city.get(country, \"\")\n eventText = event['Event Text'].lower().replace(\",\",\"\")\n eventDate = event['Event Date']\n eventCode = text2code.get(eventText, None)\n if not eventCode:\n continue\n\n #country event count\n self.daily_count.setdefault(eventCode, {})\n self.daily_count[eventCode].setdefault(country, {})\n count = self.daily_count[eventCode][country].setdefault(eventDate, 0)\n self.daily_count[eventCode][country][eventDate] = count + 1\n\n #city event count\n self.daily_count[eventCode].setdefault(target_city, {})\n count = self.daily_count[eventCode][target_city].setdefault(eventDate, 0)\n if country == \"Brazil\":\n city = event[\"Province\"]\n if city == target_city:\n self.daily_count[eventCode][target_city][eventDate] = count + 1\n elif country == \"Egypt\":\n city = event[\"City\"]\n if city == target_city or city == \"Tahrir Square\":\n self.daily_count[eventCode][target_city][eventDate] = count + 1\n else:\n city = event[\"City\"]\n if city == target_city:\n self.daily_count[eventCode][target_city][eventDate] = count + 1", "title": "" }, { "docid": "e798ebb96dd35b18e161ecd084ec6282", "score": "0.45895898", "text": "def test_preprocess_data(self):\n gene_map, ref_db, species = preprocess_data(self.working_dir,\n self.target_proteomes_dir,\n ['fa', 'fasta', 'faa'])\n gene_map_exp = {'G1_SE001': '0_0', 'G1_SE002': '1_0',\n 'G1_SE003': '2_0', 'G1_SE004': '3_0',\n '0_0': 'G1_SE001', '1_0': 'G1_SE002',\n '2_0': 'G1_SE003', '3_0': 'G1_SE004',\n 'G2_SE001': '0_1', 'G2_SE002': '1_1',\n 'G2_SE003': '2_1', 'G2_SE004': '3_1',\n '0_1': 'G2_SE001', '1_1': 'G2_SE002',\n '2_1': 'G2_SE003', '3_1': 'G2_SE004',\n 'G3_SE001': '0_2', 'G3_SE002': '1_2',\n 'G3_SE003': '2_2', 'G3_SE004': '3_2',\n '0_2': 'G3_SE001', '1_2': 'G3_SE002',\n '2_2': 'G3_SE003', '3_2': 'G3_SE004',\n 'G4_SE001': '0_3', 'G4_SE002': '1_3',\n 'G4_SE003': '2_3', 'G4_SE004': '3_3',\n '0_3': 'G4_SE001', '1_3': 'G4_SE002',\n '2_3': 'G4_SE003', '3_3': 'G4_SE004',\n 'G5_SE001': '0_4', 'G5_SE002': '1_4',\n 'G5_SE003': '2_4', 'G5_SE004': '3_4',\n '0_4': 'G5_SE001', '1_4': 'G5_SE002',\n '2_4': 'G5_SE003', '3_4': 'G5_SE004'}\n ref_db_exp = {}\n with open(self.species_1_fp, 'U') as fh:\n for label, seq in parse_fasta(fh):\n ref_db_exp[label] = seq\n with open(self.species_2_fp, 'U') as fh:\n for label, seq in parse_fasta(fh):\n ref_db_exp[label] = seq\n with open(self.species_3_fp, 'U') as fh:\n for label, seq in parse_fasta(fh):\n ref_db_exp[label] = seq\n with open(self.species_4_fp, 'U') as 
fh:\n for label, seq in parse_fasta(fh):\n ref_db_exp[label] = seq\n num_species_exp = 4\n self.assertDictEqual(gene_map, gene_map_exp)\n self.assertDictEqual(ref_db, ref_db_exp)\n self.assertEqual(species, num_species_exp)", "title": "" }, { "docid": "b2e0bbd00e74437eab4afffbd4da653c", "score": "0.4589471", "text": "def main():\n cur_state = \"Alabama\"\n cur_cumex = 0\n with open(outfil, 'wb') as out:\n for line in open(infile, 'rU'):\n if line.startswith(\"State\"):\n head_dat = [\"State\", \"FY\", \"Group_Name\", \"Common_Name\", \n \"Scientific_Name\", \"Population\", \n \"General_Expenditures\", \"Land_Expenditures\", \n \"Grand_Total\", \"State_cumulative\"]\n out.write(\"\\t\".join(head_dat) + \"\\n\")\n elif not line.lstrip()[0].isdigit():\n rec = Record(line, cur_cumex)\n\n if rec.state != cur_state and rec.state != \"\":\n cur_state = rec.state\n rec.cumulative_tot = rec.total_exp\n cur_cumex = rec.cumulative_tot\n elif rec.state == \"\":\n rec.state = cur_state\n cur_cumex = rec.cumulative_tot\n elif rec.state == cur_state:\n cur_cumex = rec.cumulative_tot\n\n new_dat = [rec.state, rec.year, rec.group, rec.common, \n rec.scientific, rec.population, str(rec.general_exp), \n str(rec.land_exp), str(rec.total_exp), \n str(rec.cumulative_tot)]\n newl = \"\\t\".join(new_dat) + \"\\n\"\n out.write(newl)\n else:\n pass", "title": "" }, { "docid": "09f4a7e7c3f6f73632245986dfe3ce3d", "score": "0.4575125", "text": "def file_1_load_result():\n return [\n {\n 'country': 'us',\n 'city': 'newyork',\n 'state': 'ny',\n 'zip': '23424'\n }\n ]", "title": "" }, { "docid": "37a350105c4ec0d33834bf608be5ec6b", "score": "0.45725584", "text": "def initialize_data():\n description = \"This is a platform to openly collaborate with others to \" \\\n \"build and launch a startup. 
We have rethought the way \" \\\n \"companies are formed based on the idea that an openly \" \\\n \"developed startup has major advantages over its closed \" \\\n \"counterparts.\"\n project = mixer.blend(\n 'project.project', name=\"joltem\", title=\"Joltem\",\n description=description, total_shares=1000000, impact_shares=850000,\n exchange_periodicity=12, exchange_magnitude=25)\n mixer.blend(\n 'git.repository', project=project, name=\"Test repository\",\n description=\"An empty repository for you to play with.\")\n admin = mixer.blend('joltem.user', username='emil', first_name='Emil',\n is_superuser=True, is_staff=True,\n password='123')\n project.admin_set.add(admin)\n project.subscriber_set.add(admin)\n project.founder_set.add(admin)\n first_names = ('Becky', 'Bob', 'Ian', 'Jill', 'Kate', 'Will')\n users = mixer.cycle(6).blend(\n 'joltem.user',\n first_name=(first_name for first_name in first_names),\n username=mixer.MIX.first_name(lambda x: x.lower()),\n password='123')\n project.subscriber_set.add(*users)\n project.save()\n mixer.blend('project.equity', user=admin, project=project,\n shares=150000)", "title": "" }, { "docid": "4fee86183328388e218ee90adcdad17d", "score": "0.4569187", "text": "def localInitialize(self):\n # check the source\n if self.assemblerDict['Source'][0][0] == 'Files':\n self.readingFrom = 'File'\n csvFile = self.assemblerDict['Source'][0][3]\n csvFile.open(mode='r')\n headers = [x.replace(\"\\n\",\"\").strip() for x in csvFile.readline().split(\",\")]\n data = np.loadtxt(self.assemblerDict['Source'][0][3], dtype=np.float64, delimiter=',', skiprows=1, ndmin=2)\n lenRlz = len(data)\n csvFile.close()\n for var in self.toBeSampled:\n for subVar in var.split(','):\n subVar = subVar.strip()\n sourceName = self.nameInSource[subVar]\n if sourceName not in headers:\n self.raiseAnError(IOError, f\"variable {sourceName} not found in the file {csvFile.getFilename()}\")\n self.pointsToSample[subVar] = data[:,headers.index(sourceName)]\n subVarPb = 'ProbabilityWeight-'\n if subVarPb+sourceName in headers:\n self.infoFromCustom[subVarPb+subVar] = data[:, headers.index(subVarPb+sourceName)]\n else:\n self.infoFromCustom[subVarPb+subVar] = np.ones(lenRlz)\n if 'PointProbability' in headers:\n self.infoFromCustom['PointProbability'] = data[:, headers.index('PointProbability')]\n else:\n self.infoFromCustom['PointProbability'] = np.ones(lenRlz)\n if 'ProbabilityWeight' in headers:\n self.infoFromCustom['ProbabilityWeight'] = data[:, headers.index('ProbabilityWeight')]\n else:\n self.infoFromCustom['ProbabilityWeight'] = np.ones(lenRlz)\n\n self.limit = len(utils.first(self.pointsToSample.values()))\n else:\n self.readingFrom = 'DataObject'\n dataObj = self.assemblerDict['Source'][0][3]\n lenRlz = len(dataObj)\n self.pointsToSample = dataObj.sliceByIndex(dataObj.sampleTag)\n for var in self.toBeSampled:\n for subVar in var.split(','):\n subVar = subVar.strip()\n sourceName = self.nameInSource[subVar]\n if sourceName not in dataObj.getVars() + dataObj.getVars('indexes'):\n self.raiseAnError(IOError, f\"the variable {sourceName} not found in {dataObj.type} {dataObj.name}\")\n self.limit = len(self.pointsToSample)\n self.sourceIndexMap = dataObj.getDimensions()\n # if \"index\" provided, limit sampling to those points\n if self.indexes is not None:\n self.limit = len(self.indexes)\n maxIndex = max(self.indexes)\n if maxIndex > len(self.pointsToSample) - 1:\n self.raiseAnError(IndexError, f'Requested index \"{maxIndex}\" from custom sampler, but highest index sample is 
\"{len(self.pointsToSample) - 1}\"!')\n # TODO: add restart capability here!\n if self.restartData:\n self.raiseAnError(IOError, \"restart capability not implemented for CustomSampler yet!\")\n if self.batch > 1:\n self.addMetaKeys([\"batchId\"])", "title": "" }, { "docid": "c2cbdd174c4fcb0435826eef61c51269", "score": "0.45687363", "text": "def source_data():\n curdir = os.path.dirname(os.path.abspath(__file__))\n source_file = open(curdir + \"/data_source\", \"r\")\n json_dict = source_file.read()\n source_file.close()\n\n data_dict = json.loads(json_dict)\n\n return data_dict.values()", "title": "" }, { "docid": "14b886bee6c68447b1ef22dc12e64b94", "score": "0.45492828", "text": "def get_source_data(gb: Bio.GenBank):\n try:\n assert gb.features[0].key == 'source', 'source information not found'\n source = gb.features[0]\n if source.key != 'source':\n return None\n data = {}\n for q in source.qualifiers:\n key = q.key.strip('=/')\n value = q.value.strip('\"')\n data[key] = value\n return data\n except AssertionError as e:\n logging.warning(f\"{e}\")", "title": "" }, { "docid": "4a5dd4655c8ce2085cd608e6a5234322", "score": "0.4546994", "text": "def update_structure_has_source_table(input_file, NPDatabase_path, new_source_name):\n\n conn = sqlite3.connect(NPDatabase_path)\n c = conn.cursor()\n \n # Retrieve all isomeric smiles and structure_id's from sqlite database\n c.execute('SELECT * from structure')\n\n sqlite_combo_list = []\n for row in c:\n structure_id = row[0]\n iso_smiles = row[1]\n temp_combo_list = [structure_id, iso_smiles]\n sqlite_combo_list += [temp_combo_list]\n temp_combo_list = []\n\n # Adds the correct structure id to the isomeric smiles from the parsed input data\n structure_has_source_list = []\n all_lines = input_file.split('\\n')\n for line in all_lines[1:-1]:\n line = line.split('|||')\n iso_smiles_mibig = line[1]\n external_id = line[0]\n for combo in sqlite_combo_list:\n sqlite_iso_smiles = combo[1]\n structure_id = combo[0]\n if iso_smiles_mibig == sqlite_iso_smiles:\n temp_structure_has_source_list = [structure_id, iso_smiles_mibig]\n structure_has_source_list += [temp_structure_has_source_list]\n temp_structure_has_source_list = []\n \n # Retrieve all isomeric smiles and structure_id's from sqlite database\n c.execute('SELECT * from structure_has_data_source')\n structure_source_id_sqlite_list = []\n external_source_id_sqlite_list = []\n for row in c:\n structure_source_id_sqlite_list += [row[0]]\n external_source_id_sqlite_list += [row[3]]\n \n # For structure_id's: delete \"NPSRC:\" and only keep the nr\n structure_source_id_list = []\n for structure_id in structure_source_id_sqlite_list[1:]:\n structure_source_id_list += [structure_id[7:]]\n\n # Get the highest structure_source_id nr \n structure_source_id_nr = max(structure_source_id_list)\n \n \n external_source_name = new_source_name\n structure_source_count = 0\n sql_list = []\n \n # Check whether external source id from parsed input is already in structure_has_data_source table and created a sql list\n for combo in structure_has_source_list:\n\n structure_id = combo[0]\n external_source_id = combo[1]\n\n if external_source_id in external_source_id_sqlite_list:\n print (external_source_id, ' already in database')\n if external_source_id not in external_source_id_sqlite_list:\n structure_source_count += 1\n structure_source_id_nr = int(structure_source_id_nr)+1\n new_structure_source_id = 'NPSRC:00{}'.format(structure_source_id_nr)\n sql_string = \"INSERT INTO structure_has_data_source VALUES\\\n 
('%s','%s', '%s', '%s');\"% (new_structure_source_id, structure_id, external_source_id, external_source_name)\n sql_list += [sql_string] \n structure_source_id_nr = structure_source_id_nr+1\n \n source_name_count_list = [external_source_name, structure_source_count]\n\n # Add new structure source data to structure_has_data_source table\n for i in range(len(sql_list)):\n c.execute(sql_list[i])\n\n # Commit changes\n conn.commit()\n\n # Close the connection\n conn.close()\n \n return source_name_count_list", "title": "" }, { "docid": "3b3f5d5e83b587b912b35154fb7872a4", "score": "0.45369524", "text": "def load_config_developer(cfg_file):\n with open(cfg_file, 'r', encoding='utf-8') as file:\n cfg = yaml.safe_load(file)\n\n if 'obs4mips' in cfg:\n logger.warning(\n \"Correcting capitalization, project 'obs4mips'\"\n \" should be written as 'obs4MIPs' in %s\", cfg_file)\n cfg['obs4MIPs'] = cfg.pop('obs4mips')\n\n for project, settings in cfg.items():\n for site, drs in settings.get('input_dir', {}).items():\n # Since v2.8, 'version' can be used instead of 'latestversion'\n if isinstance(drs, list):\n drs = [d.replace('{latestversion}', '{version}') for d in drs]\n else:\n drs = drs.replace('{latestversion}', '{version}')\n settings['input_dir'][site] = drs\n CFG[project] = settings\n\n read_cmor_tables(cfg_file)", "title": "" }, { "docid": "02d8de85d26f2e6bdfa678b91346d66f", "score": "0.45360884", "text": "def parse_pen_print_pop_output(file_path, skiprows=10, skipfooter=1):\n print(f'Loading printed population output...')\n # the garbage column exist since the C output puts a comma after the last value\n df = pd.read_csv(file_path, skiprows=skiprows, skipfooter=skipfooter,\n index_col=0, names=['x', 'y', 'garbage'], engine='python')\n\n # filter correct rows\n df = df[['x', 'y']]\n\n # filter out rows starting with #\n filter_row_idx = [row_idx for row_idx in df.index if row_idx.startswith('#')]\n df.drop(filter_row_idx, inplace=True)\n\n population_size = viz_utils.get_n_until_first_repeat(df.index)\n\n evolution_data = []\n\n counter = 0\n iter_counter = 0\n step_dict = {}\n for pengu, row in df.iterrows():\n step_dict[pengu] = list(row)\n if counter == (population_size - 1):\n evolution_data.append(step_dict)\n iter_counter += 1\n step_dict = {}\n counter = 0\n else:\n counter += 1\n\n # TODO: There is still a bug in iter_counter.\n print(f'Detected population size: {population_size}.\\n'\n f'{iter_counter - 1} iterations plus initial population.\\n'\n f'Data loaded (hopefully you didn\\'t forget to print the initial population).')\n\n return evolution_data", "title": "" }, { "docid": "7526e209fdfcef6363f9852618181917", "score": "0.45338815", "text": "def city_country(city, country, population):\n output_string = f\"{city.title()}, {country.title()}\"\n output_string += f\" -population {population}\"\n return output_string", "title": "" }, { "docid": "a35ce8a9b011560c2e7809cb40fdd0f2", "score": "0.4531226", "text": "def main():\n args = args_fetch()\n logger = logger_fetch(args.get('log_level'))\n if args['test']:\n logger.info(\"Testing\")\n objs = Location.objects.all()\n for obj in objs:\n logger.info(obj.id)\n if args['import']:\n filename = \"../import_data/census_state.csv\"\n df = pd.read_csv(filename)\n logger.info(df.head())\n for index, row in df.iterrows():\n code = str(row.get('state_code', '')).lstrip().rstrip()\n name = row.get('name', None).lstrip().rstrip()\n logger.info(name)\n myLocation = Location.objects.filter(code=code).first()\n if myLocation is None:\n myLocation = 
Location.objects.create(code=code, name=name)\n myLocation.name = name\n myLocation.state_name = name\n myLocation.state_code = code\n myLocation.location_type = 'state'\n myLocation.save()\n\n filename = \"../import_data/census_district.csv\"\n df = pd.read_csv(filename)\n logger.info(df.head())\n for index, row in df.iterrows():\n code = str(row.get('district_code', '')).lstrip().rstrip()\n state_code = str(row.get('state_code', '')).lstrip().rstrip()\n name = row.get('name', None).lstrip().rstrip()\n logger.info(name)\n myLocation = Location.objects.filter(code=code).first()\n if myLocation is None:\n myLocation = Location.objects.create(code=code, name=name)\n myLocation.name = name\n myLocation.district_name = name\n myLocation.district_code = code\n myLocation.state_code = state_code\n myLocation.location_type = 'district'\n myLocation.save()\n\n \n logger.info(\"...END PROCESSING\")", "title": "" }, { "docid": "310e7b1c10076a02876f3f4cf7e6a990", "score": "0.45308974", "text": "def load_gcp_data(self):\n pass", "title": "" }, { "docid": "737de9b407b601b07ba10ed952be907b", "score": "0.45303386", "text": "def GEO_0003 (Place):\n CountryCode, PlaceId = CountryCode_PlaceID(Place)\n CategoryNameList = []\n CategoryNames = Place.findall(ns+\"CategoryName\")\n for Category in CategoryNames:\n Name = Category.find(ns+\"Text\").text\n CategoryNameList.append(Name)\n try:\n FirstCategory = CategoryNameList[0]\n except:\n FirstCategory = \"None\"\n\n ExternalReferenceList = Place.findall(ns+\"ExternalReference\")\n ExternalRefSystem = \"\"\n POI_PVID = \"\"\n for ExternalReference in ExternalReferenceList:\n ExternalRefattrib = ExternalReference.attrib\n if 'system' in ExternalRefattrib.keys():\n ExternalRefSystem = ExternalRefattrib['system']\n ExternalReferenceID = ExternalReference.find(ns+\"ExternalReferenceID\")\n ExternalReferenceID_attrib = ExternalReferenceID.attrib\n if 'type' in ExternalReferenceID_attrib.keys():\n ExternalReftype = ExternalReferenceID_attrib['type']\n if ExternalReftype == \"SUPPLIER_POIID\":\n POI_PVID = ExternalReferenceID.text\n\n return_emits = []\n## if CountryCode == \"LUX\":\n LocationList = Place.findall(ns+\"Location\")\n for Location in LocationList:\n LocationAttributes = Location.attrib\n if 'supplier' in LocationAttributes.keys():\n Location_supplier_value = LocationAttributes['supplier']\n try:\n POIName = Location.find(ns+\"BaseText\").text\n except:\n POIName = \"None\"\n\n Link = Location.find(ns+\"Link\")\n LinkAttributes = Link.attrib\n if 'linkPvid' in LinkAttributes.keys():\n LinkPvid = LinkAttributes['linkPvid']\n GeoPositionList = Location.findall(ns+\"GeoPosition\")\n for GeoPosition in GeoPositionList:\n GeoPositionAttributes = GeoPosition.attrib\n if 'type' in GeoPositionAttributes.keys():\n Routing_Display = GeoPositionAttributes['type']\n Longitude = GeoPosition.find(ns+\"Longitude\")\n Latitude = GeoPosition.find(ns+\"Latitude\")\n\n if Routing_Display == \"ROUTING\" and ExternalRefSystem == \"corepoixml\" and POI_PVID and Location_supplier_value == \"NOKIA_GEOCODER\":\n try:\n ROUTING_LONG = str(Longitude.text)\n ROUTING_LAT = str(Latitude.text)\n except:\n ROUTING_LONG = \"\"\n ROUTING_LAT = \"\"\n try:\n # GEO_0003|CountryCode|PlaceId|LinkPvid|Category|POIName|POI_PVID|ROUTING_LAT|ROUTING_LONG\n emit_string = 'GEO_0003|'+CountryCode+'|'+PlaceId+'|'+LinkPvid+'|'+FirstCategory+'|'+POIName+'|'+POI_PVID+'|'+ROUTING_LAT+'|'+ROUTING_LONG\n emit_string = emit_string.encode('UTF-8')\n return_emits.append(emit_string)\n except:\n pass\n return 
return_emits", "title": "" }, { "docid": "0b7d6231256fcaa4725b6a6b1d4f2296", "score": "0.45281202", "text": "def data_sources():\n \n return render_template(\"data_sources.html\",xpage=\"Data Sources\")", "title": "" }, { "docid": "ebaa858c0ec84499d50985ac84cb9ae0", "score": "0.45258352", "text": "def test_city_country_population(self):\n formatted_string = get_city_name('santiago', 'chile', 5000000)\n self.assertEqual(formatted_string, 'Santiago, Chile - population 5000000')", "title": "" }, { "docid": "a2195db64751467fef9ff87c5a857633", "score": "0.4524292", "text": "def get_sources(source_model_file, discretisation=50.):\n converter = SourceConverter(50, 10, width_of_mfd_bin=0.1,\n area_source_discretization=discretisation)\n parser = SourceModelParser(converter)\n try:\n sources = parser.parse_sources(source_model_file)\n except AttributeError: # Handle version 2.1 and above\n sources = []\n groups = parser.parse_src_groups(source_model_file)\n for group in groups:\n for source in group:\n sources.append(source)\n# name = 'test_point_model'\n new_sources = {}\n for source in sources:\n #pt_sources = area_to_point_sources(source)\n #for pt in pt_sources:\n # pt.source_id = pt.source_id.replace(':','')\n # pt.name = pt.name.replace(':','_')\n try:\n new_sources[source.tectonic_region_type].append(source)\n except KeyError:\n new_sources[source.tectonic_region_type] = [source]\n return new_sources", "title": "" }, { "docid": "ddf3a731056102d123533031de4d5dc2", "score": "0.45206356", "text": "def main():\n mission = Mission.current()\n entries = []\n\n for d in os.listdir(SKINS_DIR):\n path = SKINS_DIR.joinpath(d).resolve()\n\n if not path.is_dir():\n print('Skipping %s as it is not a dir' % d)\n\n entry = Entry(path, mission)\n entry.process()\n entries.append(entry)\n\n for path, cfg, include_self in (('CfgTextures.hpp', mission.cfg_textures, False),\n ('CfgVehicles.hpp', mission.cfg_vehicles, False),\n ('CfgWhitelistedGangs.hpp', mission.cfg_whitelist, True)\n ):\n with open(mission.config.joinpath(path), 'w') as fp:\n for x in encode(cfg, include_self=include_self, indent=4):\n fp.write(x)\n\n with open(mission.config.joinpath('gangs/uniforms.inc.hpp'), 'w') as fp:\n encoder = Encoder(indent=4)\n\n clothes = []\n for i in entries:\n clothes.extend(i.clothes)\n\n for x in encoder.encode(clothes):\n fp.write(x)", "title": "" }, { "docid": "8f11f7844202632e233e59cf662b535f", "score": "0.45162493", "text": "def update_sources() -> None:", "title": "" }, { "docid": "486d57873483105dc65f576675602b8b", "score": "0.45091283", "text": "def __init__(self, *args, **kwargs):\n self.config = ConfigParser()\n self.config.read('collectors.cfg')", "title": "" } ]
498e44c7bfe87ed358f51196e968c737
Calls the string in a subprocess and dies if the return value is not 0
[ { "docid": "03c86c546da2e8f5032a29efab714fae", "score": "0.61249554", "text": "def call(self, args, strict = True, cwd = None):\n self.dbg(\"Executing: %s\" % args)\n rv = subprocess.call(args, env = self._env(), cwd = self._cwd(cwd))\n if strict and not rv == 0:\n raise NonZeroReturnCode(rv, \"%s => %d\" % (\" \".join(args), rv))\n return rv", "title": "" } ]
[ { "docid": "e85a19aea3de35751e1b7ea767315347", "score": "0.75284", "text": "def run(string):\n\n # shlex.split will preserve inner quotes\n prog = shlex.split(string)\n p0 = subprocess.Popen(prog, stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n\n stdout0, stderr0 = p0.communicate()\n rc = p0.returncode\n p0.stdout.close()\n\n return stdout0, stderr0, rc", "title": "" }, { "docid": "5ae283af79f3ca06b1c7618d8bedffa9", "score": "0.75201637", "text": "def run(string):\n\n # shlex.split will preserve inner quotes\n prog = shlex.split(string)\n p0 = subprocess.Popen(prog, stdin=None, stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n stdout0, _ = p0.communicate()\n rc = p0.returncode\n stdout = stdout0.decode('utf-8')\n\n return stdout, rc", "title": "" }, { "docid": "89cd1a7cb37b6f6b5fdbbb0175efb8f3", "score": "0.69314396", "text": "def runCommand(command, inputVal=''):\n if (inputVal == ''):\n print(command)\n ret_code=subprocess.call(command, shell=True)\n if ret_code:\n printColor((\"bad return code for command execution: %s\" % ret_code), \"red\")\n exit()\n return ret_code", "title": "" }, { "docid": "819abf356d7e60029576dcaaa0cd4dbd", "score": "0.692702", "text": "def _(*args, **kwargs):\n print(\"\\n\\t\", \" \".join(args), \"\\n\")\n return_code = subprocess.call(args, **kwargs)\n if return_code != 0:\n print(\"\\nERROR\", return_code, \" \".join(args))\n sys.exit(return_code)", "title": "" }, { "docid": "ef8ce750ee507240dfd76d6665a55f42", "score": "0.68731546", "text": "def test_process_run_completes_successfully():\n output = process.run(\"echo\", \"success\", stdout=subprocess.PIPE).stdout.strip()\n assert output == \"success\"", "title": "" }, { "docid": "7d949b4fbb2e2f8bf1efad72ec35e17b", "score": "0.68483514", "text": "def call_process(command):\n return check_output(command)", "title": "" }, { "docid": "01fe883dbfb6aa07efb1c6f606e89a7c", "score": "0.67199826", "text": "def runSubprocess(command):\n try:\n return 0, subprocess.check_output(command, shell=True)\n except subprocess.CalledProcessError as e:\n return e.returncode, e.output", "title": "" }, { "docid": "a70c5304269cde11ca91ecbe0ed6c842", "score": "0.6693046", "text": "def run(cmd, chk_err=True):\n retcode,out=run_get_output(cmd,chk_err)\n return retcode", "title": "" }, { "docid": "c067fc463c644f6e3c9219d4c1cd59b2", "score": "0.66722876", "text": "def run_command(command):\n result = os.system(command)\n return 0 if result == 0 else 1", "title": "" }, { "docid": "b18cd382a80dae49f7d99a43b1da876d", "score": "0.66708356", "text": "def _system_call(cmd):\n proc = Popen(cmd, universal_newlines=True, shell=True, stdout=PIPE,\n stderr=PIPE)\n # Communicate pulls all stdout/stderr from the PIPEs\n # This call blocks until the command is done\n stdout, stderr = proc.communicate()\n return_value = proc.returncode\n return stdout, stderr, return_value", "title": "" }, { "docid": "b12b384ceb4bd10af0722c1a78f90ea0", "score": "0.665769", "text": "def system(cmd):\n ret = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)\n out, err = ret.communicate()\n returncode = ret.returncode\n return out, err, returncode", "title": "" }, { "docid": "184e15f59e4eaaa2a856e00dcfa42e77", "score": "0.66543406", "text": "def is_return_code_zero(args):\n with open(os.devnull, 'wb') as FNULL:\n try:\n subprocess.check_call(args, stdout=FNULL, stderr=FNULL)\n except subprocess.CalledProcessError:\n # The given command returned an error\n return False\n except OSError:\n 
# The given command was not found\n return False\n return True", "title": "" }, { "docid": "646b8c054429010b97cab8b9858f8be0", "score": "0.66400975", "text": "def run_subprocess(self, args, **kw):\n # print (\"20150214 run_subprocess %r\" % args)\n p = self.open_subprocess(args, **kw)\n\n # wait() will deadlock when using stdout=PIPE and/or\n # stderr=PIPE and the child process generates enough output to\n # a pipe such that it blocks waiting for the OS pipe buffer to\n # accept more data. Use communicate() to avoid that.\n if False:\n p.wait()\n else:\n out, err = p.communicate()\n # raise Exception(\"20160711 run_subprocess\", out)\n rv = p.returncode\n # kw.update(stderr=buffer)\n # rv = subprocess.call(args,**kw)\n if rv != 0:\n cmd = ' '.join(args)\n # if six.PY2:\n # # if the output contains non-asci chars, then we must\n # # decode here in order to wrap it into our msg. Later\n # # we must re-encode it because exceptions, in Python\n # # 2, don't want unicode strings.\n # out = out.decode(\"utf-8\")\n msg = \"%s (%s) returned %d:\\n-----\\n%s\\n-----\" % (\n cmd, kw, rv, out)\n # try:\n # msg = \"%s (%s) returned %d:\\n-----\\n%s\\n-----\" % (\n # cmd, kw, rv, out)\n # except UnicodeDecodeError:\n # out = repr(out)\n # msg = \"%s (%s) returned %d:OOPS\\n-----\\n%s\\n-----\" % (\n # cmd, kw, rv, out)\n\n # print msg\n # if six.PY2:\n # msg = msg.encode('utf-8')\n self.fail(msg)", "title": "" }, { "docid": "6bfbe44317a566ca188d230b098d96e0", "score": "0.6637921", "text": "def popenCatch(command):\n logger.debug(\"Running the command: %s\" % command)\n process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)\n sts = os.waitpid(process.pid, 0)\n i = sts[1]\n if i != 0:\n raise RuntimeError(\"Command: %s exited with non-zero status %i\" % (command, i))\n return process.stdout.read().strip()", "title": "" }, { "docid": "96aeefdc406470052ec48fbc6dd32356", "score": "0.66302043", "text": "def system_call(command:str) -> int:\n\n return(subprocess.call(shlex.split( command ), shell=False))", "title": "" }, { "docid": "496d83400846ee49debd5de145d3f365", "score": "0.6604856", "text": "def execute(args, stderr=DEVNULL):\n try:\n return check_output(args, stderr=stderr).decode('utf-8')\n except CalledProcessError as exc:\n return exc.output.decode('utf-8')", "title": "" }, { "docid": "7282f3a8d1f4b920a6240ab7f34939dd", "score": "0.6542439", "text": "def run_notfatal(*cmd):\n try:\n print 'Run \"%s\"' % (cmd,)\n return subprocess.check_output(*cmd,shell=True)\n except Exception,e:\n print 'Warning : %s' % e\n return ''", "title": "" }, { "docid": "4be8acbb6fe8c977532b2f6f9acd2ec5", "score": "0.65413505", "text": "def test_zero_args(self):\n R = subprocess.run(['./'+program],input=simple_test_input.encode(), **self.Parms)\n self.assertEqual(R.returncode,0)\n self.assertEqual(simple_test_input,R.stdout.decode())", "title": "" }, { "docid": "1591f6369954df718ced40dca3b11f3f", "score": "0.6538287", "text": "def system_command(command):\n\n\tstatus = subprocess.call(command)\n\n\treturn status", "title": "" }, { "docid": "bb16a9a8cd922dc9a1ef519bda23a13b", "score": "0.6518003", "text": "def runCommand(*args):\n a = subprocess.call(args)\n if a != 0:\n sys.stderr.write(\"The command gave an error:\\n %s\\n\" % \" \".join(args))\n sys.exit(1)", "title": "" }, { "docid": "bc3d2333f4002d6be8f78dd43e0ae9d1", "score": "0.6500414", "text": "def call(cmd):\n return subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True).communicate()[0].decode('utf-8')", "title": "" }, 
{ "docid": "1a3ac4b76362925efd207503b810a07b", "score": "0.6477877", "text": "def __call__(self):\n try:\n subprocess.check_call(\n self._cmd,\n timeout=self._timeout_secs,\n shell=True,\n preexec_fn=self._preexec_fn)\n return True, None\n except subprocess.CalledProcessError as reason:\n # The command didn't return a 0 so provide reason for failure.\n return False, str(reason)\n except subprocess.TimeoutExpired:\n return False, 'Health check timed out.'\n except OSError as e:\n return False, 'OSError: %s' % e.strerror\n except ValueError:\n return False, 'Invalid commmand.'", "title": "" }, { "docid": "fbe0df94c771b3a521bd7bd9b3ed0b88", "score": "0.64669764", "text": "def run_cmd(command, print_warning_on_fail=True):\n print('[Exec] %s' % ' '.join(command))\n return_code = subprocess.call(command)\n \n if return_code:\n print(\"The command '%s' failed with return code: %s\" % (' '.join(command), return_code))\n print(\"Ignoring and moving on to the next example\")\n \n return return_code", "title": "" }, { "docid": "70b9b1af3b13f9ac0c2df3ef4cd78ced", "score": "0.6442572", "text": "def run(cmd):\n res = os.system(cmd)\n # Assumes that if a process doesn't call exit, it was successful\n if (os.WIFEXITED(res)):\n code = os.WEXITSTATUS(res)\n if code != 0:\n print \"Error: return code: \" + str(code)\n sys.exit(code)", "title": "" }, { "docid": "8251d3a11734c9bdcfb8fda48d4ce87f", "score": "0.6439539", "text": "def run_cmd(cmd, return_p=False):\n if return_p:\n r = os.popen(cmd)\n text = r.read()\n r.close()\n return text\n # p = subprocess.Popen(cmd.split(' '), stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n # text = p.stdout.read()\n # return text\n else:\n os.system(cmd)", "title": "" }, { "docid": "296b46bfb75d11deea01d6e71e6569cb", "score": "0.6410772", "text": "def safe_cmd(args):\n cmd = subprocess.Popen(args, stderr=subprocess.PIPE)\n (_, p_err) = cmd.communicate()\n if cmd.returncode != 0:\n raise Exception('Command %s failed with exit code %i, stderr:\\n%s' %\n (' '.join(args),\n cmd.returncode,\n p_err.decode('utf-8')))", "title": "" }, { "docid": "dbc0f748027a6b51760926eb4524eb3b", "score": "0.6407955", "text": "def cmd_failable(cmd):\n print 'Executing: %s' % cmd\n subprocess.call(cmd, shell=True)", "title": "" }, { "docid": "483b186e48854d7df1467c633a60bfd6", "score": "0.63940644", "text": "def run_or_fail(self):\n\n self.proc.run()\n results = self.proc.get_results()\n assert results.rc == 0, (\"\"\n \"{cmd} failed: args='{args}'\\n{stderr}\".format(\n cmd=self.cmd,\n args=' '.join(self.args),\n stderr=results.stderr))\n self.results = results\n return results.stdout, results.stderr", "title": "" }, { "docid": "b6ab0a9a0b94c0ad9411d72c83944491", "score": "0.63874185", "text": "def execute(commandline, need_output = False):\n time = datetime.datetime.now()\n logger.info('starting \"%s\"'% commandline)\n if need_output:\n process = subprocess.Popen(commandline, bufsize=1, stdout=subprocess.PIPE, shell=True)\n else:\n process = subprocess.Popen(commandline, bufsize=1, shell=True)\n process.wait()\n if process.returncode:\n logger.error('[%s] failed \"%s\" in %s'%(process.returncode, commandline, str(datetime.datetime.now() - time)))\n else:\n logger.info('[%s] completed \"%s\" in %s'%(process.returncode, commandline, str(datetime.datetime.now() - time)))\n if need_output:\n return process.stdout.read()\n return process.returncode", "title": "" }, { "docid": "6a376244075f41bc8e238a3327cd8884", "score": "0.63788265", "text": "def run(cmd, env):\n res = os.system(cmd)\n if 
(os.WIFEXITED(res)):\n code = os.WEXITSTATUS(res)\n return code\n # Assumes that if a process doesn't call exit, it was successful\n return 0", "title": "" }, { "docid": "6a05829fe9cf9fd60927230676d1c5e1", "score": "0.6360902", "text": "def _run(cmd: List[str]) -> bytes:\n\n with tempfile.TemporaryFile('rb+') as outf:\n # pylint: disable=consider-using-with\n proc = subprocess.Popen(\n cmd,\n stdout=outf,\n stderr=subprocess.STDOUT\n )\n code = proc.wait()\n outf.seek(0)\n\n if code != 0:\n output = outf.read().decode('utf-8')\n raise RuntimeError(output)\n\n return outf.read()", "title": "" }, { "docid": "2d7a8a811ed60afc367ebf68d4b4ff95", "score": "0.63350934", "text": "def trycmd(*args, **kwargs):\n discard_warnings = kwargs.pop('discard_warnings', False)\n\n try:\n out, err = execute(*args, **kwargs)\n failed = False\n except ProcessExecutionError as exn:\n out, err = '', str(exn)\n failed = True\n\n if not failed and discard_warnings and err:\n # Handle commands that output to stderr but otherwise succeed\n err = ''\n\n return out, err", "title": "" }, { "docid": "1982478a65d046d3c9f8b8740f62d594", "score": "0.633249", "text": "def runCommand(command):\n print(command)\n ret_code = subprocess.call(command, shell=True)\n if ret_code:\n printColor((\"bad return code for command execution %s: %s\" %\n (command, ret_code)), \"red\")\n exit()\n return ret_code", "title": "" }, { "docid": "dcc658fe0ca6e921ebf4e078689206b0", "score": "0.63152224", "text": "def run_cmd(cmd):\n if VERBOSE:\n print(\" \" + cmd)\n try:\n out = sp.check_output(cmd, shell=True)\n except sp.CalledProcessError as exc:\n out = exc.output\n return out", "title": "" }, { "docid": "b327d4f6297f80ed5d69ca49db3fa646", "score": "0.63144475", "text": "def run_cmd(command):\n try:\n ret = check_call(command, shell=True)\n return {'success': True, 'return': ret, 'exception': None}\n except CalledProcessError, cpe:\n return {'success': False,\n 'return': None,\n 'exception': cpe,\n 'command': command}", "title": "" }, { "docid": "f47a0901906e715cc123f6626aeb00ac", "score": "0.63115335", "text": "def robust_exec(cmds, get_output = False, verbose = True):\n import platform, os, time, inspect, subprocess, shlex\n src_file_name = os.path.basename(inspect.getfile(inspect.currentframe()))\n if platform.system() == 'Linux':\n if os.environ.get('LD_LIBRARY_PATH'):\n # fucking bugs http://stackoverflow.com/questions/24788292/intermittent-oserror-errno-7-argument-list-too-long-with-short-command-12\n current_path = os.path.dirname(os.path.abspath(os.sys.argv[0]))\n if len(os.environ['LD_LIBRARY_PATH']) > 0:\n path_set = set(os.environ['LD_LIBRARY_PATH'].split(':'))\n if current_path not in path_set:\n os.environ['LD_LIBRARY_PATH'] += \":\" + current_path\n else:\n os.environ['LD_LIBRARY_PATH'] += \":\" + current_path\n else:\n os.environ['LD_LIBRARY_PATH'] = os.path.dirname(os.path.abspath(os.sys.argv[0]))\n# print(os.path.dirname(os.sys.argv[0]))\n os.umask(002)\n\n if type(cmds) == str:\n if verbose:\n print(cmds)\n p = None\n if get_output:\n p = subprocess.Popen(shlex.split(cmds.encode('string-escape')), env=os.environ, stdout=subprocess.PIPE)\n else:\n p = subprocess.Popen(shlex.split(cmds.encode('string-escape')), env=os.environ)\n try:\n if get_output:\n (output_text, stderrdata) = p.communicate()\n return (output_text, p.returncode)\n else:\n p.communicate()\n return p.returncode\n except:\n print('[{0}] exception in robust_exec()'.format(src_file_name))\n if platform.system() == 'Linux':\n p.send_signal(2)\n else:\n p.kill()\n 
while p.poll() == None:\n print('[{0}] waiting the child exit'.format(src_file_name))\n time.sleep(0.05)\n pass\n exit(1)\n else:\n cmds = [str(cmd) for cmd in cmds]\n if verbose:\n print(' '.join(cmds))\n p = None\n if get_output:\n p = subprocess.Popen(cmds, env=os.environ, stdout=subprocess.PIPE)\n else:\n p = subprocess.Popen(cmds, env=os.environ)\n try:\n if get_output:\n (output_text, stderrdata) = p.communicate()\n return (output_text, p.returncode)\n else:\n p.communicate()\n return p.returncode\n except:\n print('[{0}] exception in robust_exec()'.format(src_file_name))\n if platform.system() == 'Linux':\n p.send_signal(2)\n else:\n p.kill()\n while p.poll() == None:\n print('[{0}] waiting the child exit'.format(src_file_name))\n time.sleep(0.05)\n pass\n exit(1)", "title": "" }, { "docid": "bb75d6a74815df4fedef2668de9f05e2", "score": "0.62912625", "text": "def call(cmd, shell=True, raiseError=True, noOutput=False, input=None):\n\n std_in = None\n std_out = None\n\n debug(\"Executing: %s\" % cmd.__str__())\n if input:\n std_in = subprocess.PIPE\n if noOutput:\n std_out = open(os.path.devnull, \"w\")\n\n prog = subprocess.Popen(cmd, shell=shell, stdin=std_in, stdout=std_out)\n prog.communicate(input)\n\n rc = prog.returncode\n if raiseError and rc != 0:\n raise CommandFailed(cmd, rc)\n\n return rc", "title": "" }, { "docid": "9f900c23b30bc0826c0e11dfa4fa5466", "score": "0.6288891", "text": "def RunCommand(command, fail_hard=True):\n\n print 'Running %s' % (str(command))\n if subprocess.call(command, shell=False) == 0:\n return True\n print 'Failed.'\n if fail_hard:\n sys.exit(1)\n return False", "title": "" }, { "docid": "06755875865031ff5531f9fb6d6bdc57", "score": "0.62884825", "text": "def check_call(*popenargs, **kwargs):\r\n retcode = call(*popenargs, **kwargs)\r\n cmd = kwargs.get(\"args\")\r\n if cmd is None:\r\n cmd = popenargs[0]\r\n if retcode:\r\n raise CalledProcessError(retcode, cmd)\r\n return retcode", "title": "" }, { "docid": "925814397b97cff1d316d7d90ff247ee", "score": "0.628487", "text": "def call_command_line(string, **kwargs):\n return subprocess.check_call(string.split(\" \"), **kwargs)", "title": "" }, { "docid": "e7b3f551b16c7f40adfa6eb90e47b574", "score": "0.6276788", "text": "def execute_cmd(args):\n output = ''\n ok = False\n try:\n if args:\n output = subprocess.check_output(args, stderr=subprocess.STDOUT)\n ok = True\n except subprocess.CalledProcessError as e:\n output = e.output\n if 'File exists' in output:\n ok = True\n else:\n ok = False\n\n return ok, output", "title": "" }, { "docid": "9ba0589993ff9a416988a4737211b6f1", "score": "0.62662166", "text": "def try_execute(*args, **kwargs):\n error_formatter = kwargs.get(\"error_formatter\", None) # Fix for Py < 3\n debug_out(\"Executing command: \", ' '.join(args), \"\\n\")\n try:\n process = subprocess.Popen(args, shell=False, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n line = process.stdout.readline()\n output = line\n while line:\n debug_out(line.decode(locale.getpreferredencoding(), errors=\"ignore\").rstrip(\"\\r\\n\"))\n line = process.stdout.readline()\n output += line\n process.wait()\n if process.returncode != 0:\n if error_formatter:\n error_formatter(decode_str(output))\n raise Exception(\"Process had non-zero returncode:\", process.returncode)\n\n except subprocess.CalledProcessError as msg:\n debug_out(\"Process error:\")\n output = msg.output.decode(locale.getpreferredencoding(), errors=\"ignore\")\n if error_formatter:\n error_formatter(decode_str(output))\n else:\n 
debug_out(output)\n fatal_error(\"Subprocess returned no-zero statuscode!\")", "title": "" }, { "docid": "e7703b3c4b605a88e55fecbffafbdbde", "score": "0.6254871", "text": "def call(cmd):\n with tempfile.TemporaryFile(prefix='stderr') as capture:\n try:\n output = subprocess.check_output(cmd, stderr=capture)\n except subprocess.CalledProcessError:\n capture.seek(0)\n sys.stderr.write(capture.read().decode('ascii', errors='replace'))\n raise\n return output.decode()", "title": "" }, { "docid": "5bbed05a1235c788012b84ce44c1db09", "score": "0.6253184", "text": "def call_cli_command(command_call):\r\n # calls the command in shell\r\n call = subprocess.run(command_call, shell = True, capture_output=True)\r\n # returns the result in bytes as well as the sucess code (0 is success)\r\n return call.stdout, call.returncode", "title": "" }, { "docid": "fa0a184b05c7b8462172de351396f926", "score": "0.6252483", "text": "def execute(cmd):\n p = Popen(\n cmd,\n shell=True,\n stdin=PIPE,\n stdout=PIPE,\n stderr=PIPE\n )\n out, err = p.communicate()\n if not err:\n return out.decode()\n return err.decode()", "title": "" }, { "docid": "cae839131c1b036be654749b4e072d15", "score": "0.62422585", "text": "def run_cmd(cmd):\n print(f\"Executing command: {cmd}\")\n try:\n result = subprocess.run(\n cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n shell=True,\n encoding=\"utf-8\",\n check=True,\n timeout=180,\n )\n print( result.stdout )\n return (len(result.stdout) > 0)\n except subprocess.CalledProcessError as e:\n result = e.stdout\n return result", "title": "" }, { "docid": "9df55b646116d01266d696821a9e169a", "score": "0.624021", "text": "def _check_return_code(process: subprocess.CompletedProcess) -> None:\n try:\n process.check_returncode()\n except subprocess.CalledProcessError:\n raise SystemExit(f\"\\n\\nCommand failed.\")", "title": "" }, { "docid": "1c2807edb70bf9eb88330b61a26733ce", "score": "0.6233698", "text": "def check_call(*popenargs, **kwargs):\n retcode = call(*popenargs, **kwargs)\n cmd = kwargs.get(\"args\")\n if cmd is None:\n cmd = popenargs[0]\n if retcode:\n raise CalledProcessError(retcode, cmd)\n return retcode", "title": "" }, { "docid": "bc0fb58560bee4376cdb66695671ef46", "score": "0.6226469", "text": "def run_command(cmd, displayOut):\n\n # print (cmd); \n child = popen(cmd)\n output = child.read()\n if displayOut:\n print output\n err = child.close()\n if err:\n return err", "title": "" }, { "docid": "fa7f554b139634f012bf86fdf2a558d4", "score": "0.62262195", "text": "def run_checked (cmd, ret_ok=(0,), **kwargs):\n retcode = run(cmd, **kwargs)\n if retcode not in ret_ok:\n msg = \"Command `%s' returned non-zero exit status %d\" % (cmd, retcode)\n raise OSError(msg)\n return retcode", "title": "" }, { "docid": "e1b8375b213139f9a48151b6aa50b2a6", "score": "0.62169516", "text": "def try_shell(cmd):\r\n\tdebug(\"running command: \" + cmd)\r\n\tif os.system(cmd) != 0:\r\n\t\traise RuntimeError(\"shell command failed:\\n%s\" % cmd)", "title": "" }, { "docid": "0f11e01da6a145e61acd6b649dcc274d", "score": "0.6189685", "text": "def run(args):\n try:\n with Popen(args,stdout=PIPE,stderr=PIPE) as process:\n out, err = process.communicate()\n print(out[:-1].decode('utf8'))\n if err:\n print(err.decode('utf8'))\n except OSError as e:\n print('OS Error: {}'.format(e))\n return False\n except FileNotFoundError as e:\n print('File not found: {}'.format(e))\n except ValueError as e:\n # invalid arguements\n print('Invalid arguments for {0}'.format(args))", "title": "" }, { "docid": 
"60bc69a5ad518da51d2393a1b9b9e081", "score": "0.61846703", "text": "def run_sys_cmd(cmd):\n ret = os.system(cmd)\n if ret != 0:\n raise RuntimeError(f\"Nonzero exit code from: {cmd}\")", "title": "" }, { "docid": "90db884693e1c25112f9ae71fd464390", "score": "0.61762965", "text": "def runCommand(cmd):\n p = Popen(cmd.split(' '), stdout=PIPE)\n return p.communicate()", "title": "" }, { "docid": "90db884693e1c25112f9ae71fd464390", "score": "0.61762965", "text": "def runCommand(cmd):\n p = Popen(cmd.split(' '), stdout=PIPE)\n return p.communicate()", "title": "" }, { "docid": "90db884693e1c25112f9ae71fd464390", "score": "0.61762965", "text": "def runCommand(cmd):\n p = Popen(cmd.split(' '), stdout=PIPE)\n return p.communicate()", "title": "" }, { "docid": "90db884693e1c25112f9ae71fd464390", "score": "0.61762965", "text": "def runCommand(cmd):\n p = Popen(cmd.split(' '), stdout=PIPE)\n return p.communicate()", "title": "" }, { "docid": "81cc153cb661166421b50d804298a361", "score": "0.6171797", "text": "def run(cmd: str, cwd: Optional[Path] = None) -> str:\n try:\n return str(\n subprocess.run(\n cmd.split(), capture_output=True, check=True, cwd=cwd\n ).stdout.decode(\"utf-8\")\n )\n except subprocess.CalledProcessError as e:\n print(e.stderr.decode(\"utf-8\"))\n exit(1)", "title": "" }, { "docid": "665fcfd8901018be2f38f84feeaa526a", "score": "0.61696607", "text": "def call(cmd, echo=False, capture=False, **kw):\n lines = []\n if echo:\n print( \"Executing '%s'\" % ' '.join (cmd))\n if 'stdout' not in kw:\n p = subprocess.Popen(cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT, **kw)\n while True:\n l = p.stdout.readline()\n if not l: break\n print (l, end='')\n lines .append(l)\n #p.wait()\n else:\n p = subprocess.Popen(cmd, **kw)\n\n p.wait()\n\n if capture:\n return p.returncode, \"\".join(lines)\n else:\n return p.returncode", "title": "" }, { "docid": "5591e615be23d18b7bdc0d4586f40b51", "score": "0.6168684", "text": "def _run_subprocess(*args, **kwargs):\n p = subprocess.Popen(*args, **kwargs, stdout=subprocess.PIPE, stderr=subprocess.PIPE, preexec_fn=setsid)\n output = b\"\"\n err = b\"\"\n try:\n while p.poll() is None:\n (o, e) = p.communicate(None, timeout=1)\n output += o\n err += e\n # On hangup kill the program (and children)\n except subprocess.TimeoutExpired:\n killpg(getpgid(p.pid), signal.SIGKILL)\n\n return p.returncode, output, err", "title": "" }, { "docid": "8978f98a2cdff66714f6665987390277", "score": "0.6159558", "text": "def shell(args):\n if isinstance(args, str):\n raise ValueError(\"Passed string to shell, it is safer to use array: %s\" % args)\n\n proc = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out, err = proc.communicate()\n ret = proc.returncode\n\n out = str(out.decode())\n err = str(err.decode())\n\n\n return (ret, out, err)", "title": "" }, { "docid": "1a3e1e8b1839df4532bcf1705bdf5b3f", "score": "0.61374223", "text": "def check_call(self, cmd):\n \n retcode = 0\n self._log_cmd(cmd)\n if not self._dryrun:\n try:\n retcode = subprocess.check_call(cmd, shell=True,\n stdout=open(os.devnull, 'wb'),\n stderr=open(os.devnull, 'wb'))\n except subprocess.CalledProcessError as e:\n retcode = e.returncode\n return retcode", "title": "" }, { "docid": "1e319f6210f7a0c6394179ab4c150587", "score": "0.6135833", "text": "def call(cmd, *args, **Popen_kwargs):\n if not args:\n cmd, *args = shlex.split(cmd)\n\n proc = subprocess.Popen([cmd, *args], **Popen_kwargs)\n ret_code = proc.wait()\n\n if ret_code:\n sys.exit(ret_code)\n else:\n return 
proc", "title": "" }, { "docid": "c693a6a915cc24d824b856ffe2f2aab8", "score": "0.6133045", "text": "def run(cmd):\n\n logger.info(cmd)\n r = subprocess.call(cmd, shell=True, stdout=stdout, stderr=subprocess.STDOUT,\n env=os.environ)\n if r != 0:\n logger.critical(r)\n return", "title": "" }, { "docid": "dff55b14b0df0f2e585d09b5136051bd", "score": "0.6110678", "text": "def RunCommand(cmd):\n print \"Running: \", \" \".join(cmd)\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n output, _ = p.communicate()\n print \"%s\" % (output.rstrip(),)\n return (output, p.returncode)", "title": "" }, { "docid": "bc2bd7d145817dfbf85e1184ed9763a5", "score": "0.61043185", "text": "def call(*popenargs, **kwargs):\r\n return Popen(*popenargs, **kwargs).wait()", "title": "" }, { "docid": "12bdc73288563831f5b0ab559331f38d", "score": "0.6102054", "text": "def check_call_out(args, **kwargs):\n out, returncode = communicate(args, **kwargs)\n if returncode:\n raise CalledProcessError(\n returncode, args, kwargs.get('cwd'), out[0], out[1])\n return out", "title": "" }, { "docid": "d9e0554620604d032c782a669b7d0945", "score": "0.60789186", "text": "def call(*args, **kwargs):\n # Make the default stdout None.\n kwargs.setdefault('stdout', None)\n return Popen(*args, **kwargs).wait()", "title": "" }, { "docid": "8bc82081eb7018747efb0750d4c962a7", "score": "0.60767597", "text": "def save_call(command):\n ex_code=subprocess.call(shlex.split(command))\n if ex_code != 0: sys.exit(ex_code)", "title": "" }, { "docid": "066a1ac00808ff02dd5158367cc7a7d5", "score": "0.60675013", "text": "def run_cmd(cmd):\n try:\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL, shell=True)\n data, err = p.communicate()\n rc = p.returncode\n except subprocess.CalledProcessError as e:\n rc = 1\n err = e.output\n except ValueError as e:\n rc = 1\n err = str(e)\n if rc > 0:\n logging.error(\"unable to run command: \" + cmd)\n logging.error(\"cause: \" + str(err))\n return False\n return True", "title": "" }, { "docid": "6d876395f31b986421634151567ef120", "score": "0.60473067", "text": "def check_output(*popenargs, **kwargs):\n p = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)\n output = p.communicate()[0]\n assert(p.wait() == 0)\n return output", "title": "" }, { "docid": "51d88180ebd48bc53d551da21e43cec3", "score": "0.6023635", "text": "def _check_output(*popenargs, **kwargs):\n process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)\n output, unused_err = process.communicate()\n retcode = process.poll()\n if retcode:\n cmd = kwargs.get(\"args\")\n if cmd is None:\n cmd = popenargs[0]\n error = subprocess.CalledProcessError(retcode, cmd)\n error.output = output\n raise error\n return output", "title": "" }, { "docid": "48b04d0bb956a6cd299d34a34e7ab6cd", "score": "0.60217667", "text": "def test_200ok(self):\n out = os.system(path)\n self.assertEqual(out, \"It works!!!\", \"Incorrect message on ststus code 200\")", "title": "" }, { "docid": "7c7bc0a4319f1d3bfdd9a378a4a988bd", "score": "0.6021641", "text": "def run(cmd):\n\n logger.debug(\"executing command: {}\".format(cmd))\n p = proc.Popen(cmd, shell=True, stdout=proc.PIPE, stderr=proc.PIPE)\n out, err = p.communicate()\n\n if p.returncode:\n logger.error('command {} failed with code {}.\\n STDERR: {}'.format(\n cmd, p.returncode, err))\n raise Exception('{} failed'.format(cmd))\n\n return p.returncode, out", "title": "" }, { "docid": "761504cf3ac413c05ddf1a557fcf227a", "score": "0.60215384", "text": "def 
execute(command):\n from subprocess import check_output, STDOUT\n print(\"Shell command : {}\".format(command))\n command = \"{}; exit 0\".format(command)\n return check_output(command, stderr=STDOUT, shell=True).decode(\"utf-8\")", "title": "" }, { "docid": "312ebc1dc389483f4ad4aaae9f021998", "score": "0.60201025", "text": "def _shellcmd(cmd, args=[]):\n cmd = [cmd]\n cmd.extend(args)\n print cmd\n try:\n p = subp.Popen(cmd, stdout=subp.PIPE, stderr=subp.PIPE)\n except ValueError, err:\n raise RuntimeError(err)\n stdout, stderr = p.communicate()\n if p.returncode == 0:\n return stdout\n else:\n raise RuntimeError(stdout + stderr)", "title": "" }, { "docid": "27f142ab3196c9fb49b5f5a972bafa05", "score": "0.6014648", "text": "def subprocess_cmd(command, attempt = 2):\n logger.debug('Executing command: ' + command)\n process = subprocess.Popen(command,stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\n stdout, stderr = process.communicate()\n stderr = stderr.strip()\n stdout = stdout.strip()\n\n if process.returncode != 0:\n if attempt != 0:\n logger.debug('Hit error ' + stderr + '. Trying again.')\n sleep(5)\n return subprocess_cmd(command, attempt - 1)\n elif attempt == 0:\n raise RuntimeError('Hit error while running command (' + command + ') \\n' + stderr)\n return stdout", "title": "" }, { "docid": "2bf49b09438c56e790c983389a494488", "score": "0.6010533", "text": "def sys_exec(cmd):\n class _proc(object):\n\n def __init__(self, command):\n self.command = command\n self._stdin = None\n self._stdout = None\n self._stdout_text = None\n self._returncode = None\n\n def set_stdin(self, stdin):\n self._stdin = stdin\n\n def set_stdout(self, stdout):\n self._stdout = stdout\n\n @property\n def stdin(self):\n return 'stdin'\n\n @property\n def stdout(self):\n if self._stdout_text is not None:\n return self._stdout_text\n\n @property\n def stderr(self):\n if self._stderr_text is not None:\n return self._stderr_text\n\n @property\n def returncode(self):\n if self._returncode is not None:\n return self._returncode\n\n @property\n def ok(self):\n if self._returncode is not None:\n return self.returncode is 0\n\n @property\n def subprocess(self):\n if self._subprocess is not None:\n return self._subprocess\n\n def start(self):\n self._subprocess = subprocess.Popen(\n args=self.command,\n shell=True,\n stdin=self._stdin if self._stdin else subprocess.PIPE,\n stderr=subprocess.PIPE,\n stdout=subprocess.PIPE\n )\n\n def wait(self, unread=False):\n self._returncode = self._subprocess.wait()\n if self._subprocess.stdout is not None and not unread:\n self._stdout_text = self._subprocess.stdout.read().decode()\n self._stderr_text = self._subprocess.stderr.read().decode()\n\n def run(self):\n self.start()\n self.wait()\n\n def __repr__(self):\n return '<Process: {0}>'.format(self.command)\n\n p = _proc(cmd)\n p.run()\n return p", "title": "" }, { "docid": "1de00187b4706e9ca5ff88fb92b12775", "score": "0.6006434", "text": "def check_output(*args, **kwargs):\n process = Popen(stdout=PIPE, *args, **kwargs)\n output, unused_err = process.communicate()\n retcode = process.poll()\n if retcode:\n raise CalledProcessError(retcode, args[0], output=output)\n return output", "title": "" }, { "docid": "95d2aa6a9f9ab75d737deba280f3b426", "score": "0.59953773", "text": "def run(cmd):\n return call(cmd.split(' '))", "title": "" }, { "docid": "398307f36857a27a81bb3addcdb957a1", "score": "0.59944135", "text": "def run(command):\n return subprocess.Popen(command.split(), stdout=PIPE, stderr=PIPE).communicate()", 
"title": "" }, { "docid": "330e9db7e8a4f8a28e373d1492dd0145", "score": "0.5994219", "text": "def runprog(cmd, print_cmd=True, abort_on_error=False):\n # Check if we have a binary to run\n if not os.access(cmd.split()[0], os.X_OK) and cmd.startswith(\"/\"):\n raise BroadbandExternalError(\"%s does not seem an executable path!\" %\n (cmd.split()[0]))\n\n try:\n if print_cmd:\n print(\"Running: %s\" % (cmd))\n proc = subprocess.Popen(cmd, shell=True)\n proc.wait()\n except KeyboardInterrupt:\n print(\"Interrupted!\")\n sys.exit(1)\n except:\n print(\"Unexpected error returned from Subprocess call: \",\n sys.exc_info()[0])\n\n if abort_on_error:\n # If we got a non-zero exit code, abort\n if proc.returncode != 0:\n # Check if interrupted\n if proc.returncode is None:\n raise BroadbandExternalError(\"%s\\n\" %\n (traceback.format_exc()) +\n \"%s failed!\" %\n (cmd))\n raise BroadbandExternalError(\"%s\\n\" %\n (traceback.format_exc()) +\n \"%s returned %d\" %\n (cmd, proc.returncode))\n\n return proc.returncode", "title": "" }, { "docid": "3e7cbbec79a0f09ed4733e73ceb9b6ec", "score": "0.5992401", "text": "def sys_call(cmd, quiet=False):\n try:\n if quiet:\n DEVNULL = open(os.devnull, 'w')\n proc = subprocess.Popen([cmd], shell=True, stdout=DEVNULL)\n else:\n proc = subprocess.Popen([cmd], shell=True)\n except subprocess.CalledProcessError:\n pass # handle errors in the called executable\n except OSError:\n raise OSError('No executable for command: \"{}\"\\n'.format(cmd))\n output, err = proc.communicate()\n return output, err", "title": "" }, { "docid": "0e1d18952757bfe707243f87770db2ef", "score": "0.5991562", "text": "def runcmd(raiseresult,command):\n\n try:\n log(\"Running: \"+command)\n os.system(command)\n return True\n\n except:\n if raiseresult == True:\n raise\n else:\n return False", "title": "" }, { "docid": "23c849444e4b1a2f5f0812596f43e251", "score": "0.5984427", "text": "def _wait_and_check_exit_code(cmd, child):\n child.wait()\n exit_code = child.returncode\n\n if exit_code != 0:\n raise error.ExecutedErrorNonZeroExitCode(\n u'Shell command executed with \"{0}\" '\n 'exit code: {1} '.format(exit_code, cmd))", "title": "" }, { "docid": "db002e00c92682eb8e5d95aab958460e", "score": "0.5982239", "text": "def checkOutput(d): #{{{\n (out, mem, fill) = (None, None, None)\n try:\n\tp = subprocess.Popen(binary.split(\" \") + [d],\n\t stdin=subprocess.PIPE,\n\t stdout=subprocess.PIPE,\n\t stderr=subprocess.PIPE)\n\n\t(out, err) = p.communicate()\n\n\tif len(err) == 4097:\n\t fill = err[0]\n\t mem = err[1:]\n\telse:\n\t print \"[-] Something went wrong when executing \\\"%s\\\". 
Timeout kill or crash?\" % binary\n\t return (None, None, None)\n except:\n pass\n return (out, mem, fill)", "title": "" }, { "docid": "0d6a101871da62a2b6c81207c820788a", "score": "0.59769565", "text": "def run_command(runArgs, environment, errorMessage):\n log('')\n log(\" \".join(runArgs))\n\n try:\n subprocess.check_output(runArgs, stderr=subprocess.PIPE, env=environment)\n except subprocess.CalledProcessError as e:\n log(errorMessage)\n log(e.output.decode('utf-8'))\n raise", "title": "" }, { "docid": "650b2415c14a2ad452591eb4bf208557", "score": "0.59744906", "text": "def execute(command):\n process = sub.Popen([command], stdin=sub.PIPE, stdout=sub.PIPE, stderr=sub.PIPE, shell =True)\n process.stdin.write('Y')\n stdoutput, stderror = process.communicate()\n if stderror:\n\treturn stderror\n else:\n\treturn stdoutput", "title": "" }, { "docid": "720af1e6163ef53edf8f0fb151802976", "score": "0.5970879", "text": "def run(cmd):\r\n log.debug(\"running '%s'\", cmd)\r\n fixed_cmd = cmd\r\n if sys.platform == \"win32\" and cmd.count('\"') > 2:\r\n fixed_cmd = '\"' + cmd + '\"'\r\n retval = os.system(fixed_cmd)\r\n if hasattr(os, \"WEXITSTATUS\"):\r\n status = os.WEXITSTATUS(retval)\r\n else:\r\n status = retval\r\n if status:\r\n raise OSError(status, \"error running '%s'\" % cmd)", "title": "" }, { "docid": "b4ad6201f73ab0516be6f376e855c494", "score": "0.59678525", "text": "def RunCommand(cmd, cwd=None):\n process = subprocess.Popen(cmd, cwd=cwd)\n process.wait()\n return process.returncode", "title": "" }, { "docid": "3ab1fde95f63035f4b64c1b45c61731a", "score": "0.596755", "text": "def call_noerr(cmd):\n\n try: check_call(cmd, shell=True)\n except Exception as e:\n logger.warn(\"Got exception running {}: {}\".format(cmd, str(e)))\n logger.warn(\"Traceback: {}\".format(traceback.format_exc()))", "title": "" }, { "docid": "8f3948f06ac329a95b19a290b6ae3ff3", "score": "0.59640825", "text": "def run_command(cmd):\n _log.debug('External command execution: {c}'.format(c=cmd))\n try:\n sproc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n stdout, stderr = sproc.communicate()\n exitcode = sproc.returncode\n if exitcode == 0:\n _log.debug(\"Execution succeeded: {o}\".format(o=stdout.replace('\\r', ' ').replace('\\n', ' ')))\n if stderr:\n _log.warning(\"Execution STDERR: {o}\".format(o=stderr.replace('\\r', ' ').replace('\\n', ' ')))\n else:\n msg = 'Execution failed! 
EXITCODE: {c} STDOUT: {o} STDERR: {e}'.format(c=exitcode, o=stdout.replace('\\r', ' ').replace('\\n', ' '), e=stderr.replace('\\r', ' ').replace('\\n', ' '))\n _log.critical(msg)\n raise ONEFluxError(msg)\n except subprocess.CalledProcessError, e:\n msg = \"Execution raised error: '{e}'\".format(e=str(e))\n _log.critical(msg)\n raise ONEFluxError(msg)\n return stdout", "title": "" }, { "docid": "fe749c546e736faadffcb778ab730290", "score": "0.5962532", "text": "def exec_cmdv(cmdv):\n try:\n output = subprocess.check_output(cmdv, stderr=subprocess.STDOUT)\n retval = SUCCESS\n\n except OSError, e:\n output = str(e)\n retval = None\n\n except subprocess.CalledProcessError, e:\n output = e.output\n retval = e.returncode\n\n return (retval, output)", "title": "" }, { "docid": "a0a58fa9de6f4d037ad1f2d83f0608e7", "score": "0.5959967", "text": "def runCmdGetString(filePath, * args):\n proc = Popen(flatten(joinPaths(filePath), args), stdout=PIPE);\n cont = proc.stdout.read()\n retcode = proc.wait()\n return (retcode, cont)", "title": "" }, { "docid": "973756b652a085dd8430488c379778b2", "score": "0.5959961", "text": "def run_cmd(self, cmd, exit_ = True, error_handler = lambda :True):\n self.log(\"Run command:\\t\"+cmd)\n if call(cmd, shell = True):\n # If running command encounters error,\n # call() returns a non-zero value\n\n result = error_handler()\n if exit_:\n raise\n sys.exit(0)\n else:\n return result\n else:\n return True", "title": "" }, { "docid": "04b0de785fa31b17ed974d20a244bc0e", "score": "0.59589934", "text": "def test_error():\n # Wrong argument names\n process = subprocess.run([\"eFISHent\", \"--not-an-argument\", \"ENSB29329382938\"])\n assert process.returncode == 2\n\n # No reference genome passed\n process = subprocess.run([\"eFISHent\", \"--ensembl-id\", \"ENSG00000026025\"])\n assert process.returncode == 2\n files = glob.glob(\"./ENSG00000026025_*.csv\")\n assert len(files) == 0\n\n # Non-bools\n process = subprocess.run(\n [\n \"efishent\",\n \"--reference-genome\",\n \"./sacCer3.fa\",\n \"--save-intermediates\",\n \"Yup\",\n \"--sequence-file\",\n \"./renilla.fa\",\n ]\n )\n assert process.returncode == 2\n\n # Too short/long length?\n # p = subprocess.Popen(args, stderr=subprocess.PIPE)\n # stdout, stderr = p.communicate()", "title": "" }, { "docid": "1b9a655d7e10daf0a41f81ae7fa4c568", "score": "0.59475154", "text": "def execute_cmd(cmd: str, wd: str = None) -> int:\n # we call it like this because the normal exception\n # subprocess.CalledProcessError doesn't return the stderr\n # of the pip call itself (it just says it fails)\n # so we solve it with popen and pipe/capture the output\n p = subprocess.run(cmd, shell=True, cwd=wd)\n # output, error = p.communicate()\n return p.returncode\n if p.returncode != 0:\n raise Exception(f\"cmd '{cmd}' statuscode: {p.returncode}\")", "title": "" }, { "docid": "5ae6089e1e79a399ed53afa1c02e06b7", "score": "0.5946792", "text": "def execute(cmd: str, *, timeout: Optional[float] = None) -> Result[None, Error]:\n try:\n subprocess.check_call(cmd, shell=True, timeout=timeout)\n return Ok(None)\n except Exception as e:\n return Err(Error.from_exception(e))", "title": "" }, { "docid": "87bfddcf6232c146df6a1579372387c7", "score": "0.5943468", "text": "def system(cls, cmd, expected_code=0, shell=True):\n logging.debug('Execute command: %s', cmd)\n\n process = subprocess.Popen(\n cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=shell)\n out, err = process.communicate()\n ret = process.returncode\n\n if ret != expected_code:\n desc = 
'%s failed: %s\\nError Message: %s' % (cmd, out, err)\n logging.error(desc)\n raise Exception(desc)\n\n return out, err", "title": "" }, { "docid": "829b1afd42dac4e5db1308377e929b1d", "score": "0.59388936", "text": "def run_cmd(self, cmd, exit_ = True, error_handler = lambda :False):\n self.log(\"Run command:\\t\"+cmd)\n if call(cmd, shell = True):\n # if encounters error\n result = error_handler()\n if exit_:\n print \"`\"+cmd+\"`\"+\" failed, exit\"\n sys.exit(0)\n else:\n return result\n else:\n return True", "title": "" } ]
9be33284d225994574b3fdffd5f29267
Displays the name of the object in a viewer-aware manner.
[ { "docid": "565ff90e8cd1f8ed43fd67d0004d8982", "score": "0.0", "text": "def get_display_name(self, looker, **kwargs):\n #overrides the same method in DefaultObject\n string = self.name\n if self.locks.check_lockstring(looker, \"perm(Builder)\") and looker.db.settings_viewNumbers == True:\n string += \"(#{})\".format(self.id)\n\n if self.db.hidden:\n string += \"(|xhidden|n)\"\n\n return string", "title": "" } ]
[ { "docid": "c7e5e5ee59874bf65c57d2c2ae3d9ca4", "score": "0.74172854", "text": "def show_name(self):\r\n return print(self.__name)", "title": "" }, { "docid": "c7e5e5ee59874bf65c57d2c2ae3d9ca4", "score": "0.74172854", "text": "def show_name(self):\r\n return print(self.__name)", "title": "" }, { "docid": "0c7bdd87cf69a92fb8582c78b4fa4406", "score": "0.7330914", "text": "def object_title():", "title": "" }, { "docid": "9eb444a5423657dd6d1c8f245524e3f0", "score": "0.72641796", "text": "def showname(self):\n print 'your name is', self.name\n print 'my name is ', self.__class__.__name__", "title": "" }, { "docid": "769d50130bdb78c4c7d0dd6c195ffb02", "score": "0.699533", "text": "def display_name():", "title": "" }, { "docid": "98cae3af8373fc6605e17385c7b520dd", "score": "0.69347084", "text": "def __str__(self):\n\n\t return self.name", "title": "" }, { "docid": "98cae3af8373fc6605e17385c7b520dd", "score": "0.69347084", "text": "def __str__(self):\n\n\t return self.name", "title": "" }, { "docid": "338ce43cb9b1094da3c04346e9b8eefc", "score": "0.69090706", "text": "def __str__(self):\n return '%s' % (self.name)", "title": "" }, { "docid": "7ee25085c25a3d2b2705f90daff534ca", "score": "0.68817896", "text": "def __str__(self) -> str:\n\t\treturn self.name", "title": "" }, { "docid": "c1bdc4425caaf63af3232d1b4a5b0297", "score": "0.6876214", "text": "def __str__(self):\n\t\tif self.name:\n\t\t\treturn self.name", "title": "" }, { "docid": "ee26831b7d8436f4b47e6cf3e5f22456", "score": "0.68731695", "text": "def __str__(self):\n return \"%s\" %(self.name)", "title": "" }, { "docid": "ee26831b7d8436f4b47e6cf3e5f22456", "score": "0.68731695", "text": "def __str__(self):\n return \"%s\" %(self.name)", "title": "" }, { "docid": "ee26831b7d8436f4b47e6cf3e5f22456", "score": "0.68731695", "text": "def __str__(self):\n return \"%s\" %(self.name)", "title": "" }, { "docid": "d5dcaa771fde2396207c971847d85f45", "score": "0.6853483", "text": "def __str__(self):\r\n return self.name", "title": "" }, { "docid": "d5dcaa771fde2396207c971847d85f45", "score": "0.6853483", "text": "def __str__(self):\r\n return self.name", "title": "" }, { "docid": "d5dcaa771fde2396207c971847d85f45", "score": "0.6853483", "text": "def __str__(self):\r\n return self.name", "title": "" }, { "docid": "d5dcaa771fde2396207c971847d85f45", "score": "0.6853483", "text": "def __str__(self):\r\n return self.name", "title": "" }, { "docid": "d79cd3afb1df067a8a723f51d53ea7d6", "score": "0.68353623", "text": "def __str__(self):\n return self._name", "title": "" }, { "docid": "d6cf61621fc6d87a47e8f4b8b6fd98e7", "score": "0.683396", "text": "def __str__(self):\r\n\t\treturn self.name", "title": "" }, { "docid": "da736db744a1bf5c4bf21c674a7ba759", "score": "0.6817897", "text": "def __str__(self):\n\t\treturn self.name", "title": "" }, { "docid": "90ec01cd5534147f6c7aa5b557b1d33d", "score": "0.6811139", "text": "def __str__(self):\n return \"%s\" %self.name", "title": "" }, { "docid": "835a6b088767a7a97ab91ffa660ceabb", "score": "0.6805332", "text": "def __str__(self):\n return '{}'.format(self.name)", "title": "" }, { "docid": "835a6b088767a7a97ab91ffa660ceabb", "score": "0.6805332", "text": "def __str__(self):\n return '{}'.format(self.name)", "title": "" }, { "docid": "835a6b088767a7a97ab91ffa660ceabb", "score": "0.6805332", "text": "def __str__(self):\n return '{}'.format(self.name)", "title": "" }, { "docid": "c1ebb2b417dc5ca2e59d42c29c3e92f1", "score": "0.6802552", "text": "def __str__(self) -> str:\n return self.name", "title": "" }, { "docid": 
"c1ebb2b417dc5ca2e59d42c29c3e92f1", "score": "0.6802552", "text": "def __str__(self) -> str:\n return self.name", "title": "" }, { "docid": "ada5b31bac5a1dfbff9952c8c64d011a", "score": "0.67832136", "text": "def __str__(self):\n return self.name", "title": "" }, { "docid": "ada5b31bac5a1dfbff9952c8c64d011a", "score": "0.67832136", "text": "def __str__(self):\n return self.name", "title": "" }, { "docid": "ada5b31bac5a1dfbff9952c8c64d011a", "score": "0.67832136", "text": "def __str__(self):\n return self.name", "title": "" }, { "docid": "ada5b31bac5a1dfbff9952c8c64d011a", "score": "0.67832136", "text": "def __str__(self):\n return self.name", "title": "" }, { "docid": "ada5b31bac5a1dfbff9952c8c64d011a", "score": "0.67832136", "text": "def __str__(self):\n return self.name", "title": "" }, { "docid": "ada5b31bac5a1dfbff9952c8c64d011a", "score": "0.67832136", "text": "def __str__(self):\n return self.name", "title": "" }, { "docid": "ada5b31bac5a1dfbff9952c8c64d011a", "score": "0.67832136", "text": "def __str__(self):\n return self.name", "title": "" }, { "docid": "ada5b31bac5a1dfbff9952c8c64d011a", "score": "0.67832136", "text": "def __str__(self):\n return self.name", "title": "" }, { "docid": "ada5b31bac5a1dfbff9952c8c64d011a", "score": "0.67832136", "text": "def __str__(self):\n return self.name", "title": "" }, { "docid": "ada5b31bac5a1dfbff9952c8c64d011a", "score": "0.67832136", "text": "def __str__(self):\n return self.name", "title": "" }, { "docid": "ada5b31bac5a1dfbff9952c8c64d011a", "score": "0.67832136", "text": "def __str__(self):\n return self.name", "title": "" }, { "docid": "ada5b31bac5a1dfbff9952c8c64d011a", "score": "0.67832136", "text": "def __str__(self):\n return self.name", "title": "" }, { "docid": "ada5b31bac5a1dfbff9952c8c64d011a", "score": "0.67832136", "text": "def __str__(self):\n return self.name", "title": "" }, { "docid": "ada5b31bac5a1dfbff9952c8c64d011a", "score": "0.67832136", "text": "def __str__(self):\n return self.name", "title": "" }, { "docid": "ada5b31bac5a1dfbff9952c8c64d011a", "score": "0.67832136", "text": "def __str__(self):\n return self.name", "title": "" }, { "docid": "ada5b31bac5a1dfbff9952c8c64d011a", "score": "0.67832136", "text": "def __str__(self):\n return self.name", "title": "" }, { "docid": "ada5b31bac5a1dfbff9952c8c64d011a", "score": "0.67832136", "text": "def __str__(self):\n return self.name", "title": "" }, { "docid": "ada5b31bac5a1dfbff9952c8c64d011a", "score": "0.67832136", "text": "def __str__(self):\n return self.name", "title": "" }, { "docid": "ada5b31bac5a1dfbff9952c8c64d011a", "score": "0.67832136", "text": "def __str__(self):\n return self.name", "title": "" }, { "docid": "ada5b31bac5a1dfbff9952c8c64d011a", "score": "0.67832136", "text": "def __str__(self):\n return self.name", "title": "" }, { "docid": "ada5b31bac5a1dfbff9952c8c64d011a", "score": "0.67832136", "text": "def __str__(self):\n return self.name", "title": "" }, { "docid": "ada5b31bac5a1dfbff9952c8c64d011a", "score": "0.67832136", "text": "def __str__(self):\n return self.name", "title": "" }, { "docid": "ada5b31bac5a1dfbff9952c8c64d011a", "score": "0.67832136", "text": "def __str__(self):\n return self.name", "title": "" }, { "docid": "ada5b31bac5a1dfbff9952c8c64d011a", "score": "0.67832136", "text": "def __str__(self):\n return self.name", "title": "" }, { "docid": "ada5b31bac5a1dfbff9952c8c64d011a", "score": "0.67832136", "text": "def __str__(self):\n return self.name", "title": "" }, { "docid": "ada5b31bac5a1dfbff9952c8c64d011a", "score": "0.67832136", 
"text": "def __str__(self):\n return self.name", "title": "" }, { "docid": "ada5b31bac5a1dfbff9952c8c64d011a", "score": "0.67832136", "text": "def __str__(self):\n return self.name", "title": "" }, { "docid": "ada5b31bac5a1dfbff9952c8c64d011a", "score": "0.67832136", "text": "def __str__(self):\n return self.name", "title": "" }, { "docid": "ada5b31bac5a1dfbff9952c8c64d011a", "score": "0.67832136", "text": "def __str__(self):\n return self.name", "title": "" }, { "docid": "ada5b31bac5a1dfbff9952c8c64d011a", "score": "0.67832136", "text": "def __str__(self):\n return self.name", "title": "" }, { "docid": "ada5b31bac5a1dfbff9952c8c64d011a", "score": "0.67832136", "text": "def __str__(self):\n return self.name", "title": "" }, { "docid": "ada5b31bac5a1dfbff9952c8c64d011a", "score": "0.67832136", "text": "def __str__(self):\n return self.name", "title": "" }, { "docid": "ada5b31bac5a1dfbff9952c8c64d011a", "score": "0.67832136", "text": "def __str__(self):\n return self.name", "title": "" }, { "docid": "ada5b31bac5a1dfbff9952c8c64d011a", "score": "0.67832136", "text": "def __str__(self):\n return self.name", "title": "" }, { "docid": "ada5b31bac5a1dfbff9952c8c64d011a", "score": "0.67832136", "text": "def __str__(self):\n return self.name", "title": "" }, { "docid": "ada5b31bac5a1dfbff9952c8c64d011a", "score": "0.67832136", "text": "def __str__(self):\n return self.name", "title": "" }, { "docid": "ada5b31bac5a1dfbff9952c8c64d011a", "score": "0.67832136", "text": "def __str__(self):\n return self.name", "title": "" }, { "docid": "ada5b31bac5a1dfbff9952c8c64d011a", "score": "0.67832136", "text": "def __str__(self):\n return self.name", "title": "" }, { "docid": "ada5b31bac5a1dfbff9952c8c64d011a", "score": "0.67832136", "text": "def __str__(self):\n return self.name", "title": "" }, { "docid": "ada5b31bac5a1dfbff9952c8c64d011a", "score": "0.67832136", "text": "def __str__(self):\n return self.name", "title": "" }, { "docid": "ada5b31bac5a1dfbff9952c8c64d011a", "score": "0.67832136", "text": "def __str__(self):\n return self.name", "title": "" }, { "docid": "ada5b31bac5a1dfbff9952c8c64d011a", "score": "0.67832136", "text": "def __str__(self):\n return self.name", "title": "" }, { "docid": "ada5b31bac5a1dfbff9952c8c64d011a", "score": "0.67832136", "text": "def __str__(self):\n return self.name", "title": "" }, { "docid": "ada5b31bac5a1dfbff9952c8c64d011a", "score": "0.67832136", "text": "def __str__(self):\n return self.name", "title": "" }, { "docid": "ada5b31bac5a1dfbff9952c8c64d011a", "score": "0.67832136", "text": "def __str__(self):\n return self.name", "title": "" }, { "docid": "ada5b31bac5a1dfbff9952c8c64d011a", "score": "0.67832136", "text": "def __str__(self):\n return self.name", "title": "" }, { "docid": "ada5b31bac5a1dfbff9952c8c64d011a", "score": "0.67832136", "text": "def __str__(self):\n return self.name", "title": "" }, { "docid": "ada5b31bac5a1dfbff9952c8c64d011a", "score": "0.67832136", "text": "def __str__(self):\n return self.name", "title": "" }, { "docid": "ada5b31bac5a1dfbff9952c8c64d011a", "score": "0.67832136", "text": "def __str__(self):\n return self.name", "title": "" }, { "docid": "ada5b31bac5a1dfbff9952c8c64d011a", "score": "0.67832136", "text": "def __str__(self):\n return self.name", "title": "" }, { "docid": "ada5b31bac5a1dfbff9952c8c64d011a", "score": "0.67832136", "text": "def __str__(self):\n return self.name", "title": "" }, { "docid": "ada5b31bac5a1dfbff9952c8c64d011a", "score": "0.67832136", "text": "def __str__(self):\n return self.name", "title": "" }, { 
"docid": "ada5b31bac5a1dfbff9952c8c64d011a", "score": "0.67832136", "text": "def __str__(self):\n return self.name", "title": "" }, { "docid": "ada5b31bac5a1dfbff9952c8c64d011a", "score": "0.67832136", "text": "def __str__(self):\n return self.name", "title": "" }, { "docid": "ada5b31bac5a1dfbff9952c8c64d011a", "score": "0.67832136", "text": "def __str__(self):\n return self.name", "title": "" }, { "docid": "ada5b31bac5a1dfbff9952c8c64d011a", "score": "0.67832136", "text": "def __str__(self):\n return self.name", "title": "" }, { "docid": "ada5b31bac5a1dfbff9952c8c64d011a", "score": "0.67832136", "text": "def __str__(self):\n return self.name", "title": "" }, { "docid": "ada5b31bac5a1dfbff9952c8c64d011a", "score": "0.67832136", "text": "def __str__(self):\n return self.name", "title": "" }, { "docid": "ada5b31bac5a1dfbff9952c8c64d011a", "score": "0.67832136", "text": "def __str__(self):\n return self.name", "title": "" }, { "docid": "ada5b31bac5a1dfbff9952c8c64d011a", "score": "0.67832136", "text": "def __str__(self):\n return self.name", "title": "" }, { "docid": "ada5b31bac5a1dfbff9952c8c64d011a", "score": "0.67832136", "text": "def __str__(self):\n return self.name", "title": "" }, { "docid": "ada5b31bac5a1dfbff9952c8c64d011a", "score": "0.67832136", "text": "def __str__(self):\n return self.name", "title": "" }, { "docid": "ada5b31bac5a1dfbff9952c8c64d011a", "score": "0.67832136", "text": "def __str__(self):\n return self.name", "title": "" }, { "docid": "ada5b31bac5a1dfbff9952c8c64d011a", "score": "0.67832136", "text": "def __str__(self):\n return self.name", "title": "" }, { "docid": "ada5b31bac5a1dfbff9952c8c64d011a", "score": "0.67832136", "text": "def __str__(self):\n return self.name", "title": "" }, { "docid": "ada5b31bac5a1dfbff9952c8c64d011a", "score": "0.67832136", "text": "def __str__(self):\n return self.name", "title": "" }, { "docid": "ada5b31bac5a1dfbff9952c8c64d011a", "score": "0.67832136", "text": "def __str__(self):\n return self.name", "title": "" }, { "docid": "ada5b31bac5a1dfbff9952c8c64d011a", "score": "0.67832136", "text": "def __str__(self):\n return self.name", "title": "" }, { "docid": "ada5b31bac5a1dfbff9952c8c64d011a", "score": "0.67832136", "text": "def __str__(self):\n return self.name", "title": "" }, { "docid": "ada5b31bac5a1dfbff9952c8c64d011a", "score": "0.67832136", "text": "def __str__(self):\n return self.name", "title": "" }, { "docid": "ada5b31bac5a1dfbff9952c8c64d011a", "score": "0.67832136", "text": "def __str__(self):\n return self.name", "title": "" }, { "docid": "ada5b31bac5a1dfbff9952c8c64d011a", "score": "0.67832136", "text": "def __str__(self):\n return self.name", "title": "" }, { "docid": "ada5b31bac5a1dfbff9952c8c64d011a", "score": "0.67832136", "text": "def __str__(self):\n return self.name", "title": "" }, { "docid": "ada5b31bac5a1dfbff9952c8c64d011a", "score": "0.67832136", "text": "def __str__(self):\n return self.name", "title": "" }, { "docid": "ada5b31bac5a1dfbff9952c8c64d011a", "score": "0.67832136", "text": "def __str__(self):\n return self.name", "title": "" } ]
4f6f40244a426bbab693292cb8d599c5
Calculate Utility based on Edmunds Rating from the User's Requirements.
[ { "docid": "22c21d39c48a42e9ec61ce41618132c9", "score": "0.0", "text": "def knowledge_based_rec(df,\n manufacturer=None,\n category=None,\n price=None,\n comfort=None,\n driving=None,\n interior=None,\n tech=None,\n utility=None):\n # int(\"0\") = 0, int(\"1\") = 1\n attributes = {'comfort': int(comfort),\n 'driving': int(driving),\n 'interior': int(interior),\n 'tech': int(tech),\n 'utility':int(utility),\n 'weight': None,\n 'manufacturer': manufacturer,\n 'category': category,\n 'price': price\n }\n cu = []\n\n sum = int(comfort) + int(driving) + int(interior) + int(tech) + int(utility)\n if 0 == sum:\n weight = 0\n attributes['weight'] = weight\n\n for i, row in df.iterrows():\n calculated_utility = (row['comfort'] + row['driving'] + row['interior'] + row['tech'] + row['utility'])/5\n cu.append(calculated_utility)\n\n else:\n weight = 1/sum\n attributes['weight'] = weight\n\n for i, row in df.iterrows():\n calculated_utility = (attributes['comfort']*weight)*row['comfort']\n + (attributes['driving']*weight)*row['driving'] + (attributes['interior']*weight)*row['interior'] + (attributes['tech']*weight)*row['tech'] + (attributes['utility']*weight)*row['utility']\n\n cu.append(calculated_utility)\n\n if 'Calculated Utility' in df.columns:\n del df['Calculated Utility']\n\n df.insert(0, 'Calculated Utility', cu, allow_duplicates=True)\n\n\n # print(f\"Weight: {weight}\")\n # print(f\"attributes: {attributes}\")\n return attributes", "title": "" } ]
[ { "docid": "37fbe4192b5c8647e710d339021da4b6", "score": "0.60642374", "text": "def calcOffensivePointsRating(self):\n evG = self.goals5v5 * 1\n evA1 = self.fAssists5v5 * self.fAssistToGoalWeight\n evA2 = self.sAssists5v5 * self.sAssistToGoalWeight\n\n ppG = self.ppGoals * self.ppGoalTo5v5Weight\n ppA = self.ppAssists * self.fAssistToGoalWeight * self.ppGoalTo5v5Weight\n\n pkG = self.pkGoals * self.pkGoalTo5v5Weight\n pkA = self.pkAssists * self.fAssistToGoalWeight * self.pkGoalTo5v5Weight\n\n if (self.position == \"D\"):\n self.evPointsRating += evG * self.defenceGoalWeight\n self.evPointsRating += evA1 * self.defenceFAssistWeight\n self.evPointsRating += evA2 * self.defenceSAssistWeight\n\n self.ppPointsRating += ppG * self.defenceGoalWeight\n self.ppPointsRating += ppA * self.defenceFAssistWeight\n\n self.pkPointsRating += pkG * self.defenceGoalWeight\n self.pkPointsRating += pkA * self.defenceFAssistWeight\n else:\n self.evPointsRating += evG\n self.evPointsRating += evA1\n self.evPointsRating += evA2\n\n self.ppPointsRating += ppG\n self.ppPointsRating += ppA\n\n self.pkPointsRating += pkG\n self.pkPointsRating += pkA", "title": "" }, { "docid": "b13a24dd979af216a3d48fd7a7bed64f", "score": "0.60518545", "text": "def calcOffensiveRating(self):\n toiAllWeight = self.toiALL / self.games * 0.05\n\n self.offensiveRating = 0\n self.ppOffensiveRating = 0\n self.pkOffensiveRating = 0\n\n self.calcOffensivePointsRating()\n self.evOffensiveRating += self.evPointsRating\n self.ppOffensiveRating += self.ppPointsRating\n self.pkOffensiveRating += self.pkPointsRating\n\n self.calcOffensiveShotsRating()\n self.evOffensiveRating += self.evShotsRating\n self.ppOffensiveRating += self.ppShotsRating\n self.pkOffensiveRating += self.pkShotsRating\n\n self.offensiveRating += (self.evOffensiveRating + self.ppOffensiveRating + self.pkOffensiveRating)\n\n self.offensiveRating *= toiAllWeight\n\n evORatingPer60 = 0\n ppORatingPer60 = 0\n pkORatingPer60 = 0\n\n ppWeight = self.toiPP / self.toiALL\n pkWeight = self.toiPK / self.toiALL\n\n # Shots are multiplied to keep data scaled similarly to the Points Rating.\n # Temporary fix until the weightings are better adjusted.\n shotMultiplier = 8\n\n if (self.toi5v5 > 0):\n self.evPointsRating = self.evPointsRating / (self.toi5v5 / 60)\n self.evShotsRating = (self.evShotsRating / (self.toi5v5 / 60)) * shotMultiplier\n evORatingPer60 += self.evPointsRating + self.evShotsRating\n if (self.toiPP > 0):\n self.ppPointsRating = (self.ppPointsRating / (self.toiPP / 60)) * ppWeight\n self.ppShotsRating = ((self.ppShotsRating / (self.toiPP / 60)) * ppWeight) * shotMultiplier\n ppORatingPer60 += self.ppPointsRating + self.ppShotsRating\n if (self.toiPK > 0):\n self.pkPointsRating = (self.pkPointsRating / (self.toiPK / 60)) * pkWeight\n self.pkShotsRating = ((self.pkShotsRating / (self.toiPK / 60)) * pkWeight) * shotMultiplier\n pkORatingPer60 += self.pkPointsRating + self.pkShotsRating\n\n self.offensivePointsRating = self.evPointsRating + self.ppPointsRating + self.pkPointsRating\n self.offensiveShotsRating = self.evShotsRating + self.ppShotsRating + self.pkShotsRating\n\n self.evOffensiveRating = evORatingPer60\n self.ppOffensiveRating = ppORatingPer60\n self.pkOffensiveRating = pkORatingPer60\n\n ORatingPer60 = evORatingPer60 + ppORatingPer60 + pkORatingPer60\n\n self.offensiveRating = ORatingPer60\n\n return ORatingPer60", "title": "" }, { "docid": "c36ba3672d30c155c2c687c4f672c535", "score": "0.6033435", "text": "def calcOffensiveShotsRating(self):\n\n CFWeight = 
0.040\n FFWeight = 0.020\n SFWeight = 0.020\n SCFWeight = 0.020\n HDCFWeight = 0.150\n\n # League avg Shooting% for Unblocked Shots (Fenwick)\n evxFSHLeagueAvg = 0.057\n ppxFSHLeagueAvg = 0.092\n pkxFSHLeagueAvg = 0.070\n\n if (self.shPercentage != '-'):\n self.evShotQualityAdjustment = (evxFSHLeagueAvg * self.shPercentage) * 0.10\n self.ppShotQualityAdjustment = (ppxFSHLeagueAvg * self.shPercentage) * 0.10\n self.pkShotQualityAdjustment = (pkxFSHLeagueAvg * self.shPercentage) * 0.10\n #\n # Even Strength (5v5)\n #\n # eviCF = even strength individual Corsi For\n # eviCF = even strength individual Fenwick For\n # eviSF = even strength individual Shots For\n # eviSCF = even strength individual Scoring Chances For\n # eviHDSCF = even strength individual High-Danger Scoring Chances For\n #\n eviCF = self.eviCF * CFWeight\n eviFF = self.eviFF * FFWeight\n eviSF = self.eviSF * SFWeight\n eviSCF = self.eviSCF * SCFWeight\n eviHDCF = self.eviHDCF * HDCFWeight\n\n self.evShotsRating = eviCF + eviFF + eviSF + eviSCF + eviHDCF\n\n if (self.position == \"D\"):\n self.evShotsRating = (self.evShotsRating * self.evDefenceShotWeight) * self.evShotQualityAdjustment\n else:\n self.evShotsRating = self.evShotsRating * self.evShotQualityAdjustment\n\n #\n # Powerplay\n #\n ppiCF = self.ppiCF * CFWeight\n ppiFF = self.ppiFF * FFWeight\n ppiSF = self.ppiSF * SFWeight\n ppiSCF = self.ppiSCF * SCFWeight\n ppiHDCF = self.ppiHDCF * HDCFWeight\n\n self.ppShotsRating = ppiCF + ppiFF + ppiSF + ppiSCF + ppiHDCF\n\n if (self.position == \"D\"):\n self.ppShotsRating = (self.ppShotsRating * self.ppDefenceShotWeight) * self.ppShotQualityAdjustment\n else:\n self.ppShotsRating = (self.ppShotsRating * self.ppForwardShotWeight) * self.ppShotQualityAdjustment\n\n #\n # Penalty Kill\n #\n pkiCF = self.pkiCF * CFWeight\n pkiFF = self.pkiFF * FFWeight\n pkiSF = self.pkiSF * SFWeight\n pkiSCF = self.pkiSCF * SCFWeight\n pkiHDCF = self.pkiHDCF * HDCFWeight\n\n self.pkShotsRating = pkiCF + pkiFF + pkiSF + pkiSCF + pkiHDCF\n\n if (self.position == \"D\"):\n self.pkShotsRating = (self.pkShotsRating * self.pkDefenceShotWeight) * self.pkShotQualityAdjustment\n else:\n self.pkShotsRating = (self.pkShotsRating * self.pkForwardShotWeight) * self.pkShotQualityAdjustment", "title": "" }, { "docid": "1130fea7ad107c84e9cde67269103127", "score": "0.59557766", "text": "def rating(self):\n return 0", "title": "" }, { "docid": "d5ca3ed58b0b57493dff5a693dbb7c7f", "score": "0.5936615", "text": "def get_rating(self):\r\n # type: () -> mpf\r\n\r\n rating_penalty: float = 0 if self.role in self.__playable_positions else 0.1\r\n if self.position.position_type in [\"GK\", \"DF\"]:\r\n return mpf_average_of_list([self.stats.get_defense() * 1.15, self.stats.get_attack(),\r\n self.stats.get_physical_and_mental()]) * (1 - rating_penalty)\r\n elif self.position.position_type == \"MF\":\r\n return mpf_average_of_list([self.stats.get_defense(), self.stats.get_attack(),\r\n self.stats.get_physical_and_mental() * 1.15]) * (1 - rating_penalty)\r\n else:\r\n return mpf_average_of_list([self.stats.get_defense(), self.stats.get_attack() * 1.15,\r\n self.stats.get_physical_and_mental()]) * (1 - rating_penalty)", "title": "" }, { "docid": "9c00c6b1d5b58fc3ff8472db8f7f549d", "score": "0.5910215", "text": "def get_rating(self):\n if not (self.votes and self.score):\n return 0\n return (\n float(self.score + (self.field.weight*self.field.get_site_wide_average_rating()))\n / (self.votes + self.field.weight)\n )", "title": "" }, { "docid": 
"51962c197774f3c7798e1fc1e0cb8acf", "score": "0.589886", "text": "def calculate_utility(rewards, r, c, action):\n if r == 3 and c == 1:\n u = -2\n elif r == 1 and c == 2:\n u = -10\n elif r == 1 and c == 3:\n u = -10\n else:\n u = REWARD\n\n u += 0.1 * DISCOUNT * get_utility(rewards, r, c, (action-1)%4)\n u += GAMMA * DISCOUNT * get_utility(rewards, r, c, action)\n u += 0.1 * DISCOUNT * get_utility(rewards, r, c, (action+1)%4)\n\n return u", "title": "" }, { "docid": "749e388fd75c4818ef7ca1773a485166", "score": "0.5894943", "text": "def calculateUsage(row, trainDf, testDf, businesses, hybridCorrelationWeights, userVectors, pseudoUserVectors, meanUserRatings, ratingCounts, n=10, hybrid=True, contentOnly=False):\r\n userId = row.name\r\n userIndex = pseudoUserVectors.index.get_loc(userId)\r\n priorRatings = trainDf[trainDf.user_id == userId].business_id.unique()\r\n if not contentOnly:\r\n predictedRatings = predict(userIndex, hybridCorrelationWeights, userVectors, pseudoUserVectors, meanUserRatings, ratingCounts, hybrid=hybrid)\r\n else:\r\n predictedRatings = row\r\n predictedRatings = predictedRatings[~businesses.iloc[predictedRatings.index].business_id.isin(priorRatings)]\r\n recommendations = predictedRatings.nlargest(n)\r\n recommended = pd.Series(index=predictedRatings.index, data=predictedRatings.index.isin(recommendations.index))\r\n testPriorRatings = testDf[testDf.user_id == userId].business_id.unique()\r\n used = pd.Series(index=predictedRatings.index, data=businesses.iloc[predictedRatings.index].business_id.isin(testPriorRatings))\r\n confusionMatrix = confusion_matrix(used, recommended, labels=[True,False])\r\n row = pd.Series(index=[\"TP\", \"FP\", \"FN\", \"TN\"], data=confusionMatrix.reshape(-1))\r\n return row", "title": "" }, { "docid": "0cef2d4939a1be6c99582df33645eed2", "score": "0.5873093", "text": "def get_rating_data(user, SellerRating, BuyerRating):\n\tratings = {}\n\tratings.update( {\"positive\":0,\n\t\t\t\"negative\": 0,\n\t\t\t\"neutral\": 0,\n\t\t\t\"Bpositive\": 0,\n\t\t\t\"Bnegative\": 0,\n\t\t\t\"Bneutral\": 0} )\n\n\tseller_qry = SellerRating.query(SellerRating.seller == user.name)\n\toverall = []\n\tshipping = []\n\thonesty = []\n\tcom = []\n\tdef get_ratings(rating):\n\t\toverall.append(rating.overall)\n\t\tif rating.overall == -1:\n\t\t\tratings[\"negative\"] += 1\n\t\telif rating.overall == 1:\n\t\t\tratings[\"positive\"] += 1\n\t\telif rating.overall == 0:\n\t\t\tratings[\"neutral\"] += 1\n\t\tshipping.append(rating.shipping)\n\t\thonesty.append(rating.honesty)\n\t\tcom.append(rating.communication)\n\tseller_qry.map(get_ratings)\n\t\n\tratings[\"pos percentage\"] = float(ratings[\"positive\"])/float(len(overall) or 1)*100\n\tratings[\"neg percentage\"] = float(ratings[\"negative\"])/float(len(overall) or 1)*100\n\tratings[\"neut percentage\"] = float(ratings[\"neutral\"])/float(len(overall) or 1)*100\n\n\tratings[\"shipping\"] = average(shipping)\n\tratings[\"honesty\"] = average(honesty)\n\tratings[\"communication\"] = average(com)\n\n\tbuyer_qry = BuyerRating.query(BuyerRating.buyer == user.name)\n\tB_overall = []\n\tpayment = []\n\tB_com = []\n\tdef get_Bratings(rating):\n\t\tB_overall.append(rating.overall)\n\t\tif rating.overall == -1:\n\t\t\tratings[\"Bnegative\"] += 1\n\t\telif rating.overall == 1:\n\t\t\tratings[\"Bpositive\"] += 1\n\t\telif rating.overall == 0:\n\t\t\tratings[\"Bneutral\"] += 1\n\t\tpayment.append(rating.payment)\n\t\tB_com.append(rating.communication)\n\tbuyer_qry.map(get_Bratings)\n\n\tratings[\"Bpos percentage\"] = 
float(ratings[\"Bpositive\"])/float(len(B_overall)or 1)*100\n\tratings[\"Bneg percentage\"] = float(ratings[\"Bnegative\"])/float(len(B_overall)or 1)*100\n\tratings[\"Bneut percentage\"] = float(ratings[\"Bneutral\"])/float(len(B_overall) or 1)*100\n\n\tratings[\"Bpayment\"] = average(payment)\n\tratings[\"Bcommunication\"] = average(B_com)\n\n\tratings[\"user\"] = average(payment + B_com + shipping + honesty + com)\n\tratings[\"amount\"] = len(B_overall) + len(overall)\n\tratings[\"buy_amount\"] = len(B_overall)\n\tratings[\"sell_amount\"] = len(overall)\n\t\n\treturn ratings", "title": "" }, { "docid": "90e9a671ecd109ea6fe30860739e9578", "score": "0.5848464", "text": "def _predict_user_rating(user_id: UserId, item_id: ItemId) -> Rating:\n mean: Rating = self.means[user_id]\n std: float = self.stds[user_id]\n\n neighbors = _select_neighbors(user_id, item_id)\n if not neighbors.empty:\n rating = sum(correlation * self.ratings.loc[neighbor_id, item_id]\n for neighbor_id, correlation in neighbors.items()) \\\n / sum(abs(correlation) for _, correlation in neighbors.items())\n predicted_rating = (rating * std) + mean\n\n else: # If no users similar to the current user have rated the item, default to mean user rating\n predicted_rating = mean\n\n return round(predicted_rating)", "title": "" }, { "docid": "7eb47763a78e13e2388ee4b0b1bd7147", "score": "0.5844153", "text": "def get_rating(self, user):\r\n return getattr(Vote.objects.get_for_user(self, user), 'vote', None)", "title": "" }, { "docid": "873acbf196525f15489cda03b77e54e9", "score": "0.5829937", "text": "async def get_utilisation(self, drone_uuid: str) -> float:\n return min(await self.get_resource_ratios(drone_uuid), default=0.0)", "title": "" }, { "docid": "261ac9512d9638592195291c98dbfc47", "score": "0.58247954", "text": "def get_users_rating(self, users: torch.tensor) -> torch.tensor:\n all_users, all_items = self.computer()\n users_emb = all_users[users.long()]\n items_emb = all_items\n rating = torch.matmul(users_emb, items_emb.t())\n return rating", "title": "" }, { "docid": "b00dd02eec11eaf4251df64ddb5d0b13", "score": "0.578568", "text": "def evaluate_recommendation(self, test_data):\n print(\"[{}] Start evaluating model with test data...\".format(self.name)) # noqa\n users_id = pd.unique(test_data['visitorid'])\n recall = precision = n_valid_users = covered_users = fallout = 0\n n_items = len(self.items)\n covered_items = set()\n for user_id in users_id:\n real_items_id = self.get_user_real_items(user_id, test_data)\n reco_items_id = self.make_recommendation(user_id)\n if not isinstance(reco_items_id, set):\n print('[{}] Cannot make recommendation for user {}'.format(self.name, user_id)) # noqa\n continue\n n_TP = self.compute_n_hit(reco_items_id, real_items_id)\n n_FP = len(reco_items_id) - n_TP\n # recall\n recall += n_TP/len(real_items_id)\n # precision\n precision += n_TP/len(reco_items_id)\n # fallout\n fallout += n_FP/(n_items-len(real_items_id))\n # coverage\n covered_items.update(reco_items_id)\n n_valid_users += 1\n recall /= n_valid_users\n precision /= n_valid_users\n fallout /= n_valid_users\n coverage = len(covered_items)/len(self.items)\n print('[{}] Number of valid unique users: {}'.format(self.name, n_valid_users))\n print('[{}] Total unique users in the test set: {}'.format(self.name, len(pd.unique(test_data['visitorid']))))\n print('[{}] Recall:{}, Precision:{}, Coverage:{}'.format(self.name, recall, precision, coverage))\n return {'recall': recall, 'precision': precision, 'fallout': fallout, 'coverage': 
coverage}", "title": "" }, { "docid": "99cbd061b9fc9b8cf8b6a380edec22f9", "score": "0.5767993", "text": "def __ratingsystem__():", "title": "" }, { "docid": "ee678029ad810542a081755f22967fa4", "score": "0.57382655", "text": "def recommend(user_in_question, users, distance_method, coefficient):\n # find most user with close taste \n similar_user = find_user_with_same_taste(user_in_question, users, distance_method, coefficient)\n print \"Most similar user %s\" % similar_user\n \n # Find unrates for user_in_question\n unrates = []\n similar_user_ratings = users[similar_user]\n print \"Similar user %s likes %r\" % (similar_user, similar_user_ratings)\n user_in_question_ratings = users[user_in_question]\n print \"User in question %s likes %r\" % (user_in_question, user_in_question_ratings)\n \n for band in similar_user_ratings:\n if not band in user_in_question_ratings: \n unrates.append((band, similar_user_ratings[band]))\n return similar_user, sorted(unrates, key=lambda bandTuple: bandTuple[1], reverse=True)", "title": "" }, { "docid": "7188df41617125f577cb21d88dc34246", "score": "0.57184684", "text": "def rateWineUser(user, wine, rating):", "title": "" }, { "docid": "a2339c5480979d2bcf45fb1ad378358b", "score": "0.5706257", "text": "def computeEstimatedRatings(urm, U, S, Vt, uTest, K, test):\n rightTerm = S*Vt \n r=rightTerm.todense()\n #print(r)\n #print()\n estimatedRatings = np.zeros(shape=(MAX_UID, MAX_PID), dtype=np.float16)\n #for userTest in uTest:\n prod = U[uTest, :]*rightTerm\n #print(prod)\n print()\n #we convert the vector to dense format in order to get the indices \n #of the movies with the best estimated ratings \n estimatedRatings[uTest, :] = prod.todense()\n print(estimatedRatings[uTest])\n \n # recom = (-estimatedRatings[userTest, :]).argsort()\n return estimatedRatings[uTest]", "title": "" }, { "docid": "d40a4f9ec6acb01b08e8d26edf8097bc", "score": "0.5694388", "text": "def __inputrating__():", "title": "" }, { "docid": "df0ea4b07b453808ed0ca0f508f4b5dd", "score": "0.5689248", "text": "def get_customer_rating(user_info, customer_id) :\n return user_info[customer_id]['total'] / user_info[customer_id]['count']", "title": "" }, { "docid": "9feec59e285b853a2fc9d22e59a2b5e9", "score": "0.5622826", "text": "def normalize_user_ratings(utility_matrix: np.array) -> np.array:\n t = np.transpose\n avgs = np.sum(utility_matrix, axis=1) / np.count_nonzero(utility_matrix, axis=1)\n nonzero_mask = np.where(utility_matrix > 0, 1, 0)\n return t(t(utility_matrix) - avgs) * nonzero_mask\n\n # avgs = np.sum(utility_matrix, axis=1) / np.count_nonzero(utility_matrix, axis=1)\n # zero_mask = np.where(utility_matrix > 0, 0, 1)\n # return t(t(utility_matrix) + avgs * t(zero_mask))", "title": "" }, { "docid": "986f3cab74439765f9198c8cecceaf4a", "score": "0.562273", "text": "def calculate_user_mean_centered_rating(ratings_user, avgRatingUser):\n sum = 0.0\n for rating in ratings_user:\n sum = sum + pow(rating['rating']-avgRatingUser, 2)\n return sum", "title": "" }, { "docid": "75ff0eecd3c34efb5068ec4138b445a2", "score": "0.5617989", "text": "def _transform_rating(self, rating: float):\n return (rating - self._min_rating)/(self._max_rating - self._min_rating)", "title": "" }, { "docid": "397d1c1174864f174b2279421445c204", "score": "0.5564061", "text": "def dbRateWineUser(user, wine, rating):", "title": "" }, { "docid": "d2fb12a3c09357b3f4aeaf7468e3592c", "score": "0.5547706", "text": "def calculate_rating(self, line, ratings, wordVectors, vocabSet, alg=0):\n data_type = type(line)\n if data_type == 
list:\n t1 = [c.bag_of_words(vocabSet, i) for i in line]\n # t1 = self.bag_of_words(vocabSet, line)\n if alg == 1:\n t1 = c.tf_idf1(t1, vocabSet)\n elif alg == 2:\n t1 = c.tf_idf2(t1, vocabSet)\n elif alg == 3:\n t1 = c.tf_idf3(t1, vocabSet)\n t1 = [y for i in t1 for y in i]\n w = [c.cosine_similarity(i, t1) for i in wordVectors]\n for i in w:\n if type(i) == str:\n return 'With our data, I cannot come up with a rating...'\n assert len(w) == len(ratings)\n y = [w[i] * ratings[i] for i in range(len(ratings))]\n return sum(y) / sum(w)\n elif data_type == dict:\n pass", "title": "" }, { "docid": "835d13cf739c031ef2ba54e1d76d800d", "score": "0.55386937", "text": "def rating(self):\n return float(self.total)/self.votes", "title": "" }, { "docid": "e254b1002c54a9a453f67d5d42922833", "score": "0.5530621", "text": "def passer_rating(self):\n l = [((self.passing_cmp / self.passing_att) - .3) * 5]\n l.append(((self.passing_yds / self.passing_att) - 3) * .25)\n l.append((self.tds / self.passing_att) * 20)\n l.append(2.375 - (self.passing_ints / self.passing_att * 25))\n\n m = []\n for a in l:\n if a < 0:\n a = 0\n m.append(a)\n elif a > 2.375:\n a = 2.375\n m.append(a)\n else:\n m.append(a)\n\n rating = round((sum(m) / 6) * 100, 1)\n return rating", "title": "" }, { "docid": "c026bcdc93e20973611799b0c3f5aab6", "score": "0.5526393", "text": "def eval(self, recommenders=None, minRatingsPerUser=1):\n self.test_set = check_matrix(self.test_set, 'csr', dtype=np.float32)\n\n nusers, nitems = self.test_set.shape\n at = self.at\n n_eval = 0\n\n rows = self.test_set.indptr\n numRatings = np.ediff1d(rows)\n mask = numRatings >= minRatingsPerUser\n usersToEvaluate = np.arange(nusers)[mask]\n usersToEvaluate = list(usersToEvaluate)\n\n recommenders_to_evaluate = list(recommenders.keys())\n n_recs = len(recommenders_to_evaluate)\n\n rmse_ = np.zeros(shape=(n_recs,))\n roc_auc_ = np.zeros(shape=(n_recs,))\n precision_ = np.zeros(shape=(n_recs,))\n recall_ = np.zeros(shape=(n_recs,))\n map_ = np.zeros(shape=(n_recs,))\n mrr_ = np.zeros(shape=(n_recs,))\n ndcg_ = np.zeros(shape=(n_recs,))\n\n pop_bins_by_recommender = dict()\n\n for rec_key in recommenders_to_evaluate:\n if (not rec_key in self.rec_evals):\n self.rec_evals[rec_key] = dict()\n self.rec_evals[rec_key]['RMSE'] = list()\n self.rec_evals[rec_key]['ROC_AUC'] = list()\n self.rec_evals[rec_key]['Precision'] = list()\n self.rec_evals[rec_key]['Recall'] = list()\n self.rec_evals[rec_key]['MAP'] = list()\n self.rec_evals[rec_key]['MRR'] = list()\n self.rec_evals[rec_key]['NDCG'] = list()\n self.rec_evals[rec_key]['item_pop_bin'] = list()\n\n for test_user in usersToEvaluate:\n if (test_user % 10000 == 0):\n logger.info(\"Evaluating user {}\".format(test_user))\n\n # Getting user_profile by it's rated items (relevant_items) in the test.\n relevant_items = self.test_set[test_user].indices\n relevant_predictions = self.test_set[test_user,relevant_items].toarray()\n relevant_data = self.test_set[test_user].data\n i = 0\n for rec_key in recommenders_to_evaluate:\n rec_to_eval = recommenders[rec_key]\n\n ranked_items = rec_to_eval.recommend(user_id=test_user,\n n=at,\n exclude_seen=True\n )\n predicted_relevant_items = rec_to_eval.predict(user_id=test_user,\n rated_indices=relevant_items\n )\n\n # evaluate the recommendation list with RMSE and ranking metrics.\n is_relevant = np.in1d(ranked_items,\n relevant_items,\n assume_unique=True\n )\n # TopPop only works for ranking metrics.\n if (rec_key == \"TopPop1\" or rec_key == \"TopPop2\"):\n rmse_[i] += 0.0\n else:\n 
rmse_[i] += metrics.rmse(predicted_relevant_items, relevant_predictions)\n roc_auc_[i] += metrics.roc_auc(is_relevant)\n precision_[i] += metrics.precision(is_relevant)\n recall_[i] += metrics.recall(is_relevant, relevant_items)\n map_[i] += metrics.map(is_relevant, relevant_items)\n mrr_[i] += metrics.rr(is_relevant)\n ndcg_[i] += metrics.ndcg(ranked_items, relevant_items, relevance=relevant_data, at=at)\n\n if (self.eval_bins):\n if (not rec_key in pop_bins_by_recommender.keys()):\n pop_bins_by_recommender[rec_key] = np.zeros(self.nbins, dtype=np.int32)\n\n pop_bins_by_recommender[rec_key] += self.check_ranked_in_bins(ranked_list=ranked_items,rec_key=rec_key)\n\n i += 1\n\n # Increase the number of evaluations performed.\n n_eval += 1\n\n # Recommender evaluation.\n i = 0\n for rec_key in recommenders_to_evaluate:\n self.rec_evals[rec_key]['RMSE'].append(rmse_[i] / n_eval)\n self.rec_evals[rec_key]['ROC_AUC'].append(roc_auc_[i] / n_eval)\n self.rec_evals[rec_key]['Precision'].append(precision_[i] / n_eval)\n self.rec_evals[rec_key]['Recall'].append(recall_[i] / n_eval)\n self.rec_evals[rec_key]['MAP'].append(map_[i] / n_eval)\n self.rec_evals[rec_key]['MRR'].append(mrr_[i] / n_eval)\n self.rec_evals[rec_key]['NDCG'].append(ndcg_[i] / n_eval)\n\n if (self.eval_bins):\n self.rec_evals[rec_key]['item_pop_bin'].append(pop_bins_by_recommender[rec_key])\n\n i += 1", "title": "" }, { "docid": "8888c2e6bbabc0deae972974aa849725", "score": "0.5512753", "text": "def administer(self): \n score = 0 \n for question in self.questions:\n if question.ask_and_evaluate() == True:\n score += 1\n return float(score)/len(self.questions)", "title": "" }, { "docid": "90e9f1b4f69d4b0e0e323478729ba438", "score": "0.5504636", "text": "def cal_user_similarity_best(self) -> Dict[str, Dict[str, float]]:\n similarity: Dict[str, Dict[str, float]] = {} # the result to return.\n item_users: Dict[str, List[str]] = {} # The inverted table, e.g. 
for each item, summary the users who rated.\n N: Dict[str, int] = {} # Count the num of items rated by each user.\n C: Dict[str, Dict[str, int]] = {} # Count the num of intersect items between each pair of user.\n\n # Step1: Generate inverted table\n for user, items in self.user_scores.items():\n for item, score in items.items():\n item_users.setdefault(item, [])\n if score > 0:\n item_users[item].append(user)\n\n # Step2: Summary to N and C\n for item, users in item_users.items():\n for u in users:\n N.setdefault(u, 0)\n N[u] += 1\n C.setdefault(u, {})\n for v in users:\n C[u].setdefault(v, 0)\n if v == u:\n continue\n C[u][v] += 1 / math.log(1 + len(users)) # Hot item punishment, the only difference from `better`.\n\n # Step3: Compute similarity.\n for u, related_users in C.items(): # loop on each user\n similarity.setdefault(u, {})\n for v, cuv in related_users.items():\n if v == u:\n continue\n similarity[u][v] = cuv / ((N[u] * N[v]) ** 0.5)\n\n return similarity", "title": "" }, { "docid": "010159b8b3253a8b6c7e136fe554c75f", "score": "0.54657614", "text": "def calc_schofield_multiplier(gender, occupation_activity, non_occupation_activity):\n if gender.lower() == 'male':\n if occupation_activity == 1 and non_occupation_activity == 1:\n schofield_multiplier = 1.4\n elif occupation_activity == 1 and non_occupation_activity == 2:\n schofield_multiplier = 1.5\n elif occupation_activity == 1 and non_occupation_activity == 3:\n schofield_multiplier = 1.6\n elif occupation_activity == 2 and non_occupation_activity == 1:\n schofield_multiplier = 1.6\n elif occupation_activity == 2 and non_occupation_activity == 2:\n schofield_multiplier = 1.7\n elif occupation_activity == 2 and non_occupation_activity == 3:\n schofield_multiplier = 1.8\n elif occupation_activity == 3 and non_occupation_activity == 1:\n schofield_multiplier = 1.7\n elif occupation_activity == 3 and non_occupation_activity == 2:\n schofield_multiplier = 1.8\n elif occupation_activity == 3 and non_occupation_activity == 3:\n schofield_multiplier = 1.9\n return schofield_multiplier\n elif gender.lower() == 'female':\n if occupation_activity == 1 and non_occupation_activity == 1:\n schofield_multiplier = 1.4\n elif occupation_activity == 1 and non_occupation_activity == 2:\n schofield_multiplier = 1.5\n elif occupation_activity == 1 and non_occupation_activity == 3:\n schofield_multiplier = 1.6\n elif occupation_activity == 2 and non_occupation_activity == 1:\n schofield_multiplier = 1.5\n elif occupation_activity == 2 and non_occupation_activity == 2:\n schofield_multiplier = 1.6\n elif occupation_activity == 2 and non_occupation_activity == 3:\n schofield_multiplier = 1.7\n elif occupation_activity == 3 and non_occupation_activity == 1:\n schofield_multiplier = 1.5\n elif occupation_activity == 3 and non_occupation_activity == 2:\n schofield_multiplier = 1.6\n elif occupation_activity == 3 and non_occupation_activity == 3:\n schofield_multiplier = 1.7\n return schofield_multiplier", "title": "" }, { "docid": "587b7fd6e1c85f69dc47bdca4545b87c", "score": "0.5450744", "text": "def user_item_score(self, user: str, item: str) -> float:\n score = 0.0\n user_sim = self.users_sim[user]\n for user_, sim in user_sim.items():\n score += sim * self.user_scores[user_][item]\n return score", "title": "" }, { "docid": "ca7929532e0961e56cb001ca104756e8", "score": "0.5449005", "text": "def cal_user_similarity_better(self) -> Dict[str, Dict[str, float]]:\n similarity: Dict[str, Dict[str, float]] = {} # the result to return.\n item_users: Dict[str, 
List[str]] = {} # The inverted table, e.g. for each item, summary the users who rated.\n N: Dict[str, int] = {} # Count the num of items rated by each user.\n C: Dict[str, Dict[str, int]] = {} # Count the num of intersect items between each pair of user.\n\n # Step1: Generate inverted table\n for user, items in self.user_scores.items():\n for item, score in items.items():\n item_users.setdefault(item, [])\n if score > 0:\n item_users[item].append(user)\n\n # Step2: Summary to N and C\n for item, users in item_users.items():\n for u in users:\n N.setdefault(u, 0)\n N[u] += 1\n C.setdefault(u, {})\n for v in users:\n C[u].setdefault(v, 0)\n if v == u:\n continue\n C[u][v] += 1\n\n # Step3: Compute similarity.\n for u, related_users in C.items(): # loop on each user\n similarity.setdefault(u, {})\n for v, cuv in related_users.items():\n if v == u:\n continue\n similarity[u][v] = cuv / ((N[u] * N[v]) ** 0.5)\n\n return similarity", "title": "" }, { "docid": "c85540702cae238e1091814596fa5c7b", "score": "0.5447496", "text": "def get_rating(self, instance):\n rate_sum, rate_count = 0, 0\n for rev in instance.reviews.all():\n rate_sum += rev.rate\n rate_count += 1\n return round(rate_sum / rate_count, 2) if rate_count else None", "title": "" }, { "docid": "7a97f04f9f06ebb3b3d50fe6c28b9ecc", "score": "0.5447448", "text": "def calculate_average_utility(self, unused_t):\n if self.is_active:\n summe = 0\n for ind in self.individuals:\n summe += ind.utility\n self.average_utility = summe / self.population", "title": "" }, { "docid": "8f295d1aed3aa79c6b32f0e8a0c14382", "score": "0.5426131", "text": "def predict_rating(user_id, item_id):\n user_preference = latent_user_preferences[user_id]\n item_preference = latent_item_features[item_id]\n return user_preference.dot(item_preference)", "title": "" }, { "docid": "ce5b5dc42063598abfe8c1df25ec1aae", "score": "0.542379", "text": "def evaluation(self,indiv):\r\n\t\t################################\r\n\t\t# computing individual benefit #\r\n\t\t################################\r\n\t\tBonus = percent(indiv.gene_relative_intensity('favourable') * self.Parameter('IndividualBenefit'))\r\n\t\tBonus += percent(self.CollectiveAsset * self.Parameter('CollectiveBenefit'))\r\n\t\tindiv.score(Bonus, FlagSet=False) # Bonus is added to Score \r", "title": "" }, { "docid": "ef361a9ee1a535101d5687b99092a1bf", "score": "0.54117924", "text": "def get_rating(self):\n return self.__rating", "title": "" }, { "docid": "6678a6aefcc219733fc97a7416edac76", "score": "0.5408331", "text": "def administer(self):\n\n score = 0.00 # set to a float\n\n # Call the method on the \n for question in self.questions_list:\n num_corr_response = question.ask_and_evaluate()\n if num_corr_response == True:\n score += 1\n\n return score / len(self.questions_list) * 100", "title": "" }, { "docid": "489971a7af4289306b214948ce337240", "score": "0.5403823", "text": "def recalculate_user(user_ratings, model_file_path='files/model.sav', sparse_user_item_file_path='files/sparse_user_item.npz'):\n alpha = 40\n m = load_npz(sparse_user_item_file_path)\n n_users, n_movies = m.shape\n\n ratings = [alpha for i in range(len(user_ratings))]\n\n m.data = np.hstack((m.data, ratings))\n m.indices = np.hstack((m.indices, user_ratings))\n m.indptr = np.hstack((m.indptr, len(m.data)))\n m._shape = (n_users + 1, n_movies)\n\n with open(model_file_path, 'rb') as pickle_in:\n model = pickle.load(pickle_in)\n\n recommended, _ = model.recommend(n_users, m[n_users], recalculate_user=True)\n\n return recommended, 
map_movies(recommended)", "title": "" }, { "docid": "4312f10af283244ae601da64f5b0c2fc", "score": "0.53949004", "text": "def recommend(self, user: str) -> Dict[str, float]:\n return {item: self.user_item_score(user, item) for item in self.user_non_score_items[user]}", "title": "" }, { "docid": "6daf41e2a376a8fe5fb7c6af59eac024", "score": "0.5394386", "text": "def __ratingresults_():", "title": "" }, { "docid": "9f1a64feffcce98bae940cba5b571a0e", "score": "0.53740144", "text": "def armor_rating(actor):\n ## Note that armor is pre-scaled by Guild's armor type skills * level\n armor = actor.get_total_ac()\n bonus = stat_bonus(actor, 'precision')\n return armor * bonus", "title": "" }, { "docid": "06774fb429a2fc300ee4a87a48a194df", "score": "0.53560036", "text": "def cal_user_similarity(self) -> Dict[str, Dict[str, float]]:\n similarity: Dict[str, Dict[str, float]] = {}\n user_items: Dict[str, Set[str]] = {user: set([item for item, rating in item_ratings.items() if rating > 0])\n for user, item_ratings in self.user_scores.items()}\n for u, u_items in user_items.items():\n similarity.setdefault(u, {})\n for v, v_items in user_items.items():\n if u == v:\n continue\n sim = self._cal_user_similarity(u_items, v_items)\n similarity[u][v] = sim\n return similarity", "title": "" }, { "docid": "c1168be080591adc14c99677befb2425", "score": "0.535189", "text": "def evaluate(true_ratings, predicted_ratings, func=\"mae\"):\n assert len(true_ratings) == len(predicted_ratings)\n if func == \"mae\":\n absolute_errors = sum(\n abs(a - b) for a, b in zip(true_ratings, predicted_ratings)\n )\n mae = absolute_errors / len(true_ratings)\n val = mae\n elif func == \"mse\":\n sq_errors = sum((a - b) ** 2 for a, b in zip(true_ratings, predicted_ratings))\n val = sq_errors / len(true_ratings)\n return val", "title": "" }, { "docid": "ec3563bc6562c6a0c5749ab748bfc5ce", "score": "0.5339137", "text": "def _recommend(self, user_id, item_id):\n p = np.mat(self.p.ix[user_id].values)\n q = np.mat(self.q.ix[item_id].values).T\n r = (p * q).sum()\n logit = 1.0 / (1 + exp(-r))\n return logit", "title": "" }, { "docid": "2d9baafbf6dd5a3e5556f4c870154422", "score": "0.53352696", "text": "def get_satisfaction_rate(uid):\n\n x = list(db_shop.find({\"tg_uid\": ObjectId(uid), \"tour_reviews\": {\"$ne\": \"null\"}}, {\"_id\": 0, \"tour_reviews\": 1}))\n x = [x[i] for i in range(len(x)) if len(x[i]['tour_reviews']) != 0]\n l = []\n for listing in x:\n # print(listing)\n d = {}\n for review in listing['tour_reviews']:\n # print(review)\n booking_id = review['booking']\n # print(booking_id)\n book = list(db_transactions.find({'booking': ObjectId(booking_id)}))[0]\n date = f\"{book['month_paid']}-{book['year_paid']}\"\n if 'date' in d:\n d['stars'].append(int(review['stars']))\n else:\n d['date'] = date\n d['stars'] = [int(review['stars'])]\n\n d['stars'] = mean(d['stars'])\n l.append(d)\n\n l.sort(key=lambda x: datetime.strptime(x['date'], '%m-%Y'))\n\n if len(l) != 6:\n for _ in range(6 - len(l)):\n l.insert(0, {'stars': 0})\n\n return l", "title": "" }, { "docid": "9f3d8e7aaf76265eaadfb83e87820e61", "score": "0.5331512", "text": "def rating(self):\n return self._row[7] or 0", "title": "" }, { "docid": "91e46dcd87526da0fbb1905101eec2d2", "score": "0.53236645", "text": "def __average_rating(self):\n self.__rating = 0\n for review in self.__review_list:\n self.__rating += review.rating\n self.__rating /= len(self.__review_list)", "title": "" }, { "docid": "4441bb143434de376212e7babeddee52", "score": "0.5320145", "text": "def 
calculate_significance_weighing_factor(userAid, userUid, ratings, amount=None):\n if amount is None:\n if ratings is None:\n raise ValueError(\"[Error] 'ratings' is none! Cannot continue with the method!\")\n common_ratings, amount = utils.find_common_ratings(userAid, userUid, ratings)\n return float(min(CUT_OFF, amount))/float(CUT_OFF)", "title": "" }, { "docid": "30b495a3c9c125912510c2d14d437c77", "score": "0.531801", "text": "def score(self, predictions):\n\n df = pd.concat([predictions,\n self.target], axis=1)\n\n g = df.groupby('user')\n\n top_5 = g.rating.transform(\n lambda x: x >= x.quantile(.95)\n )\n\n return self.target[top_5==1].mean()", "title": "" }, { "docid": "faf6a76e3874dc452877357a87ddf032", "score": "0.53024733", "text": "def satisfactionMetrics():\n\tprint(\"For purchasers in what period?\")\n\tstartDate, endDate = inputDate()\n\tbought = data.loc[data.boughtTime.between(startDate, endDate)]\n\n\tmeanReg = bought.satisfactionFeedbackIntro.mean()\n\tFFsubset = bought.loc[bought.upliftRatingatFF.notnull() == True]\n\tmeanFF = FFsubset.upliftRatingatFF.mean()\n\tTEsubset = bought.loc[bought.upliftRatingatTE.notnull() == True]\n\tif bought.upliftRatingatTE.notnull().sum() == 0:\n\t\tmeanTE = 0\n\telse:\n\t\tmeanTE = TEsubset.upliftRatingatTE.mean()\n\n\tprint(\"Mean Satisfaction Scores (Intro 1-5, others 1-10)\")\n\tprint(\"1:\", round(meanReg))\n\tprint(\"2:\", round(meanFF), \"(\", FFsubset.shape[0], \"total entries)\")\n\tprint(\"8:\", round(meanTE), \"(\", TEsubset.shape[0], \"total entries)\")\n\n\tinput()", "title": "" }, { "docid": "4734434cab6ee5845b69a29c1152eae8", "score": "0.5295617", "text": "def main() -> None:\n user = User()\n\n # for i in range(10):\n # user.add_product()\n [(user.add_product(250)) for _ in range(10)]\n\n health = user.calculate_health_score(user.user_products)\n calories = user.calculate_calorie_score(user.user_products)\n\n print(\"{}\\n{}\\n{}\".format(health, calories,\n user.calculate_final_score()))", "title": "" }, { "docid": "95ec6d6d30e8bff41583dae9fe0289d1", "score": "0.52831197", "text": "def calculate_final_score(self) -> float:\n score = self.calculate_health_score(self.user_products)\n kcal_score = self.calculate_calorie_score(self.user_products)\n\n return round((0.7 * kcal_score) + (0.3 * score), 1)", "title": "" }, { "docid": "04be3f2ef5b62a5c09c88d3107e2677b", "score": "0.52796715", "text": "def advisability(self):\r\n advisability = self._temperature_factor() + self._rain_factor()\r\n\r\n if advisability < -5:\r\n advisability = -5\r\n elif advisability > 5:\r\n advisability = 5\r\n\r\n return advisability", "title": "" }, { "docid": "dc259fb6ea63b659ccbd8943f7e82b8b", "score": "0.52792066", "text": "def predictRating(self, userId, itemId, similarity_threshold=DEFAULT_SIM, subset_fuzziness=0.8):\n # Get the list of items already rated by userId\n relevantSet = set()\n ratingsSum = 0\n ratingsCount = 0\n for item, rating in self.ratingsIndex[userId]:\n relevantSet.add(item)\n ratingsSum += rating\n ratingsCount += 1\n relevantSet.add(itemId)\n ratingsMean = float(ratingsSum) / float(ratingsCount)\n # Find users that rated the same items\n ratersList = set()\n for user, item in self.ratingsIndex.items():\n potentialSet = {item[0] for item in item}\n if len(relevantSet.intersection(potentialSet)) > int(subset_fuzziness * len(relevantSet)) and user != userId:\n # print(len(relevantSet.intersection(potentialSet)), int(subset_fuzziness * len(relevantSet)), user != userId)\n # print('Yay!')\n ratersList.add(user)\n # Compute 
pearson similarities ( pearson was recommended in the lecture )\n similarities = {}\n vector1 = [rating[1] for rating in self.ratingsIndex[userId]]\n for rater in ratersList:\n vector2 = [rating[1] for rating in self.ratingsIndex[rater]]\n similarities[rater] = self.pearsonSimilarity(vector1, vector2)\n # Neighbor selection\n neighbors = []\n for user, similarity in similarities.items():\n # Select only users with more than 80% similarity by default\n if similarity > similarity_threshold:\n neighbors.append(user)\n # Check if neighbors list is empty\n if not neighbors:\n return \"No neighbors found, try to change similarity value\"\n # Value prediction\n similaritySum = 0.0\n neighborsMean = {}\n itemIdRatings = {}\n for user in neighbors:\n similaritySum += similarities[user]\n neighborSum = 0\n neighborCount = 0\n for item, rating in self.ratingsIndex[user]:\n if item in relevantSet:\n if item == itemId:\n itemIdRatings[user] = rating\n neighborSum += rating\n neighborCount += 1\n if neighborCount != 0:\n neighborsMean[user] = float(neighborSum) / float(neighborCount)\n else:\n return \"Error: Cannot Divide by 0\"\n # Calculating K\n k = 1/similaritySum\n predSum = 0\n # Calculating SubSum\n for user in neighbors:\n similarity = similarities[user]\n mean = neighborsMean[user]\n if user in itemIdRatings:\n rating = itemIdRatings[user]\n predSum += similarity * (rating - mean)\n return ratingsMean + k * predSum", "title": "" }, { "docid": "57b5c77de650244ec6102d5a1c99", "score": "0.5274964", "text": "def getGradeAndAccuracyScore(turthMaskTotal,userCorrectMaskTotal,userOverlapTotal):\n grade = (userCorrectMaskTotal * 100) / turthMaskTotal\n #accuracyScore = ((grade/100) * userOverlapTotal) -userOverlapTotal\n accuracyScore = userOverlapTotal*-1\n #print(userOverlapTotal)\n #if((grade/100) == 1):\n # accuracyScore = -userOverlapTotal\n return grade, accuracyScore", "title": "" }, { "docid": "7d944f46e46e362fcec357bc74f36", "score": "0.52698654", "text": "def rating(self):\n \n ratings = [r.rating for r in self.review_set.all()]\n if len(ratings) == 0:\n return 0\n \n return round(sum(ratings)/len(ratings),1)", "title": "" }, { "docid": "9d948c85ac3c8915b6019bc8d940170a", "score": "0.5266056", "text": "def expected_rating(rating_dist):\n runsum = 0\n for i in [1, 2, 3, 4, 5]:\n runsum += rating_dist[i - 1] * i\n return runsum", "title": "" }, { "docid": "5b61b44d32bbf6e8aef877b467afeeaf", "score": "0.5250392", "text": "def get_rating(self):\n return Decimal(self.rating).normalize()", "title": "" }, { "docid": "ae0895ed288119221d03088b08654fbf", "score": "0.52367014", "text": "def defense_rating(actor, tutor=True):\n if tutor:\n skillup_check(actor, 'defense')\n base = actor.get_skill('defense')\n bonus = stat_bonus(actor, 'brawn')\n armor = armor_rating(actor)\n return base * bonus + armor", "title": "" }, { "docid": "10e59b13c0d6940c09fc3d86489f2dec", "score": "0.5235142", "text": "def get_review_rating(self):\n\n review_rating = self.soupe.find_all(\"td\")[6].text\n return review_rating", "title": "" }, { "docid": "6378d1f1dea8175a6c525be0c42e2412", "score": "0.522988", "text": "def _predict_score(self, u, i, cond=True):\n\n if self.baseline:\n rui = self.train_set[\"mean_value\"] + self.bu[u] + self.bi[i] + np.dot(self.p[u], self.q[i])\n else:\n rui = 
self.train_set['mean_value'] + np.dot(self.p[u], self.q[i])\n\n if cond:\n if rui > self.train_set[\"max_value\"]:\n rui = self.train_set[\"max_value\"]\n elif rui < self.train_set[\"min_value\"]:\n rui = self.train_set[\"min_value\"]\n\n return rui", "title": "" }, { "docid": "5686c46e7cc1fa090024ea0e4213ac6f", "score": "0.5223183", "text": "def rating(self) -> AssetScreenerCreditStandardAndPoorsRatingOptions:\n return self.__rating", "title": "" }, { "docid": "f4c0f379b280814a65e0db6ee67caf17", "score": "0.5220424", "text": "def get_user_score(self, username):\n user = User.nodes.get(username=username)\n completed_dares = user.completed_dare.all()\n total = 0\n for dare in completed_dares:\n total += dare.score\n return total", "title": "" }, { "docid": "c8169d0031c4617c7952821ff1401d83", "score": "0.5214937", "text": "def __pred(self, i, u, normalized=1):\r\n # Step 1: find all items which rated u\r\n ids = np.where(self.Y_data[:, 1] == u)[0].astype(np.int32)\r\n # Step 2:\r\n items_rated_u = (self.Y_data[ids, 0]).astype(np.int32)\r\n # Step 3: find similarity btw the current items and others\r\n # which already rated u\r\n sim = self.S[i, items_rated_u]\r\n # Step 4: find the k most similarity items\r\n a = np.argsort(sim)[-self.k:]\r\n # and the corresponding similarity levels\r\n nearest_s = sim[a]\r\n # How did each of 'near' items rated user u\r\n r = self.Ybar[u, items_rated_u[a]]\r\n if normalized:\r\n # add a small number, for instance, 1e-8, to avoid dividing by 0\r\n return (r * nearest_s)[0] / (np.abs(nearest_s).sum() + 1e-8)\r\n\r\n return (r * nearest_s)[0] / (np.abs(nearest_s).sum() + 1e-8) + self.mu[i]", "title": "" }, { "docid": "39b5db014741969dd857e66313ed6c28", "score": "0.52142763", "text": "def rating(self, item, user, as_int=True):\n rating = self.query_associated(self.rating_assoc, item).filter_by(user=user).first()\n # most common case is assumed to be 'get the number'\n if not as_int:\n return rating\n # get the int value if there's a rating\n return rating.rating if rating is not None else None", "title": "" }, { "docid": "f26d6386c1248f221e971bf488d947d5", "score": "0.521088", "text": "def __check_ratings__(self, prediction):\n maximum = 5.0\n minimum = 0.0\n if prediction < minimum:\n prediction = minimum\n elif prediction > maximum:\n prediction = maximum\n return prediction", "title": "" }, { "docid": "7f7e6cb3b73146e362fcec357bc74f36", "score": "0.52077454", "text": "def calculate_mean_ratings(userID, ratings_by_user=None):\n # To avoid excessive computation during run-time, the user ratings are provided by parameter.\n # If it's not none, then it can be used, otherwise we need to calculate it.\n if ratings_by_user is not None:\n return np.mean([rating['rating'] for rating in ratings_by_user])\n else:\n if globals.RATINGS_BY_USER is not None:\n tmp = globals.RATINGS_BY_USER[userID]\n return np.mean([rating['rating'] for rating in tmp])\n else:\n raise ValueError(\"[Error] RATINGS_BY_USER is none! 
Cannot continue with the method!\")", "title": "" }, { "docid": "5b07ad6132056c42c7beba3d9d442765", "score": "0.52067983", "text": "def get_utility(self, x):\n # Distribute input arguments\n x1, x2 = x\n\n alpha = self.alpha\n\n # Utility calculation\n u = (x1 ** alpha) * (x2 ** (1.0 - alpha))\n\n # Finishing\n return u", "title": "" }, { "docid": "7ba9fe1414e0570b0b92c68b65b42302", "score": "0.52047604", "text": "def _get_rating(self):\n self._cr.execute(\"\"\"\n SELECT res_id, avg(rating)\n FROM mail_message\n WHERE res_id IN %s AND model = 'product.template' AND rating > 0\n GROUP BY res_id\"\"\", (self._ids,))\n res = dict(self._cr.fetchall())\n for record in self:\n record.rating = res.get(record.id)", "title": "" }, { "docid": "62576ec21598661170b3e6ee65d0413d", "score": "0.5194102", "text": "def compare_rating(self, opponent):\n return (1 + 10 ** ((opponent.rating - self.rating) / 400.0)) ** -1", "title": "" }, { "docid": "3736e0458a4fa1b36946a8063714ffef", "score": "0.51857895", "text": "def fetch_rating(self):\n records = VoteRecord.fetch_all(recipe_id=self.ID)\n sum_ = sum([record.value for record in records])\n return sum_", "title": "" }, { "docid": "87146708b2b8e348554aa488e53af56e", "score": "0.51794016", "text": "def __init__(\n self, ratings_filename=None, \n user_min_ratings=10, restaurant_min_ratings=10,\n K=10\n ):\n\n self.K = K\n\n if ratings_filename is None:\n ratings = run_query(RATINGS_QUERY, use_prefixes=False)\n else:\n ratings = pd.read_csv(ratings_filename)\n\n # Before filtering, let's compute the mean ratings for all restaurants\n # This will be our \"prior knowledge\" that we will use when we don't know\n # the score to give to a restaurant.\n # We won't use the mean ratings directly, but the mean \"likeability\"\n # of a restaurant when we have standardized every user rating\n Xprior = ratings.copy()\n agg = Xprior.groupby('user').rating.agg(['mean', 'std']).reset_index()\n\n Xprior = Xprior.merge(agg)\n Xprior['rating'] = (Xprior['rating'] - Xprior['mean']) / Xprior['std']\n\n self.prior_ratings = Xprior.groupby('restaurant').rating.mean()\n self.restaurants = list(self.prior_ratings.index)\n\n\n # Filter the dataset\n restaurant_counts = ratings.groupby('restaurant').size()\n ratings = ratings[\n ratings.restaurant.isin(\n set(\n restaurant_counts[\n restaurant_counts >= restaurant_min_ratings\n ].index\n )\n )\n ].copy()\n\n user_counts = ratings.groupby('user').size()\n ratings = ratings[\n ratings.user.isin(\n set(user_counts[user_counts >= user_min_ratings].index)\n )\n ].copy()\n\n ratings = ratings.pivot_table(\n index='user', columns='restaurant', values='rating', aggfunc='mean'\n )\n\n self.nusers, self.nitems = ratings.shape\n\n X = ratings.values.copy()\n\n means = np.nanmean(X, axis=1)\n X -= means.reshape((self.nusers, 1))\n\n stds = np.nanstd(X, axis=1)\n X /= stds.reshape((self.nusers, 1))\n\n voted = ~np.isnan(X)\n\n # Assign results to the instance\n self.ratings = ratings\n self.X = X\n self.voted = voted\n self.means = means\n self.stds = stds\n\n self._users = list(ratings.index)\n self._items = list(ratings.columns)", "title": "" }, { "docid": "80220651b539479d85b45651fff3f6cd", "score": "0.5176807", "text": "def administer(self):\n\n score = 0\n\n for question in self.questions:\n if question.ask_question():\n score += 1\n\n return float(score) / len(self.questions)", "title": "" }, { "docid": "420e57f38c6f6d4900ad62c7e3f01a3b", "score": "0.51710355", "text": "def _cal_bonus(self):\n score = self.pattern.additional_score\n for 
search_path in self.recursion_path:\n score += search_path.pattern.additional_score\n return score", "title": "" }, { "docid": "4153b058f2f6352814dce53933e7e76c", "score": "0.5164078", "text": "def compute_average_user_ratings(user_ratings):\n ave_ratings = {}\n for user in user_ratings:\n user_dict = user_ratings[user]\n values = user_dict.values()\n ave_ratings[user] = sum(values)/float(len(values))\n return ave_ratings", "title": "" }, { "docid": "c343072364a18411236c9aa241fb9f52", "score": "0.51563877", "text": "def _MUF(self):\n return log10(self.rating**self.rating)", "title": "" }, { "docid": "e624f66efa5238fa2bde23cceb1ac7f8", "score": "0.51539934", "text": "def _calculate_call_cost(self, userid):\n return max(self.contrib.values()) - self.contrib[userid]", "title": "" }, { "docid": "13dbbf36ee096ec6f3e74cf2fdda176c", "score": "0.5153073", "text": "def recommend(self, user):\n recommendations = {}\n # first get list of users ordered by nearness\n nearest = self.computeNearestNeighbor(user)\n #\n # now get the ratings for the user\n #\n userRatings = self.data[user]\n #\n # determine the total distance\n totalDistance = 0.0\n for i in range(self.k):\n totalDistance += nearest[i][1]\n # now iterate through the k nearest neighbors\n # accumulating their ratings\n for i in range(self.k):\n # compute slice of pie \n weight = nearest[i][1] / totalDistance\n # get the name of the person\n name = nearest[i][0]\n # get the ratings for this person\n neighborRatings = self.data[name]\n # get the name of the person\n # now find bands neighbor rated that user didn't\n for artist in neighborRatings:\n if not artist in userRatings:\n if artist not in recommendations:\n recommendations[artist] = (neighborRatings[artist]\n * weight)\n else:\n recommendations[artist] = (recommendations[artist]\n + neighborRatings[artist]\n * weight)\n # now make list from dictionary\n recommendations = list(recommendations.items())\n recommendations = [(self.convertProductID2name(k), v)\n for (k, v) in recommendations]\n # finally sort and return\n recommendations.sort(key=lambda artistTuple: artistTuple[1],\n reverse = True)\n # Return the first n items\n return recommendations[:self.n]", "title": "" }, { "docid": "8e2096282f8403b5d63dbcfbd88a320a", "score": "0.5143201", "text": "def calculate_item_mean_centered_rating(ratings_by_users, avgRatingItem):\n sum = 0.0\n for rating in ratings_by_users:\n sum = sum + pow(rating['rating'] - avgRatingItem, 2)\n return sum", "title": "" }, { "docid": "43edb58a0ad2fa21a822d725a87ab7a7", "score": "0.51202434", "text": "def efficiencyRoleLevel(self):\r\n factor, addition = (1, 0)\r\n if self.isInTank:\r\n factor, addition = self.descriptor.efficiencyOnVehicle(self.vehicleDescr)\r\n return round(self.roleLevel * factor)", "title": "" }, { "docid": "777c4263bacd9512969d1c133a8d625f", "score": "0.51181704", "text": "def dodge_rating(actor, tutor=True):\n if tutor:\n skillup_check(actor, 'dodge')\n base = actor.get_skill('dodge')\n bonus = stat_bonus(actor, 'cunning')\n return base * bonus", "title": "" }, { "docid": "32c0ce304ca4a3fee9f72872dac83b1e", "score": "0.51154125", "text": "def evaluation_collaborative_svd_model(userId,userOrItem): \r\n movieIdsList= list()\r\n movieRatingList=list()\r\n movieIdRating= pd.DataFrame(columns=['movieId','rating'])\r\n if userOrItem== True:\r\n movieIdsList=getRecommendedMoviesAsperUserSimilarity(userId)\r\n else:\r\n movieIdsList=recommendedMoviesAsperItemSimilarity(user_id)\r\n for movieId in movieIdsList:\r\n predict = svd.predict(userId, 
movieId)\r\n movieRatingList.append([movieId,predict.est])\r\n movieIdRating = pd.DataFrame(np.array(movieRatingList), columns=['movieId','rating'])\r\n count=movieIdRating[(movieIdRating['rating'])>=3]['movieId'].count()\r\n total=movieIdRating.shape[0]\r\n hit_ratio= count/total\r\n return hit_ratio,", "title": "" }, { "docid": "5eecc71d27e61d9be37b2eb1f4c16c35", "score": "0.5113229", "text": "def computeError(predictedRDD, actualRDD):\r\n # Transform predictedRDD into the tuples of the form ((UserID, MovieID), Rating)\r\n predictedReformattedRDD = predictedRDD.map(lambda (UserID, MovieID, Rating): ((UserID, MovieID),Rating))\r\n\r\n # Transform actualRDD into the tuples of the form ((UserID, MovieID), Rating)\r\n actualReformattedRDD = actualRDD.map(lambda (UserID, MovieID, Rating): ((UserID, MovieID),Rating))\r\n\r\n # Compute the squared error for each matching entry (i.e., the same (User ID, Movie ID) in each\r\n # RDD) in the reformatted RDDs using RDD transformtions - do not use collect()\r\n squaredErrorsRDD = (predictedReformattedRDD\r\n .join(actualReformattedRDD).map(lambda ((u,m),(p,a)): float(math.pow((p-a),2))))\r\n\r\n # Compute the total squared error - do not use collect()\r\n totalError = squaredErrorsRDD.sum()\r\n\r\n # Count the number of entries for which you computed the total squared error\r\n numRatings = squaredErrorsRDD.count()\r\n\r\n # Using the total squared error and the number of entries, compute the RSME\r\n return math.sqrt(float(totalError/numRatings))", "title": "" }, { "docid": "a2fb8126978948fb31505e92791ceef0", "score": "0.51077765", "text": "def compute_measures(tp, fp, fn, tn):\n specificity = tn / (tn + fp)\n sensitivity = tp / (tp + fn)\n return sensitivity, specificity", "title": "" }, { "docid": "42c713f9932963eb6a61daee1a824a20", "score": "0.5104482", "text": "def main():\n args = parse_argument()\n train_file = args['train'][0]\n test_file = args['test'][0]\n train_user_ratings, train_movie_ratings = parse_file(train_file)\n test_user_ratings, test_movie_ratings = parse_file(test_file)\n train_ave_user_ratings = compute_average_user_ratings(train_user_ratings)\n f = open('predictions.txt', 'wt')\n writer = csv.writer(f)\n AE = 0.0\n SE = 0.0\n n = 0\n similarity_dict = {}\n for user_id in test_user_ratings:\n d1 = train_user_ratings[user_id]\n ave_rat1 = train_ave_user_ratings[user_id]\n for movie_id in test_user_ratings[user_id]:\n denom = 0.0\n numer = 0.0\n true_rating = test_user_ratings[user_id][movie_id]\n for movie_user_id in train_movie_ratings[movie_id]:\n if (user_id, movie_user_id) in similarity_dict or (movie_user_id, user_id) in similarity_dict:\n ave_rat2 = train_ave_user_ratings[movie_user_id]\n try:\n similarity = similarity_dict[(user_id, movie_user_id)]\n except:\n similarity = similarity_dict[(movie_user_id, user_id)]\n denom += abs(similarity)\n numer += similarity*(train_movie_ratings[movie_id][movie_user_id] - ave_rat2)\n else:\n d2 = train_user_ratings[movie_user_id]\n ave_rat2 = train_ave_user_ratings[movie_user_id]\n similarity = compute_user_similarity(d1, d2, ave_rat1, ave_rat2)\n similarity_dict[(user_id, movie_user_id)] = similarity\n denom += abs(similarity)\n numer += similarity*(train_movie_ratings[movie_id][movie_user_id] - ave_rat2)\n if denom == 0:\n rating_prediction = ave_rat1\n else:\n rating_prediction = ave_rat1 + numer/denom\n writer.writerow((movie_id, user_id, true_rating, rating_prediction))\n AE += abs(rating_prediction - true_rating)\n SE += (rating_prediction - true_rating)**2\n n += 1\n 
f.close()\n MAE = AE/n\n RMSE = math.sqrt(SE/n)\n print \"RMSE %0.4f\" % RMSE\n print \"MAE %0.4f\" % MAE", "title": "" }, { "docid": "73377bb30837b9b3c36970cd6dd8c15f", "score": "0.5097652", "text": "def _get_utility(net, gs, acq_func, y_best=0.0):\n # obtain predictive posterior\n distribution = _independent(net.condition(gs))\n\n # obtain utility from vanilla acquisition func\n utility = acq_func(distribution, y_best=y_best)\n \n return utility", "title": "" }, { "docid": "074e75d8e09461667757b4f94bdbc532", "score": "0.5086391", "text": "def compute_rates(self, pred, target):\n pred = tt.ge(pred, 0.5).to(tt.float32)\n tp = (target * pred).sum().to(tt.float32)\n tn = ((1 - target) * (1 - pred)).sum().to(tt.float32)\n fp = ((1 - target) * pred).sum().to(tt.float32)\n fn = (target * (1 - pred)).sum().to(tt.float32)\n\n return tp.item(), tn.item(), fp.item(), fn.item()", "title": "" }, { "docid": "18bd827cf619f05efb380eda95d20522", "score": "0.5082739", "text": "def testUsage(trainDf, testDf, businesses, businessesToIndex, hybridCorrelationWeights, userVectors, pseudoUserVectors, meanUserRatings, ratingCounts, n=10, mode=\"hybrid\"):\r\n # Calculate confusion matrices for each row.\r\n if mode == \"hybrid\":\r\n usage = pseudoUserVectors.apply(lambda row: calculateUsage(row, trainDf, testDf, businesses, hybridCorrelationWeights, userVectors, pseudoUserVectors, meanUserRatings, ratingCounts, n=n, hybrid=True), axis=1)\r\n elif mode == \"content\":\r\n usage = pseudoUserVectors.apply(lambda row: calculateUsage(row, trainDf, testDf, businesses, hybridCorrelationWeights, userVectors, pseudoUserVectors, meanUserRatings, ratingCounts, n=n, contentOnly=True), axis=1)\r\n elif mode == \"collaborative\":\r\n usage = pseudoUserVectors.apply(lambda row: calculateUsage(row, trainDf, testDf, businesses, hybridCorrelationWeights, userVectors, pseudoUserVectors, meanUserRatings, ratingCounts, n=n, hybrid=False), axis=1)\r\n # Aggregate all confusion matrices into one matrix.\r\n confusionFrame = usage.sum()\r\n # Calculate precision, recall and F1-score.\r\n precision = confusionFrame[\"TP\"] / (confusionFrame[\"TP\"] + confusionFrame[\"FP\"])\r\n recall = confusionFrame[\"TP\"] / (confusionFrame[\"TP\"] + confusionFrame[\"FN\"])\r\n fScore = 2 * (precision * recall) / (precision + recall)\r\n return fScore, precision, recall", "title": "" }, { "docid": "ddae2107ee56b930c09db9a0bbba6d01", "score": "0.50817776", "text": "def rating_prediction_user(userAid, itemID, neighborsIDs, neighbors_data):\n if globals.RATINGS_BY_USER is None:\n raise ValueError(\"[Error] RATINGS_BY_USER is none! Cannot continue with the method!\")\n if globals.RATINGS_BY_USER_MAP is None:\n raise ValueError(\"[Error] RATINGS_BY_USER_MAP is none! Cannot continue with the method!\")\n ratings_by_user_A = globals.RATINGS_BY_USER[userAid] # list of ALL ratings by user U\n mean_ratings_A = calculate_mean_ratings(userAid, ratings_by_user_A) # mean ratings of user U\n\n # WATCH OUT! 
To save time and computation resources, there is no need to calculate the Pearson correlation\n # as many times as it appears in the formula.\n # A numerator and denominator have to be calculated: it would be convenient to collect the corresponding\n # Pearson correlations once and use it either in numerator and denominator later on.\n # If 'neighbors_data' was populated with relevant data, we can obtain it from that dictionary.\n numerator = 0.0\n denominator = 0.0\n # If the target item has not been rated by anyone else, then, we return the mean rating of user A:\n if len(neighborsIDs) == 0:\n return mean_ratings_A\n for userUid in neighborsIDs:\n pearson_value = neighbors_data[userUid]['pearson'] # retrieve Pearson value\n ratings_by_user_U = neighbors_data[userUid]['ratings_by_user'] # retrieve all ratings by user U\n mean_ratings_U = calculate_mean_ratings(userUid, ratings_by_user_U) # mean ratings of user U\n if globals.LOG_STATUS is True:\n print \"Pearson value for user {0} = {1}\".format(userUid, pearson_value)\n # Fetch the rating of user U for item 'itemID':\n rating_u_i = globals.RATINGS_BY_USER_MAP[userUid][itemID]\n numerator = numerator + ((rating_u_i-mean_ratings_U)*pearson_value)\n denominator = denominator + pearson_value # updating the denominator with accumulative Pearson correlation values\n return mean_ratings_A + (float(numerator)/float(denominator))", "title": "" }, { "docid": "8ffb66b7b226f0e95011b6fea62e5793", "score": "0.5064356", "text": "def _compute_predictions(self, user_votes, user_mean, user_std, K):\n\n sims = np.nan_to_num(self._similarities(user_votes))\n predictions = []\n \n for item in range(self.nitems):\n if not np.isnan(user_votes[item]):\n predictions.append(user_votes[item])\n continue\n \n votes = self.voted[:, item]\n \n ratings = self.X[votes, item]\n users = np.arange(self.nusers)[votes]\n sims_f = sims[votes]\n \n neighs = (sims_f ** 2).argsort()[-K:] # take both >0 and <0\n pred = (\n sims_f[neighs].dot(ratings[neighs]) / \n np.abs(sims_f[neighs]).sum()\n )\n \n predictions.append(pred)\n \n pred = pd.Series(predictions, index=self._items)\n # Add all items that could not be predicted due to cold-start\n pred = pred.reindex(self.restaurants) # this adds nans\n\n # For those items where we haven't got a rating, \n # we'll use the prior rating \n # (the mean rating accross the entire dataset)\n # These ratings, however, will be penalized with -1 std\n pred[np.isnan(pred)] = self.prior_ratings[np.isnan(pred)] - 1\n\n # Finally, compute the real ratings (readjusting the normalization)\n pred = (pred * user_std + user_mean)#.round().astype(int)\n \n pred = np.where(pred < 1, 1, pred)\n pred = np.where(pred > 5, 5, pred)\n \n return pd.Series(pred, index=self.restaurants)", "title": "" }, { "docid": "bbe95424914c4a4f189a720f5f08089a", "score": "0.50585186", "text": "def get_similarity(user1: Rating, user2: Rating) -> float:\n shared = 0.0\n for m_id in user1:\n if m_id in user2:\n shared += user1[m_id] * user2[m_id]\n norm1 = 0.0\n for m_id in user1:\n norm1 = norm1 + user1[m_id] ** 2\n norm2 = 0.0\n for m_id in user2:\n norm2 = norm2 + user2[m_id] ** 2\n return (shared * shared) / (norm1 * norm2)", "title": "" }, { "docid": "81b798d77202d1667a7bb93c997c51a5", "score": "0.5050131", "text": "def get_rating(self, i, j):\n prediction = self.b + self.b_u[i] + self.b_i[j] + self.P[i, :].dot(self.Q[j, :].T)\n return prediction", "title": "" }, { "docid": "341cf8dd5e6951eddac51fa5f30f9aa9", "score": "0.5049374", "text": "def _get_rating(mr_val, snt):\n 
# if the previous sentence ends with a dot\n if snt[-1] != \".\":\n beginning = \" with\"\n else:\n beginning = \" It has\"\n\n if mr_val.isalpha():\n s = \"%s a %s customer rating\" % (beginning, mr_val)\n\n else:\n s = \"%s a customer rating of %s\" % (beginning, mr_val)\n\n return snt + s", "title": "" }, { "docid": "a41febb80800a5c47ef80b4f650c8936", "score": "0.5047668", "text": "def _compute_grade(formula, i, Q, cumul, best, acquired):\n i = int(i)\n if i == 0:\n I = cumul\n elif i == 1:\n I = best\n else:\n I = acquired\n \n Q /= 10\n I /= 100\n formula = \"10 * (%s)\" % formula\n return round(eval(formula.replace(\"^\", \"**\")), 2)", "title": "" }, { "docid": "7ac40228b40dddf0365225bac7c8b047", "score": "0.50356066", "text": "def unit_annotation_score(unit_id, annotation, unit_work_annotation_dict, wqs):\n\n uas_numerator = 0.0\n uas_denominator = 0.0\n\n worker_ids = unit_work_annotation_dict[unit_id]\n for worker_id in worker_ids:\n uas_numerator += worker_ids[worker_id][annotation] * wqs[worker_id]\n uas_denominator += wqs[worker_id]\n if uas_denominator < SMALL_NUMBER_CONST:\n uas_denominator = SMALL_NUMBER_CONST\n return uas_numerator / uas_denominator", "title": "" }, { "docid": "d47debcef97081f664f9d933e47e5946", "score": "0.5032895", "text": "def netflix_eval (movie_id, user_id) :\n assert type(movie_id) is str\n assert type(user_id) is str\n x = average_movie_rating_cache[movie_id]\n y = average_viewer_rating_cache[user_id]\n assert x > 0\n assert y > 0\n z = round((0.91 * (x + y) - 2.88), 1)\n return 1.0 if (z < 1.0) else 5.0 if (z > 5.0) else z", "title": "" }, { "docid": "999105cdf213c272ef8cb0be572e5206", "score": "0.5030382", "text": "def computeError(predictedRDD, actualRDD):\n # Transform predictedRDD into the tuples of the form ((UserID, bookId), Rating)\n \n predictedReformattedRDD = predictedRDD.map(lambda x:((x[0],x[1]),x[2]))\n\n # Transform actualRDD into the tuples of the form ((UserID, MovieID), Rating)\n \n actualReformattedRDD = actualRDD.map(lambda x:((x[0],x[1]),x[2]))\n \n # Compute the squared error for each matching entry (i.e., the same (User ID, Movie ID) in each\n # RDD) in the reformatted RDDs using RDD transformtions - do not use collect()\n #squaredErrorsRDD = (predictedReformattedRDD.<FILL IN>)\n squaredErrorsRDD = (predictedReformattedRDD.join(actualReformattedRDD).map(lambda((x,y),z):z).map(lambda (x,y):(x-y)**2))\n # Compute the total squared error - do not use collect()\n totalError = squaredErrorsRDD.sum()\n\n # Count the number of entries for which you computed the total squared error\n numRatings = squaredErrorsRDD.count()\n\n # Using the total squared error and the number of entries, compute the RSME\n \n return (math.sqrt(float(totalError)/numRatings))", "title": "" }, { "docid": "53a0b7fe1c23979ecd163b35b573bcb9", "score": "0.50280786", "text": "def update_ratings(self):\n # Update pages first because we have better priors/initial values for\n # the routes.\n self.update_page_ratings()\n self.update_route_ratings()", "title": "" } ]
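For reference only: the passages in the row above repeatedly implement the same user-based, mean-centered collaborative-filtering prediction (predictRating and rating_prediction_user are two variants of it). The sketch below is not part of the dataset; it merely distills that shared formula, and every name in it is illustrative rather than taken from any one passage.

def predict_rating(target_mean, neighbors):
    # neighbors: iterable of (similarity, neighbor_rating, neighbor_mean) tuples
    numerator = sum(s * (r - m) for s, r, m in neighbors)
    denominator = sum(abs(s) for s, _, _ in neighbors)
    if denominator == 0:
        return target_mean  # no informative neighbors: fall back to the user's mean rating
    return target_mean + numerator / denominator

# e.g. predict_rating(3.5, [(0.9, 4.0, 3.2), (0.4, 2.0, 2.5)]) -> ~3.9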
cf4de57d473a86eab5179fa5926a1e81
set_car(swig_int_ptr pair, swig_int_ptr value) Stores value in the car field of pair.
[ { "docid": "2d00be2864280cb7b8c6b73722c609bb", "score": "0.67336106", "text": "def set_car(*args, **kwargs):\n return _pmt_swig.set_car(*args, **kwargs)", "title": "" } ]
[ { "docid": "10c6f080f447cf0c29f145940470ac59", "score": "0.5541338", "text": "def setInteger(self, value):", "title": "" }, { "docid": "10c6f080f447cf0c29f145940470ac59", "score": "0.5541338", "text": "def setInteger(self, value):", "title": "" }, { "docid": "10c6f080f447cf0c29f145940470ac59", "score": "0.5541338", "text": "def setInteger(self, value):", "title": "" }, { "docid": "ccbaf50bebb72d872162b3b62c2fd2a4", "score": "0.5441937", "text": "def define(self, sym, val):\n self.inner[sym] = val", "title": "" }, { "docid": "531400cdcbc6f593dd6628ce64b05f7c", "score": "0.5358703", "text": "def __set__(self,instance,value): \n raise TypeError(\"Gouy phase is not possible to set\")", "title": "" }, { "docid": "97825e44485fe51eca35c156bfe0a600", "score": "0.5324856", "text": "def set_tag(self,tag,tag_value):\n for pid, ptclObj in self.ptclC :\n ptclObj.tagsDict[tag] = tag_value", "title": "" }, { "docid": "6c8fa41a944eeda0b1c78a97ff412f28", "score": "0.53243405", "text": "def set_index(self, pair):\n self.index = pair", "title": "" }, { "docid": "2166657529fc8d9eb491480ffd1007a6", "score": "0.5324187", "text": "def __setattr__(self, name, value):\n if name in ('cx', 'cy'):\n value_str = str(int(value))\n self.set(name, value_str)\n else:\n super(CT_SlideSize, self).__setattr__(name, value)", "title": "" }, { "docid": "a50735ddc14fd164558d7fe75eecc6d3", "score": "0.5321478", "text": "def __set__(self, instance, value):", "title": "" }, { "docid": "7707dbaab269153d31b9a2783d136a45", "score": "0.52912176", "text": "def __setitem__( self , index , value ):\n if isinstance( index , IntType):\n self.cNamespace().iset( self, index , value )\n else:\n raise TypeError(\"Index should be integer type\")", "title": "" }, { "docid": "be7dec443ade07a7a7973c72b3dcfc2b", "score": "0.52891076", "text": "def setValue(self, v, ord=\"'XYZ'\"):\n \n pass", "title": "" }, { "docid": "d3199e07e4abb3c1481cfef180c8266b", "score": "0.52877223", "text": "def __set__(self, instance, value):\n if value is None: self.mp = None; return\n self.mp = value[0]\n self.gallat = value[1]", "title": "" }, { "docid": "9595b39849d76fa4d6591bdc39b394bf", "score": "0.526022", "text": "def __set__(self, instance, value):\n from ..error import ValueError\n if value is None: self.nspin, self.ncycles = None, None; return\n if not hasattr(value, '__getitem__'): \n raise ValueError('Incorrect input to spinlock: {0}.'.format(value))\n self.nspin = int(value[0])\n self.ncycles = value[1]", "title": "" }, { "docid": "720cf494e85f3dca83c1a42e6aff5105", "score": "0.52258706", "text": "def setDisplacementParameter(self,key,value):\n\t\tself.param_dict[key] = value", "title": "" }, { "docid": "eefeac19a7d4e56b308d657db37f0a5f", "score": "0.52150273", "text": "def set_cdr(*args, **kwargs):\n return _pmt_swig.set_cdr(*args, **kwargs)", "title": "" }, { "docid": "cf8eaad29b5efd549baec94cdcbd91ae", "score": "0.52132136", "text": "def _set(self, sec, key, value):\n return self._p.set(sec.name, key, value)", "title": "" }, { "docid": "e3acdaac709e2b15cf1726d30e6f4bfb", "score": "0.5195863", "text": "def set(self, key, value, section, export=True):\n self[key] = self.Pair(value, section, export)", "title": "" }, { "docid": "45f2ced3254a81b92fb03187afbab3cf", "score": "0.51941967", "text": "def assign_car(self, car: Car) -> str | None:\n\n success = self.__slots.acquire(timeout=DEFAULT_TIMEOUT) # so we acquire the semaphore. Notice that we are\n # providing a timeout value in seconds. 
If the semaphore is not acquired, then the thread will give up and\n # the subsequent call to acquire will return False.\n\n if success: # if we are successful\n token = str(uuid.uuid4()) # we generate a unique identifier using the standard third party library\n self.__space[token] = car # and we will use the token that's provided to mark our car. In Java, this is\n # equivalent to saying map.put(token, car)\n print(f\"{car.owner}'s car was parked successfully. Token: {token}\")\n return token\n else:\n return None", "title": "" }, { "docid": "23c87dc5898a57072c7f33aa49d98e73", "score": "0.5192348", "text": "def put(self, index: int, value: object) -> None:\n ...", "title": "" }, { "docid": "704b4d10c8e67268745b445c647a3f0f", "score": "0.5183795", "text": "def set(self, k, v):", "title": "" }, { "docid": "02b786c1d6b8f9bcd09391db29d76281", "score": "0.5178858", "text": "def __setitem__(self, *args):\n return _pmt_swig.pmt_vector_int16___setitem__(self, *args)", "title": "" }, { "docid": "975ba7e04c2c2a50a9a1f39326d8f3a1", "score": "0.5178035", "text": "def setItem(self, x, y, value):\r\n self.genomeList[x][y] = value", "title": "" }, { "docid": "a87cf8252bdff4d000ebc395482f47a1", "score": "0.5160878", "text": "def __set__(self,instance,value): \n raise TypeError(\"radius is not possible to set\")", "title": "" }, { "docid": "09dac7147e396c241047d64dbe83e672", "score": "0.5158642", "text": "def set(self, atoms, value):\n raise NotImplementedError(\"Has to be implemented in derived classes!\")", "title": "" }, { "docid": "8ac5bd5da3e35a9d183e9411502018d4", "score": "0.51561326", "text": "def set(self, card, *args):\n\n if tag == 'param_card':\n tag = 'slha'\n attr_tag = 'param_card'\n elif tag == 'run_card':\n tag = 'mgruncard' \n attr_tag = 'run_card'\n elif tag == 'proc_card':\n tag = 'mg5proccard' \n attr_tag = 'proc_card'\n elif tag == 'model':\n tag = 'mg5proccard' \n attr_tag = 'proc_card'\n arg = ('model',)\n elif tag == 'generate':\n tag = 'mg5proccard' \n attr_tag = 'proc_card'\n arg = ('generate',)\n elif tag == 'shower_card':\n tag = 'mgshowercard'\n attr_tag = 'shower_card'\n assert tag in ['slha', 'mgruncard', 'mg5proccard', 'shower_card'], 'not recognized'\n \n if not hasattr(self, attr_tag):\n self.charge_card(attr_tag) \n \n card = getattr(self, attr_tag)\n if len(args) ==2:\n if tag == 'mg5proccard':\n card.info[args[0]] = args[-1]\n else:\n card[args[0]] = args[1]\n else:\n card[args[:-1]] = args[-1]", "title": "" }, { "docid": "8ac5bd5da3e35a9d183e9411502018d4", "score": "0.51561326", "text": "def set(self, card, *args):\n\n if tag == 'param_card':\n tag = 'slha'\n attr_tag = 'param_card'\n elif tag == 'run_card':\n tag = 'mgruncard' \n attr_tag = 'run_card'\n elif tag == 'proc_card':\n tag = 'mg5proccard' \n attr_tag = 'proc_card'\n elif tag == 'model':\n tag = 'mg5proccard' \n attr_tag = 'proc_card'\n arg = ('model',)\n elif tag == 'generate':\n tag = 'mg5proccard' \n attr_tag = 'proc_card'\n arg = ('generate',)\n elif tag == 'shower_card':\n tag = 'mgshowercard'\n attr_tag = 'shower_card'\n assert tag in ['slha', 'mgruncard', 'mg5proccard', 'shower_card'], 'not recognized'\n \n if not hasattr(self, attr_tag):\n self.charge_card(attr_tag) \n \n card = getattr(self, attr_tag)\n if len(args) ==2:\n if tag == 'mg5proccard':\n card.info[args[0]] = args[-1]\n else:\n card[args[0]] = args[1]\n else:\n card[args[:-1]] = args[-1]", "title": "" }, { "docid": "2b4f21d8c7751713b7be79c2381aa44e", "score": "0.5149095", "text": "def __setitem__(self, *args):\n return 
_pmt_swig.pmt_vector_int32___setitem__(self, *args)", "title": "" }, { "docid": "3e5bc1c12e2edd1992f7fa4dda91b48b", "score": "0.51391745", "text": "def SetValue(self, value):", "title": "" }, { "docid": "ee0addd14b94a04530097af2382ef427", "score": "0.5138203", "text": "def set_identification(self, value):", "title": "" }, { "docid": "69adab749194f38c9fc78565b430746a", "score": "0.5123269", "text": "def __setitem__(self,index,value):\r\n id,tp = self._lookup(index)\r\n id=c_int(id)\r\n Env = globals()[\"Env\"]\r\n if tp != 3 and type(value) == type(\"\"):\r\n value = self._v[value.upper()]\r\n #print \"Setting\",index,\"(%d)\"%id.value,\"to\",value\r\n if tp == 0:\r\n status = cplex.CPXsetdblparam(Env,id,c_double(float(value)))\r\n elif tp == 3:\r\n status = cplex.CPXsetstrparam(Env,id,str(value).encode())\r\n elif tp == 4:\r\n status = cplex.CPXsetlongparam(Env,id,c_long(value))\r\n else:\r\n status = cplex.CPXsetintparam(Env,id,c_int(int(value)))\r\n if status:\r\n print(\"ERROR\",status,\": setting parameter\",index,\"=\",value)\r\n print(\" Usage:\",self._p[index.upper()][2]) # doc\r\n return status", "title": "" }, { "docid": "995df37cff37ebe0c2c8f876b608ba87", "score": "0.5119582", "text": "def __setitem__(self, attr, val):\n setattr(self, attr, val)", "title": "" }, { "docid": "6a8e1797658277cdc2a7dc0c103995f2", "score": "0.5117734", "text": "def __setitem__(self, index, value):\n self._dictionary[index] = value + 1", "title": "" }, { "docid": "704d577fd54410502a07c4e2c1c69612", "score": "0.5064625", "text": "def set(self, register, value):\n try:\n self._registers[register] = int(value)\n except ValueError:\n self._registers[register] = self._registers[value]", "title": "" }, { "docid": "ea0384adf1f60db28bf39888adf78a61", "score": "0.50555706", "text": "def update(self,_id,key=None,value=None,**pair):\n return self.put_unique(_id,key,value,**pair)", "title": "" }, { "docid": "bc7eb1760974fce72514807daedd732d", "score": "0.5055003", "text": "def __setitem__(self, key, val):\n self.b[key[0]][key[1]] = val", "title": "" }, { "docid": "c952fa1bd9fdf6a29f2113398dbbe894", "score": "0.5049377", "text": "def __setitem__(k, value):", "title": "" }, { "docid": "f3d3e3896f7f6a569672be01f028c56b", "score": "0.50487703", "text": "def setValue(self, value):\n ...", "title": "" }, { "docid": "a82ee3c92b6170b292c8d0452ac20f17", "score": "0.5038256", "text": "def set(field_name, value):", "title": "" }, { "docid": "d0252ae426810e4c55d72e0eeced4a12", "score": "0.5037995", "text": "def setter(val,control):\n #print \"setter\", control,val\n value = int(val) # turn bools into ints\n device = \"-d\"+str(control[\"src\"])\n control_name = control[\"name\"]\n control[\"value\"]=val\n sp.Popen([v4l2_ctl,device,\"-c\"+control_name+\"=\"+str(value)])", "title": "" }, { "docid": "db308d38aa41fafda359baf535494634", "score": "0.5029736", "text": "def __set__(self, instance, value):\n from ..error import ValueError\n if value is None: self.shift, self.lock = None, None; return\n if not hasattr(value, '__getitem__'): \n raise ValueError('Incorrect input to levshift: {0}.'.format(value))\n self.shift = value[0]\n self.lock = value[1]", "title": "" }, { "docid": "745245d22668bef64323be0be780be9e", "score": "0.50296515", "text": "def __setitem__(self, key, value):\r\n self.genomeList[key] = value", "title": "" }, { "docid": "6645547087bf6169e25918e1650ebc22", "score": "0.502806", "text": "def pair(self):", "title": "" }, { "docid": "1969a954aaefff4ecc5083985b135e11", "score": "0.50277877", "text": "def 
__setattr__(self,name,value):\n if name.startswith(\"__\") and name.endswith(\"__\"):\n object.__setattr__(self,name,value)\n ##debug(\"EnsembleWrapper.__setattr__(%r,%r)\" % (name,value))\n setattr(self.__record__,name,value)", "title": "" }, { "docid": "4884f5612e944e1a64a6b8f15f237769", "score": "0.50275254", "text": "def put(self, key: int, value: int) -> None:\n y = key % 80\n add_pair = (key, value)\n for count, val in enumerate(self.store[y]):\n if val[0] == key:\n self.store[y][count] = add_pair\n return\n self.store[y].append(add_pair)", "title": "" }, { "docid": "5a4fb041078573869dd7e6d129116e33", "score": "0.5019773", "text": "def __setitem__(self,k,value): \n self.vec[k] = value", "title": "" }, { "docid": "8dabebb7e3e68af7a092454b3c2ced7f", "score": "0.5008638", "text": "def __setitem__(self, x, val):\n pass", "title": "" }, { "docid": "0eda925ab74edb4de2962a8b24095279", "score": "0.50037843", "text": "def __setattr__(self, key, val):", "title": "" }, { "docid": "d7eb0dc5b5c0671ea3fa94f0f4508200", "score": "0.5001667", "text": "def __setitem__(key, value):", "title": "" }, { "docid": "847ab63a68afbc7c7a4c1a9c87be6e4a", "score": "0.49984568", "text": "def __setitem__(self, k, v):\n ...", "title": "" }, { "docid": "2cb4720bb638b2484043a7e2acaaffd8", "score": "0.49910152", "text": "def set(self, commit_or_id, tag_key, value):\n\n release_id = self.release.get(commit_or_id)['id']\n\n return super(ReleaseTag, self).set(release_id, tag_key, value)", "title": "" }, { "docid": "cc2d3c113fb807060e2f58f3f3cbef6c", "score": "0.4982439", "text": "def __setitem__(self,key,value):\n pass", "title": "" }, { "docid": "d4445e4df059bde7447019fd40fad161", "score": "0.4976691", "text": "def __setitem__(self, k, v):\n self.nss.__setitem__(k, v)", "title": "" }, { "docid": "5071baa4749f35a6632e8c4cbcfff6f1", "score": "0.49682423", "text": "def put(key, value):\n global bridgeclient\n bridgeclient.put(rcw(key), str(value))", "title": "" }, { "docid": "998c54b9adad6a9bbc6a7c3fd7349cb5", "score": "0.4960901", "text": "def __setattr__(self, key, value):\n self.__setitem__(key, value)", "title": "" }, { "docid": "07c2a604c75b6e5369744520b0073d01", "score": "0.4960046", "text": "def __init__(self, value, price):\n\t\tself.price = int(price) # int\n\t\tself.value = int(value) # int", "title": "" }, { "docid": "5eea954ee66557e7870f8788e3ffb8f4", "score": "0.49510086", "text": "def __setitem__(self, key, value):\r\n self.__setattr__(key, value)", "title": "" }, { "docid": "759a1e26aae75d9e23182d4db38e065f", "score": "0.49505833", "text": "def __set__(self, instance, value):\n # type: (object, int) -> None\n raise AttributeError(\"cannot modify bit-field of immutable int\")", "title": "" }, { "docid": "832e50608e89591225cb7c1db273e551", "score": "0.49428874", "text": "def __setitem__(self, key: str, value: 'Book') -> None:\n self.library[key] = value\n return self", "title": "" }, { "docid": "deba7cc657bca895345e4cd84844a572", "score": "0.49410537", "text": "def _set_value(self, idx, value):\n set_struct(self._full_data, self.ent_idx, self.ent_size, self.offset, self.struct, idx, value)", "title": "" }, { "docid": "c6c85f901f2d0dbf496d4004b5174057", "score": "0.4934714", "text": "def add_pair(self, key, value):\n self.extradata[key] = value", "title": "" }, { "docid": "e7ae88add44c1ac140f48fc775e4b214", "score": "0.49296054", "text": "def __setitem__(self, key, value):\n self.sequence[key] = value", "title": "" }, { "docid": "fc6b1113ca6820af57675575054a8542", "score": "0.4926317", "text": "def 
__setitem__(self, *args):\n return _openfst.IntVector___setitem__(self, *args)", "title": "" }, { "docid": "67c62ec8ab6a6afdae9e4b66c68c2eb8", "score": "0.4921499", "text": "def SetVectorComponent(self, p_int, string, p_int_1, p_int_2, p_int_3, p_int_4):\n ...", "title": "" }, { "docid": "911ad7a44b81270e950c818ac0a3325a", "score": "0.49214438", "text": "def at_set(self, new_value):\n pass", "title": "" }, { "docid": "e9c7a9feb340cc67045b46221bda0f66", "score": "0.49180374", "text": "def assign(self, value):\n \n pass", "title": "" }, { "docid": "e9c7a9feb340cc67045b46221bda0f66", "score": "0.49180374", "text": "def assign(self, value):\n \n pass", "title": "" }, { "docid": "e9c7a9feb340cc67045b46221bda0f66", "score": "0.49180374", "text": "def assign(self, value):\n \n pass", "title": "" }, { "docid": "68ca68ca95e2d3333350eb9592dfec85", "score": "0.49160048", "text": "def __set__(self,obj,value): \n raise TypeError(\"k is not possible to set\")", "title": "" }, { "docid": "dbd9554dff75cb6799761bc8d6e92e08", "score": "0.49121842", "text": "def __setitem__(self, key, value):\r\n if value not in (0, 1):\r\n Util.raiseException(\"The value must be zero (0) or one (1), used (%s)\" % value, ValueError)\r\n G1DBase.__setitem__(self, key, value)", "title": "" }, { "docid": "08a76c0c27f868fd83cbc44ccd546b8f", "score": "0.49084777", "text": "def set(aMap, key, value): # new function called set; not the python builtin class\n bucket = get_bucket(aMap, key) # names get_bucket return 'bucket' \n i, k, v = get_slot(aMap, key) # names get_slot return 'i, k, v' \n\n if i >= 0: # for slot-integer numbers >= 0, set the key and value in place of the integer \n # the key exists, replace it. \n bucket[i] = (key, value) # sets one integer,one key, and one value per bucket, overwriting whatever is in there before. \n else:\n # the key does not, append to create it # if the key does not exist, make a new bucket and add it to aMap list \n bucket.append((key, value))", "title": "" }, { "docid": "613ac4f1fcdf90ac9bac0d974d28fe79", "score": "0.49077055", "text": "def __setitem__(self, key, value):\r\n self.led_ar[key] = int(value) << 8 | (0xe0 | self.brightness)\r\n self.__set_pattern()\r\n return", "title": "" }, { "docid": "e5732a4a3c8e5bf86d675a5de2252e77", "score": "0.489335", "text": "def __setitem__(self, slicex, value):\n self.pot_arr[slicex] = value", "title": "" }, { "docid": "8d0ae5a1be858f585a514fb0741a3d00", "score": "0.48895612", "text": "def _set(self, ky, val):\n dict.__setitem__(self, ky, val)", "title": "" }, { "docid": "b07cbbd15c883eded520f838b070ac08", "score": "0.48842615", "text": "def set(targetInstance, cell):", "title": "" }, { "docid": "1db6d9dc80b8a311ef045e70fe0d545d", "score": "0.48837718", "text": "def set(self, app_id, tag_key, value):\n\n return super(ApplicationTag, self).set(app_id, tag_key, value)", "title": "" }, { "docid": "8ecc93e1767c52c5510ee356681f653f", "score": "0.4872517", "text": "def __setattr__(self, name, value):\n self.__dict__[name] = value;\n if name == \"carry_capacity\":\n self.population = np.arange( int( 1.5 * self.carry_capacity +\n ( self.nbr_species - 1 )\n * 1.5 * self.carry_capacity\n * ( 1. 
- self.comp_overlap) ) );", "title": "" }, { "docid": "91dd3715811be23fe72aea331056917e", "score": "0.48698452", "text": "def __set__(self,instance,value): \n raise TypeError(\"waist is not possible to set\")", "title": "" }, { "docid": "cf8b1abbd1ee12f1c7f1fd703acd994c", "score": "0.48680466", "text": "def car(*args, **kwargs):\n return _pmt_swig.car(*args, **kwargs)", "title": "" }, { "docid": "287944b2b5f5d1844499dbe3351bf798", "score": "0.48633954", "text": "def __setitem__(self, key, value):\n if value not in (0, 1):\n Util.raiseException(\"The value must be zero (0) or one (1), used (%s)\" % value, ValueError)\n G1DBase.__setitem__(self, key, value)", "title": "" }, { "docid": "84463eb9ac37d1a2afe7e220014612fa", "score": "0.48534116", "text": "def setItem(self, x, y, value):\r\n if value not in [0,1]:\r\n Util.raiseException(\"The item value must be 0 or 1 in the G2DBinaryString chromosome\", ValueError)\r\n self.genomeString[x][y] = value", "title": "" }, { "docid": "7faccf0f2465279f3c19ea11afff048e", "score": "0.485192", "text": "def set_value_i(self, i, val):\n\t\traise NotImplementedError", "title": "" }, { "docid": "640edda6c94fc58859575505edb84390", "score": "0.48514277", "text": "def __setitem__(self, k, v):\n self._field_desc[k] = v\n return", "title": "" }, { "docid": "089ec2d5f3c2a67398aa104245487d33", "score": "0.4851412", "text": "def __setitem__(self, key, value):\n\n assert value is True\n\n if isinstance(key, int):\n i = key\n\n status = wrapper.loaded_dll.cuBool_Vector_SetElement(\n self.hnd,\n ctypes.c_uint(i)\n )\n\n bridge.check(status)\n return\n\n raise Exception(\"Invalid item assignment\")", "title": "" }, { "docid": "05402d6e0c5f62c352739d706402d95d", "score": "0.48489338", "text": "def set(key, value):", "title": "" }, { "docid": "19f0e257c68afc742cb3c4adc9819102", "score": "0.48416737", "text": "def set(self, value):\r\n self.num.set(value)", "title": "" }, { "docid": "19f0e257c68afc742cb3c4adc9819102", "score": "0.48416737", "text": "def set(self, value):\r\n self.num.set(value)", "title": "" }, { "docid": "aae322fbba0d262a5dc9f0d1bc581736", "score": "0.48179236", "text": "def __setitem__(self, index, val):\n self.set(index, val)", "title": "" }, { "docid": "e0e9b3d781880de409f2769b2be45c0c", "score": "0.4815926", "text": "def set_key_pair(self):\n self.__private_key = ec.generate_private_key(ec.SECP256R1(),\n default_backend())\n self.__public_key = self.__private_key.public_key()\n\n private_key_raw = private_key_to_raw(self.__private_key)\n public_key_raw = public_key_to_raw(self.__public_key)\n\n assert(len(private_key_raw) == 32)\n assert(len(public_key_raw) == 64)\n\n self.iaci.send(cmd.KeypairSet(private_key_raw, public_key_raw))", "title": "" }, { "docid": "29254a700de644a73ad7c32220b26f35", "score": "0.4815057", "text": "def setInteger(self, value):\n pass", "title": "" }, { "docid": "91285fdf1241c452b2756a1050b14f3e", "score": "0.48129147", "text": "def __setitem__(self, fieldName, value):\n self.setfield(fieldName, value)", "title": "" }, { "docid": "a33f402a50b0a3310fa8c36e41810d8e", "score": "0.48116893", "text": "def set(self, val: int):\n self.value = val", "title": "" }, { "docid": "7c708604923176892831a715b1f11dfb", "score": "0.48098174", "text": "def set_sensor_ids(self, pairs):\n for coll in self.collections.keys():\n for cnd in self[coll].conditions.keys():\n for bl in self[coll][cnd].blocks.keys():\n self[coll][cnd][bl].set_sensor_id(pairs)", "title": "" }, { "docid": "03a8b783096a9f51cdae7f9dc9bf3acc", "score": "0.48072377", "text": 
"def set(self, key, value):\n pass", "title": "" }, { "docid": "d1ff7a517e03265bd3db24d966ffe581", "score": "0.4804151", "text": "def __setattr__(self, field, value):\n if field.startswith('_'):\n return super(_StructProxy, self).__setattr__(field, value)\n self[self._datamodel.get_field_position(field)] = value", "title": "" }, { "docid": "4afff2b904ae310af20f6acde4c66f4a", "score": "0.47981063", "text": "def setMeta(self, var, value):", "title": "" }, { "docid": "1070f909a3674b6226074c68581ecf66", "score": "0.4796087", "text": "def pair_count_update(self, pair, count):\n self.pair_count[pair] += count\n \n if self.pair_count[pair] > 0 and self.pair_count[pair] == count:\n self.pairs.add(pair)\n elif self.pair_count[pair] == 0:\n self.pairs.remove(pair)\n del self.pair_count[pair]", "title": "" }, { "docid": "54c6db7580ded2e92ee48725814ab71f", "score": "0.47944605", "text": "def change_cardata(clauses, id_car):\n sql_command = 'UPDATE Cars SET %s WHERE idCars=%s'\n mycursor.execute(sql_command % (clauses, int(id_car)))\n connection.commit()", "title": "" }, { "docid": "45a6318c1668fbca2a219c854308349f", "score": "0.4789047", "text": "def __setitem__(self, *args):\n return _pmt_swig.pmt_vector_uint16___setitem__(self, *args)", "title": "" }, { "docid": "b01c80cca39427deb4d584f6fd9bef36", "score": "0.47864118", "text": "def put(self, key, value):\n pass", "title": "" }, { "docid": "0a3fc3aa08634f64e9ceba5b218a4b6f", "score": "0.4784638", "text": "def add_s(self,value):\n\n self._card_gems[2] += int(value)", "title": "" } ]
c736f23138350c486dd77f9d3c4bbf00
Create and run askOkCancelString dialog.
[ { "docid": "430b9a89b0aefd6448a466c9eb575fcb", "score": "0.852891", "text": "def runAskOkCancelStringDialog(self,c,title,message):\n d = leoSwingDialog.swingAskOkCancelString(c,title,message)\n return d.run(modal=True)", "title": "" } ]
[ { "docid": "f66eb5cdc17560741b7c73d79c9a0604", "score": "0.78492826", "text": "def runAskOkDialog(self,c,title,message=None,text=\"Ok\"):\n d = leoSwingDialog.swingAskOk(c,title,message,text)\n return d.run(modal=True)", "title": "" }, { "docid": "3ac0223cc94fa779bb9ede45da716a0e", "score": "0.7845254", "text": "def runAskOkDialog(self,title,message=None,text=\"Ok\"):\n d = leoTkinterDialog.tkinterAskOk(title,message,text)\n return d.run(modal=True)", "title": "" }, { "docid": "bf33e1c22805d50a8d41166d9a314bfa", "score": "0.78073287", "text": "def promptBox(title, message, okText, cancelText, **kwargs):\n ret = promptDialog(t=title, m=message, b=[okText,cancelText], db=okText, cb=cancelText,**kwargs)\n if ret==okText:\n return promptDialog(q=1,tx=1)", "title": "" }, { "docid": "fea20b06bca463fa746c8ceece7500ff", "score": "0.75351745", "text": "def ask_ok_cancel(message='', default=0, title=''):\n return backend_api.opendialog(\"ask_ok_cancel\" , dict(message=message, default=default, title=title))", "title": "" }, { "docid": "014aa2d27ed1cc52d673848188ede36a", "score": "0.7495962", "text": "def confirmPrompt(self, msg):\n\n msg_box = QtGui.QMessageBox(self.parent)\n msg_box.setText(msg)\n msg_box.setWindowTitle('Please Confirm:')\n msg_box.setStandardButtons(\n QtGui.QMessageBox.Cancel|QtGui.QMessageBox.Ok)\n\n #TODO: figure out why this doesn't work\n #msg_box.setDefaultButton(QtGui.QMessageBox.Ok)\n\n if msg_box.exec_() != QtGui.QMessageBox.Ok:\n raise UIAbortException('User aborted during confirmPrompt')", "title": "" }, { "docid": "f7fcf9d31e48f51d03fffbf5e4fb0eab", "score": "0.7266594", "text": "def runAskYesNoCancelDialog(self,c,title,\n message=None,yesMessage=\"Yes\",noMessage=\"No\",defaultButton=\"Yes\"):\n d = leoSwingDialog.swingAskYesNoCancel(\n c,title,message,yesMessage,noMessage,defaultButton)\n return d.run(modal=True)", "title": "" }, { "docid": "4bfc3d3be05c9144346ce2f458502a31", "score": "0.72470033", "text": "def runAskYesNoCancelDialog(self,title,\n message=None,yesMessage=\"Yes\",noMessage=\"No\",defaultButton=\"Yes\"):\n d = leoTkinterDialog.tkinterAskYesNoCancel(\n title,message,yesMessage,noMessage,defaultButton)\n return d.run(modal=True)", "title": "" }, { "docid": "01ae6e771df89ab06c48587f8607920b", "score": "0.72244364", "text": "def runAskOkCancelNumberDialog(self,c,title,message):\n d = leoSwingDialog.swingAskOkCancelNumber(c,title,message)\n return d.run(modal=True)", "title": "" }, { "docid": "fb743dbbf019bf908f0d6a7a076d26a5", "score": "0.7196083", "text": "def runAskOkCancelNumberDialog(self,title,message):\n d = leoTkinterDialog.tkinterAskOkCancelNumber(title,message)\n return d.run(modal=True)", "title": "" }, { "docid": "7e8057f38d13bdd3e32a455f1203d787", "score": "0.7135979", "text": "def runAskYesNoDialog(self,c,title,message=None):\n d = leoSwingDialog.swingAskYesNo(c,title,message)\n return d.run(modal=True)", "title": "" }, { "docid": "cf422497f14dfed0224a8bee093a0db8", "score": "0.69412136", "text": "def runAskYesNoDialog(self,title,message=None):\n d = leoTkinterDialog.tkinterAskYesNo(title,message)\n return d.run(modal=True)", "title": "" }, { "docid": "15c2fd727b634d14fca1b61298ba2bd2", "score": "0.69369715", "text": "def ok_no_cancel(question=''):\n dial = MessageBox.Show(question, 'iph', MessageBoxButton.YesNoCancel)\n\n action = None\n if dial == MessageBoxResult.Yes:\n action = True\n elif dial == MessageBoxResult.No:\n action = False\n return action", "title": "" }, { "docid": "1a687755b0d0a16813422be2fbfb47ab", "score": "0.6818419", 
"text": "def AskYesNoCancel(message, title='FontParts', default=0, informativeText=\"\"):\n return dispatcher[\"AskYesNoCancel\"](message=message, title=title,\n default=default, informativeText=informativeText)", "title": "" }, { "docid": "60983d7c331d8c90ec618188a0ffe8c2", "score": "0.6695064", "text": "def okButton(self):\n\n s = self.number_entry.get().strip()\n\n try:\n self.answer=int(s)\n except:\n self.answer=-1 # Cancel the operation.\n\n self.top.destroy()", "title": "" }, { "docid": "e0bc3e9ed3499134925ab569103e8878", "score": "0.6642086", "text": "def prompt_string(label, title, default=\"\"):\n dpi_scale = get_dpi_scale()\n dlg = QtWidgets.QInputDialog(None)\n dlg.setWindowFlags(dlg.windowFlags() & ~QtCore.Qt.WindowContextHelpButtonHint)\n dlg.setInputMode(QtWidgets.QInputDialog.TextInput)\n dlg.setLabelText(label)\n dlg.setWindowTitle(title)\n dlg.setTextValue(default)\n dlg.resize(\n dpi_scale*400,\n dpi_scale*50\n )\n ok = dlg.exec_()\n text = str(dlg.textValue())\n return (ok, text)", "title": "" }, { "docid": "9b2b18cf457ef4f448ad00f2c879e8a5", "score": "0.6616017", "text": "def popup():\n\t\n\t#print return value\n\t\n\t#Return Value YES/NO\n\tprint(tk.messagebox.askquestion(title='ask YES/NO', message='Hello'))\n\t#Return Value True/False\n\tprint(tk.messagebox.askokcancel(title='ask YES/Cancel', message='Hello'))\n\t#Return Value True/False/None\n\tprint(tk.messagebox.askyesnocancel(title='ask Yes/NO/Cancel', message='Hello'))", "title": "" }, { "docid": "935d805f4a4511d6fdf71ade14241856", "score": "0.653687", "text": "def okButton(self):\n\n self.answer=\"ok\"\n self.top.destroy()", "title": "" }, { "docid": "eeb0e7ee447802d0fc915d1c728e5dfa", "score": "0.6535817", "text": "def _do_confirm(self, wobj):\n dlg = gtk.MessageDialog(self.widget,\n gtk.MESSAGE_QUESTION,\n buttons = gtk.BUTTONS_YES_NO)\n dlg.label.set_text(\"Are you sure ?\")\n dlg.set_default_response(gtk.RESPONSE_NO)\n ret = dlg.run()\n dlg.destroy()\n if ret == gtk.RESPONSE_NO:\n return False\n return True", "title": "" }, { "docid": "d98268ba1468209ab4b961682fd996c5", "score": "0.6516766", "text": "def showDialog(self, message, yesno) :\n self.dialogAnswer = None\n if yesno :\n caption = _(\"Confirmation\")\n style = wx.YES_NO | wx.YES_DEFAULT | wx.ICON_QUESTION\n else :\n caption = _(\"Information\")\n style = wx.OK | wx.ICON_INFORMATION\n style |= wx.STAY_ON_TOP\n dialog = wx.MessageDialog(self, message, caption, style)\n dialog.Raise()\n self.dialogAnswer = ((dialog.ShowModal() == wx.ID_NO) and \"CANCEL\") or \"OK\"\n dialog.Destroy()", "title": "" }, { "docid": "b48a6a533e04e72d83a87e60bbff9969", "score": "0.65136683", "text": "def _confirm_dialog(self, msg, default='n'):\n if self.skip_confirm:\n return True\n else:\n return utils.confirm_dialog(msg, default=default)", "title": "" }, { "docid": "38072c18e9f44b8c32c8e0c94e8311a6", "score": "0.6481988", "text": "def textPrompt(self, msg):\n\n while True:\n result, ok = QtGui.QInputDialog.getText(self.parent, \n 'Input Dialog', msg)\n\n if not ok:\n raise UIAbortException('User aborted during textPrompt')\n\n if result:\n #TODO: insert prompt about bad input\n break\n\n return str(result)", "title": "" }, { "docid": "cca5d2c287cc115e97ba8a0d2b652ad4", "score": "0.6468971", "text": "def cloppy_yesno(master, message, callback) -> CloppyButtonWindow:\n\n dialog = CloppyButtonWindow(master)\n dialog.set_message(\n f'{message}\\n'\n \"So it behooves me to ask:\\n\"\n \"Are you sure you want to do that?\"\n )\n dialog.add_choice('Yes')\n 
dialog.add_choice('No')\n dialog.choice_made.add_callback(callback)\n\n return dialog", "title": "" }, { "docid": "461535c541e18abd07f1a79c14ad0ac1", "score": "0.6415587", "text": "def ask_string(message='Enter something.', default='', title=''):\n return backend_api.opendialog(\"ask_string\" , dict(message=message, default=default, title=title))", "title": "" }, { "docid": "a37340259557abe6cc66879cbc32de40", "score": "0.6405765", "text": "def okButton(self):\n\n self.answer = self.number_entry.get().strip()\n self.top.destroy()", "title": "" }, { "docid": "fa4f83d24f2164282674a878b1bf1746", "score": "0.6396", "text": "def show_question_dialog(self, title, message):\n dialog = Qt.QMessageBox(None, gtk.DIALOG_MODAL, gtk.MESSAGE_QUESTION, gtk.BUTTONS_OK_CANCEL, title)\n dialog.format_secondary_text(message)\n response = dialog.run()\n dialog.destroy()\n return response", "title": "" }, { "docid": "f0369608517bb8dcb28ef7d766de65e6", "score": "0.63803035", "text": "def show_custom_dialog(shell, message, dialog_type):\n acm.UX().Dialogs().MessageBox(shell, dialog_type, message, 'OK', \n None, None, 'Button1', 'Button1')", "title": "" }, { "docid": "efe93d3037c7f24c56f2fd90b72cf91e", "score": "0.63592994", "text": "def showdialog1():\n\t\n retval = QMessageBox(QMessageBox.Information, \"MessageBox demo\", \"This is a message box\", QMessageBox.Ok | QMessageBox.Cancel).exec_()\n print(\"value of pressed message box button:\", retval)", "title": "" }, { "docid": "65937bd6994ac260ddd0b563c27dfb86", "score": "0.6320235", "text": "def ask_yes_no(message='', default=0, title=''):\n return backend_api.opendialog(\"ask_yes_no\" , dict(message=message, default=default, title=title))", "title": "" }, { "docid": "a2144caa6a869a39e220db1862763ab3", "score": "0.63174015", "text": "def user_confirm():\n confirm_message = \"\\nLa seguente azione modifcherà file in maniera definitiva.\\nSei sicura di voler continuare?\\n\\n\"\n return sg.popup_yes_no(confirm_message)", "title": "" }, { "docid": "3986fe48758047c34b7907dd1164a040", "score": "0.63110375", "text": "def input_dialog(\n title: AnyFormattedText = \"\",\n text: AnyFormattedText = \"\",\n ok_text: str = \"OK\",\n cancel_text: str = \"Cancel\",\n completer: Completer | None = None,\n validator: Validator | None = None,\n password: FilterOrBool = False,\n style: BaseStyle | None = None,\n default: str = \"\",\n) -> Application[str]:\n\n def accept(buf: Buffer) -> bool:\n get_app().layout.focus(ok_button)\n return True # Keep text.\n\n def ok_handler() -> None:\n get_app().exit(result=textfield.text)\n\n ok_button = Button(text=ok_text, handler=ok_handler)\n cancel_button = Button(text=cancel_text, handler=_return_none)\n\n textfield = TextArea(\n text=default,\n multiline=False,\n password=password,\n completer=completer,\n validator=validator,\n accept_handler=accept,\n )\n\n dialog = Dialog(\n title=title,\n body=HSplit(\n [\n Label(text=text, dont_extend_height=True),\n textfield,\n ValidationToolbar(),\n ],\n padding=D(preferred=1, max=1),\n ),\n buttons=[ok_button, cancel_button],\n with_background=True,\n )\n\n return _create_app(dialog, style)", "title": "" }, { "docid": "791dfd18b5af7f9e7a3a4cf23076c1b5", "score": "0.6281873", "text": "def confirm(message, override=askConfirm):\n if override: # i.e. 
if askConfirm\n if not tkMessageBox.askyesno(\"confirm\", str(message) + \"\\n\\nConfirm?\"):\n print abortMsg\n exit() # if user selects 'No', returns False, so \"if not\"", "title": "" }, { "docid": "09933ceaa8e44e5627b326922bb16961", "score": "0.62798697", "text": "def clickOk():\n\n global season, cr, pr, dor\n\n #subsequent program is expecting numbers and not strings\n season = int(cbs[0].get())\n cr = int(cbs[1].get())\n dor = int(cbs[2].get())\n pr = int(cr) - 1\n\n root.destroy()\n\n return None", "title": "" }, { "docid": "5aaa02ac4bb2f2e8c06a3f65a50c81e7", "score": "0.62669957", "text": "def dialog_box(instruction):\n answer = \"\"\"\n osascript -e '\n tell application \"System Events\"\n set result to (display dialog \"%s\" giving up after 3)\n end tell\n'\n \"\"\" % (instruction)\n os.system(answer)", "title": "" }, { "docid": "c0bc8b5317a5ac028d2ffc63bc87f311", "score": "0.62662035", "text": "def test_dialog_cancel(self):\n button = self.dialog.button_box.button(QtWidgets.QDialogButtonBox.Cancel)\n button.click()\n result = self.dialog.result()\n self.assertEqual(result, QtWidgets.QDialog.Rejected)", "title": "" }, { "docid": "d3d86ff14625c243ee72d7d81fbd16c9", "score": "0.6249804", "text": "def conf_dialog(text):\r\n msg('{0}. Do you wish to continue? (y/N)'.format(text))\r\n user_input = get_user_input()\r\n return user_input and user_input[0].lower() == 'y'", "title": "" }, { "docid": "488335135ce95efab599be2d0363fee0", "score": "0.6242318", "text": "def question_calcul_auto():\r\n msg = QMessageBox()\r\n msg.setText(\"Est tu sur de vouloir lancer le calcul automatique ?\")\r\n msg.setStandardButtons(QMessageBox.Yes | QMessageBox.No)\r\n choix = msg.exec()\r\n return choix", "title": "" }, { "docid": "314d7793310ba78ae99b62f2c8bf4f70", "score": "0.6239318", "text": "def ok_pressed(self):\n # Obtain the user inputs:\n user = self.get_user()\n batch = self.get_batch()\n grp = self.get_grp()\n location = self.get_location()\n\n # Determine whether there are blank inputs\n status = True\n if (\n user == \"\" or\n batch == \"\" or\n grp == \"\" or\n location == \"\"\n ):\n error_message = \"Please fill in all entries\"\n status = False\n\n # Send an alert message:\n if status is False:\n\n reply = QtWidgets.QMessageBox.warning(self, 'Message',\n error_message,\n QtWidgets.QMessageBox.Ok)\n # If all entries are valid move to next window\n else:\n self.okPressed = True\n self.close()", "title": "" }, { "docid": "b501f11c8137d6df0153b85efc874d5c", "score": "0.62316746", "text": "def deployCancelMessageBox(self):\n\t\tif messagebox.askokcancel(\"Cancel\",\"Are you sure? 
All progress will be lost.\",parent=self.top):\n\t\t\tself.top.destroy()", "title": "" }, { "docid": "668729dd43ebf1364fd40d7901cb432d", "score": "0.6219222", "text": "def popup_message(self, message):\r\n\r\n popup_window = QtWidgets.QMessageBox()\r\n\r\n popup_window.setInformativeText(message)\r\n popup_window.setStandardButtons(QtWidgets.QMessageBox.Ok)\r\n\r\n popup_window.exec_()", "title": "" }, { "docid": "dcc396e5ec700058348a8f898ac1ad98", "score": "0.62120616", "text": "def prompt_user(prompt):\n # test to see what icons are available on the file system from global var\n icon = ICON\n if not os.path.exists(icon):\n # default fail over icon in case our custom one does not exist\n icon = \"/System/Library/CoreServices/Problem Reporter.app/Contents/Resources/ProblemReporter.icns\"\n # build the jamf helper unix command in a list\n cmd = [\n \"/Library/Application Support/JAMF/bin/jamfHelper.app/Contents/MacOS/jamfHelper\",\n \"-windowType\",\n \"utility\",\n \"-title\",\n \"Quit Applications\",\n \"-description\",\n prompt,\n \"-icon\",\n icon,\n \"-button1\",\n \"Restart\",\n \"-button2\",\n \"Cancel\",\n \"-defaultbutton\",\n \"1\",\n ]\n # call the command via subprocess\n proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n # get stdout and stderr\n out, err = proc.communicate()\n # check for exit status for button clicked, 0 = OK 2 = Cancel\n if proc.returncode == 0:\n # user clicked OK\n return True\n if proc.returncode == 2:\n # user clicked cancel\n return False\n # if there is any other return print it\n else:\n print(\"Error: %s\" % err)", "title": "" }, { "docid": "6a5f64341b088c4c465a15e06be217ba", "score": "0.6206179", "text": "def message_dialog(\n title: AnyFormattedText = \"\",\n text: AnyFormattedText = \"\",\n ok_text: str = \"Ok\",\n style: BaseStyle | None = None,\n) -> Application[None]:\n dialog = Dialog(\n title=title,\n body=Label(text=text, dont_extend_height=True),\n buttons=[Button(text=ok_text, handler=_return_none)],\n with_background=True,\n )\n\n return _create_app(dialog, style)", "title": "" }, { "docid": "41397090c41eb3f7d6fc7ef3589455ad", "score": "0.61905426", "text": "def proceed_dialog(txt, yes=['y', 'yes'], all=['a', 'all', 'yes-to-all']):\n response = raw_input(txt)\n if response.lower() in yes:\n return 1\n if response.lower() in all:\n return 2\n return 0", "title": "" }, { "docid": "bbd910883074fae8bf0b207adf2519dc", "score": "0.6165569", "text": "def run(self):\n # show the dialog\n self.dlg.show()\n # Run the dialog event loop\n result = self.dlg.exec_()\n # See if OK was pressed\n if result:\n # Do something useful here - delete the line containing pass and\n # substitute with your code.\n pass", "title": "" }, { "docid": "bbd910883074fae8bf0b207adf2519dc", "score": "0.6165569", "text": "def run(self):\n # show the dialog\n self.dlg.show()\n # Run the dialog event loop\n result = self.dlg.exec_()\n # See if OK was pressed\n if result:\n # Do something useful here - delete the line containing pass and\n # substitute with your code.\n pass", "title": "" }, { "docid": "62af7666ac02980913f5cb862b8b7aec", "score": "0.615386", "text": "def __onCancel(self):\n self.__showDlg.reject()", "title": "" }, { "docid": "ce857fe2556c473d36b0c67a6d98cd1a", "score": "0.61298615", "text": "def export_showDialog(self, message, yesno) :\n wx.CallAfter(self.frame.showDialog, self.frame.UTF8ToUserCharset(message.data), yesno)\n # ugly, isn't it ?\n while self.frame.dialogAnswer is None :\n time.sleep(0.1)\n retcode = 
self.frame.dialogAnswer\n self.frame.dialogAnswer = None # prepare for next call, just in case\n return retcode", "title": "" }, { "docid": "56031befb01ed6408bfa761f54da4c14", "score": "0.61225134", "text": "def confirmation_dialog(title, msg, action, shell=False):\n win = Gtk.Window()\n win.connect('destroy', Gtk.main_quit)\n dialog = Gtk.Dialog(parent=win)\n\n font = ImageFont.truetype(FONT, FONT_SIZE)\n w, _ = font.getsize(msg)\n final_w = w + 80\n final_h = 110\n dialog.set_default_size(final_w, final_h)\n\n dialog.set_property('title', title)\n dialog.add_buttons(\n Gtk.STOCK_OK, Gtk.ResponseType.OK, Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL\n )\n dialog.action_area.set_property('halign', Gtk.Align.CENTER)\n\n label = Gtk.Label()\n label.override_font(Pango.FontDescription(f'Noto Sans {FONT_SIZE}'))\n label.set_text(msg)\n box = dialog.get_content_area()\n box.add(label)\n dialog.show_all()\n\n res = dialog.run()\n if res == Gtk.ResponseType.OK:\n Popen(action, shell=shell)\n elif res == Gtk.ResponseType.CANCEL:\n pass\n dialog.destroy()\n quit()", "title": "" }, { "docid": "9287be5ae9e57b4f26602f5fae0d2c73", "score": "0.61144036", "text": "def popupmsg(self, msg):\n popup = tk.Tk()\n def leavemini():\n popup.destroy()\n popup.wm_title(\"!\")\n label = ttk.Label(popup, text=msg, font=NORM_FONT)\n label.pack(side=\"top\", fill=\"x\", pady=10)\n B1 = ttk.Button(popup, text=\"Okay\", command = leavemini)\n B1.pack()\n popup.mainloop()", "title": "" }, { "docid": "5680f1bf4df17b02f2a8515739991a61", "score": "0.60909677", "text": "def exit_clicked(self):\n\n popup = QMessageBox()\n popup.setWindowTitle('Confirmation')\n popup.setIcon(QMessageBox.Warning)\n popup.setText('Are you sure you want to exit?')\n popup.setStandardButtons(QMessageBox.Yes | QMessageBox.No)\n popup.setDefaultButton(QMessageBox.No)\n popup.buttonClicked.connect(self.exit_confirmation)\n popup.exec_()", "title": "" }, { "docid": "54343e3d44a2ad2c0e59ac663c6990f6", "score": "0.60790205", "text": "def cancelButton(self):\n\n self.answer=''\n self.top.destroy()", "title": "" }, { "docid": "e5274f1b04930cb27ba8494125ddc7eb", "score": "0.6064846", "text": "def easydialog(dlg):\n print('Pls. 
reponse to the open dialog')\n rt = tk.Tk()\n rt.withdraw()\n dlg.master = rt\n rc = dlg.show()\n # non of quit()/destroy() can kill tk while shown in excel, mainloop() even make it non-reponsible\n rt.quit()\n # rt.mainloop()\n # rt.destroy()\n return rc", "title": "" }, { "docid": "852054d9e587cd4a69221711841be197", "score": "0.6048015", "text": "def askExit(self):\n # Open dialog box\n exitDialog = ExitDialog()\n response = exitDialog.exec_()\n if (response == QDialog.Accepted):\n return 1\n else:\n return 0", "title": "" }, { "docid": "17e216c590847213265be91d5e69d978", "score": "0.604232", "text": "def create():\n global dialog\n if dialog is None:\n dialog = NameIt()\n dialog.show()", "title": "" }, { "docid": "7d75e0fb3c4820480a265a5a46b83536", "score": "0.6025634", "text": "def cmd_cancel(self):\n self.dismiss()", "title": "" }, { "docid": "7d75e0fb3c4820480a265a5a46b83536", "score": "0.6025634", "text": "def cmd_cancel(self):\n self.dismiss()", "title": "" }, { "docid": "c47bd01cbea4ce28a293897803a42615", "score": "0.6006495", "text": "def input(message=''):\n from javax.swing import JOptionPane\n return JOptionPane.showInputDialog(frame, message)", "title": "" }, { "docid": "3af5388aa93e5b655522b6242fd6e55c", "score": "0.6004759", "text": "def runAskLeoIDDialog(self):\n d = leoSwingDialog.swingAskLeoID()\n return d.run(modal=True)", "title": "" }, { "docid": "664bac8f8095933b2f8c7702d49cd953", "score": "0.59922427", "text": "def comboFunc(self):\n if messagebox.askyesno(\"Confirmation\", \"Confirm selection?\"):\n self.quit()\n else:\n return", "title": "" }, { "docid": "d44c90d6e9fe23a888ce1c4589f296d9", "score": "0.5989686", "text": "def message_box(self, msg_code, ex=None):\r\n icon_information = 1\r\n icon_warning = 2\r\n icon_error = 3\r\n icon_question = 4\r\n if msg_code == \"no_lights\":\r\n text = \"No lights selected.\"\r\n icon = icon_warning\r\n title = \"Light selection\"\r\n elif msg_code == \"file_save_error\":\r\n text = \"Error saving %s.\" % ex\r\n icon = icon_warning\r\n title = \"File error\"\r\n elif msg_code == \"file_open_error\":\r\n text = \"Error opening %s.\" % ex\r\n icon = icon_warning\r\n title = \"File error\"\r\n elif msg_code == \"file_saved\":\r\n text = (\"%s saved successfully.\" % ex)\r\n icon = icon_information\r\n title = \"File saved\"\r\n else:\r\n text = \"Unknown error.\"\r\n icon = icon_error\r\n title = \"Error\"\r\n self.msgBox.setText(text)\r\n self.msgBox.setIcon(icon)\r\n self.msgBox.setWindowTitle(title)\r\n self.msgBox.setDefaultButton(QtWidgets.QMessageBox.Ok)\r\n self.msgBox.exec_()", "title": "" }, { "docid": "0a95e84331cd1396ae7b87bcbc4eca2f", "score": "0.59870464", "text": "def question_enregistrer():\r\n msg = QMessageBox()\r\n msg.setText(\"Veux-tu enregistrer avant de quitter ?\\n\\\r\n Tu pourrais le regretter !\\n\\\r\n (Pour la prochaine fois Ctrl+S)\")\r\n msg.setStandardButtons(QMessageBox.Yes | QMessageBox.No)\r\n choix = msg.exec()\r\n return choix", "title": "" }, { "docid": "e4b49fd6381b223131ee4ae9b3377d08", "score": "0.5968927", "text": "def onOkButtonClick(self, event):\n self.EndModal(wx.ID_OK)\n event.Skip()", "title": "" }, { "docid": "7630ea3af59e83654c0fb97f21d4b2e4", "score": "0.5958267", "text": "def confirmBox(title, message, yes=\"Yes\", no=\"No\", *moreButtons, **kwargs):\n\n default = kwargs.get(\"db\", kwargs.get(\"defaultButton\")) or yes\n\n ret = confirmDialog(t=title, m=message, b=[yes,no] + list(moreButtons),\n db=default,\n ma=\"center\", cb=no, ds=no)\n if moreButtons:\n return ret\n else:\n 
return (ret==yes)", "title": "" }, { "docid": "db5567e5018e3fef33af16b73ed5adb5", "score": "0.5949712", "text": "def path_dialog(whatyouwant):\n import Tkinter\n root = Tkinter.Tk()\n root.withdraw()\n\n opt = {}\n opt['parent'] = root\n opt['initialdir'] = './'\n\n if whatyouwant == 'folder':\n from tkFileDialog import askdirectory\n ask_fun = askdirectory\n # dirpath will be to dir that user IS IN when they click confirm\n opt['title'] = 'Please select your experiment directory (be IN this folder)'\n\n if whatyouwant == 'file':\n from tkFileDialog import askopenfilename\n ask_fun = askopenfilename\n opt['title'] = 'Select psd file to detect peaks from'\n opt['filetypes'] = (('CSV files', '*.csv'), ('All files', '*.*'))\n\n path = ask_fun(**opt)\n return path", "title": "" }, { "docid": "1c8927c2e06b644c8260db2683d3ad7f", "score": "0.5941518", "text": "def __show_input_text_dialog_logic(self, text_dialog, ok_button_text=\"OK\", cancel_button_text=\"Cancel\"):\n # type: (str, str, str) -> str\n try:\n self._proxyALTabletService.showInputTextDialog(text_dialog, ok_button_text, cancel_button_text)\n\n self._signal_id = self._proxyALTabletService.onInputText.connect(self.__callback_on_input_text)\n while not self._input_received:\n sleep(0)\n\n self._proxyALTabletService.onInputText.disconnect(self._signal_id)\n except Exception, e:\n print \"Error occurred: \", e\n\n return self.__get_result_and_reset()", "title": "" }, { "docid": "99faa9a168d451b49f968df98b23dcd2", "score": "0.59354126", "text": "def show_input_text_dialog_blocking(self, text_dialog,\n ok_button_text=\"OK\",\n cancel_button_text=\"Cancel\",\n timeout=None):\n # type: (str, str, str, int) -> str\n return self.__show_input_text_dialog_logic(text_dialog, ok_button_text, cancel_button_text)\n # future = self.show_input_text_dialog_concurrently(text_dialog, ok_button_text, cancel_button_text)\n # return future.result(timeout)", "title": "" }, { "docid": "d1fd8c83a9e26a87d95b69e02025e33b", "score": "0.59290135", "text": "def path_dialog_no_p(whatyouwant):\n import Tkinter\n root = Tkinter.Tk()\n root.withdraw()\n\n opt = {}\n opt['parent'] = root\n opt['initialdir'] = './'\n\n if whatyouwant == 'folder':\n from tkFileDialog import askdirectory\n ask_fun = askdirectory\n # dirpath will be to dir that user IS IN when they click confirm\n opt['title'] = 'Please select your experiment directory (be IN this folder)'\n\n if whatyouwant == 'file':\n from tkFileDialog import askopenfilename\n ask_fun = askopenfilename\n opt['title'] = 'Select psd file to detect peaks from'\n opt['filetypes'] = (('CSV files', '*.csv'), ('All files', '*.*'))\n\n path = ask_fun(**opt)\n return path", "title": "" }, { "docid": "b8e1c58c27a6bb244c8f60a3cff988bb", "score": "0.59277385", "text": "def _on_cancel_button(self)->None:\n self._dialog_window.destroy()", "title": "" }, { "docid": "c3d5ff0eb5be4d228da021e6b6c4ac89", "score": "0.591085", "text": "def force_prompt(prompt):\n # pass global var\n icon = ICON\n # test to see what icons are available on the file system\n if not os.path.exists(icon):\n # default fail over icon in case our custom one does not exist\n icon = \"/System/Library/CoreServices/Problem Reporter.app/Contents/Resources/ProblemReporter.icns\"\n # build the jamf helper unix command in a list\n cmd = [\n \"/Library/Application Support/JAMF/bin/jamfHelper.app/Contents/MacOS/jamfHelper\",\n \"-windowType\",\n \"utility\",\n \"-title\",\n \"Quit Applications\",\n \"-description\",\n prompt,\n \"-icon\",\n icon,\n \"-button1\",\n 
\"Restart\",\n \"-defaultbutton\",\n \"1\",\n \"-timeout\",\n \"60\",\n \"-countdown\",\n \"-alignCountdown\",\n \"center\",\n ]\n # call the command via subprocess\n proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n # get stdout and stderr\n out, err = proc.communicate()\n # check for exit status for button clicked, 0 = OK 2 = Cancel\n if proc.returncode == 0:\n # user clicked OK\n return True\n if proc.returncode == 2:\n # user clicked cancel\n return False\n # if there is any other return print it\n else:\n print(\"Error: %s\" % err)", "title": "" }, { "docid": "8cf7a2ecdbbd934fbd5a22efb300b1da", "score": "0.59087366", "text": "def rtk_information(prompt, _parent=None):\r\n\r\n _dialog = gtk.MessageDialog(_parent, gtk.DIALOG_DESTROY_WITH_PARENT,\r\n gtk.MESSAGE_INFO, gtk.BUTTONS_OK,\r\n message_format=prompt)\r\n _dialog.set_markup(prompt)\r\n\r\n _dialog.run()\r\n _dialog.destroy()", "title": "" }, { "docid": "6438f5b80d90d46ecbbcecd23ceba2e6", "score": "0.5907847", "text": "def run(self):\n #reloadPlugin('Tilgjengelighet')\n\n self.dlg.show()\n\n result = self.dlg.exec_()\n # See if OK was pressed\n if result:\n # Do something useful here - delete the line containing pass and\n # substitute with your code.\n pass", "title": "" }, { "docid": "24b09c31aa86e6e313c252b69057efd6", "score": "0.5899983", "text": "def runAskLeoIDDialog(self):\n d = leoTkinterDialog.tkinterAskLeoID()\n return d.run(modal=True)", "title": "" }, { "docid": "023afc1e672cd9633b7cf1d757169308", "score": "0.5895213", "text": "def yesButton(self):\n\n self.answer=self.yesMessage.lower()\n self.top.destroy()", "title": "" }, { "docid": "2a1d3008a8bdb9a6c836e7bd6efdbdd3", "score": "0.5895091", "text": "def create_messageBox(title, text):\n message = QMessageBox()\n message.setText(text)\n message.setWindowTitle(title)\n message.exec()", "title": "" }, { "docid": "9134b16c860c4b06e67bb41022e1af53", "score": "0.58927613", "text": "def OnQuit(self, e):\n dial = wx.MessageDialog(None, 'Are you sure to quit?', 'OpenSource Editor/Converter',\n wx.YES_NO | wx.NO_DEFAULT | wx.ICON_QUESTION)\n ret = dial.ShowModal()\n if ret == wx.ID_YES:\n self.Destroy()\n else:\n e.Veto()", "title": "" }, { "docid": "ec66b62e762f11a32ab81ced9cf8f7ca", "score": "0.5891222", "text": "def closeEvent(self, event):\n if self.okPressed:\n event.accept()\n else:\n mb = QtWidgets.QMessageBox\n reply = mb.question(self, 'Message', \"Do you want to quit?\",\n mb.Yes, mb.No)\n if reply == mb.Yes:\n self.user.setText(\"\")\n self.batch.setText(\"\")\n self.grp.setText(\"\")\n event.accept()\n else:\n event.ignore()", "title": "" }, { "docid": "3204a266a39b21a6b93555e190c0029e", "score": "0.58781826", "text": "def cancel_button_clicked():\n self.dialog_add_strategy.destroy()", "title": "" }, { "docid": "ddd4753d9139f778334b622b2785e8ec", "score": "0.5873595", "text": "def confirm_exit():\n return sg.popup_ok_cancel(\n 'Are you sure you want to exit?',\n title='Unsaved changes',\n button_color=EDITOR_BUTTON_COLOR,\n font=EDITOR_FONT,\n grab_anywhere=True,\n )", "title": "" }, { "docid": "bf3e13b0d6916274d5c89d147a7f00c3", "score": "0.58676404", "text": "def on_ok_button_clicked(self, obj):\n\t\t\n\t\t_lay = self.get_selected_layouts()\n\t\t_loc = self.get_selected_locale()\n\t\t_var = self.get_selected_variant()\n\t\t_mod = self.get_selected_model()\n\t\t_tim = self.get_selected_timezone()\n\t\t\n\t\t# Set to None unchanged things\n\t\tif locale.default == _loc: _loc = None\n\t\tif keyboard.default_layout == _lay: _lay = 
None\n\t\tif keyboard.default_variant == _var: _var = None\n\t\tif keyboard.default_model == _mod: _mod = None\n\t\tif timezone.default == _tim: _tim = None\n\t\t\n\t\t# Show box and make the notebook unsensitive\n\t\tself.progress_box.show()\n\t\tself.notebook.set_sensitive(False)\n\t\t\n\t\tself.cancel_button.set_sensitive(False)\n\t\tself.ok_button.set_sensitive(False)\n\t\t\n\t\t# Load Apply class, then run it\n\t\tclss = Apply(self, lay=_lay, loc=_loc, var=_var, mod=_mod, tim=_tim, savespace=self.savespace_activated, savespacepurge=self.purge_older_translations)\n\t\tclss.start()", "title": "" }, { "docid": "3e85ff52046dc78972486bd42271f8c4", "score": "0.58584327", "text": "def MessageBox(message, buttons=0, title=\"\"):\n buttontype = buttons & 0x00000007 #111 in binary\n btn = System.Windows.Forms.MessageBoxButtons.OK\n if buttontype==1: btn = System.Windows.Forms.MessageBoxButtons.OKCancel\n elif buttontype==2: btn = System.Windows.Forms.MessageBoxButtons.AbortRetryIgnore\n elif buttontype==3: btn = System.Windows.Forms.MessageBoxButtons.YesNoCancel\n elif buttontype==4: btn = System.Windows.Forms.MessageBoxButtons.YesNo\n elif buttontype==5: btn = System.Windows.Forms.MessageBoxButtons.RetryCancel\n \n icontype = buttons & 0x00000070\n icon = System.Windows.Forms.MessageBoxIcon.None\n if icontype==16: icon = System.Windows.Forms.MessageBoxIcon.Error\n elif icontype==32: icon = System.Windows.Forms.MessageBoxIcon.Question\n elif icontype==48: icon = System.Windows.Forms.MessageBoxIcon.Warning\n elif icontype==64: icon = System.Windows.Forms.MessageBoxIcon.Information\n \n defbtntype = buttons & 0x00000300\n defbtn = System.Windows.Forms.MessageBoxDefaultButton.Button1\n if defbtntype==256:\n defbtn = System.Windows.Forms.MessageBoxDefaultButton.Button2\n elif defbtntype==512:\n defbtn = System.Windows.Forms.MessageBoxDefaultButton.Button3\n if not isinstance(message, str): message = str(message)\n dlg_result = Rhino.UI.Dialogs.ShowMessageBox(message, title, btn, icon, defbtn)\n if dlg_result==System.Windows.Forms.DialogResult.OK: return 1\n if dlg_result==System.Windows.Forms.DialogResult.Cancel: return 2\n if dlg_result==System.Windows.Forms.DialogResult.Abort: return 3\n if dlg_result==System.Windows.Forms.DialogResult.Retry: return 4\n if dlg_result==System.Windows.Forms.DialogResult.Ignore: return 5\n if dlg_result==System.Windows.Forms.DialogResult.Yes: return 6\n if dlg_result==System.Windows.Forms.DialogResult.No: return 7", "title": "" }, { "docid": "3f705f4f3b46c68c5cf2c95d5334f137", "score": "0.5839744", "text": "def cancel_button_clicked():\n self.dialog_edit_strategy.destroy()", "title": "" }, { "docid": "802d95f18f6c1953ae7991b1e731cebe", "score": "0.58304226", "text": "def open_nb_clues_message_box(self):\n if self.app.game.confirm_erase():\n self.create()\n NbCluesMessageBox(self.app)", "title": "" }, { "docid": "ef65af12ecdebaa97bce45b0d7b4df2e", "score": "0.5826975", "text": "def confirm(self) -> bool:\n self.debug.log(\"Confirmation window created\")\n popup = sg.Window(self.title, [\n [sg.Text(\"\")],\n [sg.Text(self.message), ],\n [sg.Text(\"\")],\n [sg.Button(\"YES\", key=\"YES\", button_color='green'), sg.Button(\"NO\", key=\"NO\", button_color='red')],\n ],\n icon=images.ma_logo_png, )\n while True:\n event, values = popup.read()\n if event == \"OK\" or event == \"NO\" or event == sg.WIN_CLOSED:\n self.debug.log(\"User clicked NO\")\n popup.close()\n return False\n if event == \"YES\":\n self.debug.log(\"User clicked YES\")\n popup.close()\n return True", 
"title": "" }, { "docid": "724acab0cf2510cca384fce236d73e0c", "score": "0.58213466", "text": "def cancelButton(self):\n\n self.answer=\"cancel\"\n self.top.destroy()", "title": "" }, { "docid": "a79540f955ff13c65b2d4cba53fbe60f", "score": "0.58035654", "text": "def msgbox(self, message_text, title=None, buttons=None, icon=None):\n mymessage = QMessageBox(self.window)\n mymessage.setWindowModality(Qt.WindowModal)\n\n if title:\n mymessage.setWindowTitle(title)\n mymessage.setText(message_text)\n\n if buttons:\n # self.pprint('Buttons')\n for key in buttons.keys():\n _btn = mymessage.addButton(key, QMessageBox.ActionRole)\n _btn.clicked.connect(buttons[key])\n\n mymessage.exec_()\n clicked = mymessage.clickedButton()\n return clicked.text()", "title": "" }, { "docid": "09df0912375edf59a42bea34945e8b9c", "score": "0.5802119", "text": "def questionBox(title='', text='', info='', detail='', buttons_def=''):\r\n\r\n qmbox = QtGui.QMessageBox()\r\n qmbox.setIcon(QtGui.QMessageBox.Question)\r\n qmbox.setWindowTitle(title)\r\n qmbox.setText(text)\r\n if info:\r\n qmbox.setInformativeText(info)\r\n if detail:\r\n qmbox.setDetailedText(detail)\r\n qmbox.setDefaultButton(QtGui.QMessageBox.NoButton)\r\n\r\n buttons = {}\r\n for name, (text, role) in buttons_def.items():\r\n buttons[name] = qmbox.addButton(text, role)\r\n\r\n qmbox.exec_()\r\n\r\n # Find out which button has been returned\r\n for name in buttons.keys():\r\n if buttons[name] == qmbox.clickedButton():\r\n return name\r\n return", "title": "" }, { "docid": "eaa20e27aa307577e2945384ea193d53", "score": "0.57988626", "text": "def confirm(self, prompt, destructive=True):\n message = self._format(prompt, '?')\n if destructive:\n message = click.style(message, fg='red', bold=True)\n\n return click.confirm(message)", "title": "" }, { "docid": "f9b4ef7db7890730abb63e1cab18e870", "score": "0.5795238", "text": "def prompt_boolean(text, title=\"Notice\", true_text=\"Yes\", false_text=\"No\", width = None, height = None, font=\"Roboto_Regular12\", cb=None):\n\tglobal wait_for_interrupt, button_pushed, __cb\n\t\n\t__cb = cb\n\n\tif width == None:\n\t\twidth = display.width()\n\tif height == None:\n\t\theight = display.height()\n\n\tx = (display.width() - width) // 2\n\ty = (display.height() - height) // 2\n\tif (x < 0):\n\t\tx = 0\n\tif (y < 0):\n\t\ty = 0\n\t\t \n\tdisplay.drawFill(0xFFFFFF)\n\tdisplay.drawRect(0, 0, display.width(), 14, True, 0)\n\tdisplay.drawText(0, 0, title, 0xFFFFFF, \"Roboto_Regular12\")\n\n\tif false_text:\n\t\tfalse_text = \"B: \" + false_text\n\ttrue_text = \"A: \" + true_text\n\t\n\tdisplay.drawText(0, 36, ugfx.wordWrap(text, None, font), 0x000000, font)\n\t\n\tif false_text:\n\t\t_button(10, height-display.getTextHeight(false_text, font), false_text, 0x000000, font)\n\t\t_button((width - display.getTextWidth(true_text, font) - 10), height - display.getTextHeight(true_text, font), true_text, 0x000000, font)\n\telse:\n\t\t_button(width - 10 - display.getTextWidth(true_text, font), height - display.getTextHeight(true_text, font), true_text, 0x000000, font)\n\n\tdisplay.flush()\n\n\tif cb:\n\t\tugfx.input_attach(ugfx.BTN_A, __asyncSuccess)\n\t\tugfx.input_attach(ugfx.BTN_B, __asyncCancel)\n\t\t#Done :-)\n\telse:\n\t\tugfx.input_attach(buttons.BTN_A, None)\n\t\tugfx.input_attach(buttons.BTN_B, None)\n\t\t\n\t\twhile True:\n\t\t\tif buttons.value(buttons.BTN_A):\n\t\t\t\tdisplay.drawFill(0xFFFFFF)\n\t\t\t\tdisplay.flush()\n\t\t\t\twhile buttons.value(buttons.BTN_A) or 
buttons.value(buttons.BTN_B):\n\t\t\t\t\ttime.sleep(0.1)\n\t\t\t\treturn True\n\t\t\tif buttons.value(buttons.BTN_B):\n\t\t\t\tdisplay.drawFill(0xFFFFFF)\n\t\t\t\tdisplay.flush()\n\t\t\t\twhile buttons.value(buttons.BTN_A) or buttons.value(buttons.BTN_B):\n\t\t\t\t\ttime.sleep(0.1)\n\t\t\t\treturn False", "title": "" }, { "docid": "3afe203d8cffd6c7994a504b576bfe11", "score": "0.5794453", "text": "def on_ok(self, keypress=None):\n self.on_cancel()", "title": "" }, { "docid": "9df1fa0a9050d73885346fa276d0799a", "score": "0.577538", "text": "def exit_prompt(self):\n if messagebox.askyesno(\n self._(\"box.title.quit\"),\n self._(\"box.text.quit\")\n ):\n self.parent.destroy()", "title": "" }, { "docid": "2754d9ec4a0c89a8b367184338e7f6ab", "score": "0.57530195", "text": "def __updateOK(self):\n enable = self.badEdit.text() != \"\"\n self.okButton.setEnabled(enable)", "title": "" }, { "docid": "1856fc4959e14ac393fc14dc02098ff7", "score": "0.57514936", "text": "def error_message(text):\n error = QtWidgets.QMessageBox()\n error.setIcon(QtWidgets.QMessageBox.Critical)\n error.setText(text)\n error.setWindowTitle('Ошибка!')\n error.setStandardButtons(QtWidgets.QMessageBox.Ok)\n error.exec_()", "title": "" }, { "docid": "00a6e2fea1c0e8feefff99ad5e6a225c", "score": "0.5749108", "text": "def exit_confirmation(self, i):\n\n if i.text() == '&Yes':\n self.close()", "title": "" }, { "docid": "73251c69fb2335e2e2da8c0d5701941a", "score": "0.5747827", "text": "def menunode_confirm_name(caller, raw_string, **kwargs):\n char = caller.new_char\n\n # since we reserved the name by assigning it, you can reference the character key\n # if you have any extra validation or normalization that changed the player's input\n # this also serves to show the player exactly what name they'll get\n text = f\"|w{char.key}|n is available! Confirm?\"\n # let players change their mind and go back to the name choice, if they want\n options = [\n {\"key\": (\"Yes\", \"y\"), \"goto\": \"menunode_end\"},\n {\"key\": (\"No\", \"n\"), \"goto\": \"menunode_choose_name\"},\n ]\n return text, options", "title": "" }, { "docid": "5bd0f7fab6f62ca5a7cd732424843931", "score": "0.5724911", "text": "def cancel(self):\n self.__askedToCancel = True\n self.setDialogMessage(\"Canceled. Waiting for current task to finish...\")", "title": "" }, { "docid": "38b29fb9cac61d27e950792c141631c2", "score": "0.5721575", "text": "def choose_yes_or_no(message):\n bc = ButtonChooser(Key('Y'), Key('N'))\n (_, button, _) = Text(message).present(bc=bc)\n return button == Key('Y')", "title": "" }, { "docid": "9ea78734e68f80752cd6fecb3afcf10a", "score": "0.5712638", "text": "def show_msg(self,msg):\n msg_box = QMessageBox(self.ui)\n msg_box.setText(\"No reference selected. 
Please select one from the list.\")\n msg_box.show()", "title": "" }, { "docid": "93e9483dc33a7dbfbba4f9d9af474463", "score": "0.5703653", "text": "def OnMsgBtn(self, event=None):\n dlg = wx.MessageDialog(self,\n message='A completely useless message',\n caption='A Message Box',\n style=wx.OK|wx.ICON_INFORMATION\n )\n dlg.ShowModal()\n dlg.Destroy()", "title": "" }, { "docid": "26cf9dd7cca7477efe5572457ae112a6", "score": "0.57023805", "text": "def popupBox(self, text):\n # Initialize widgets + layouts\n self.popupTest1 = QDialog(None, Qt.SplashScreen)\n self.popupTest1.OKButton = QPushButton(\"OK\")\n self.popupTest1.mainText = QLabel(text)\n self.popupTest1.mainLayout = QVBoxLayout()\n self.popupTest1.OKButton.clicked.connect(self.popupTest1.close)\n\n # Customize dialog box appearance (style sheet)\n self.boxStyle = \"\"\"QDialog{\n border: 1px solid gray;\n border-radius: 5px;\n }\"\"\"\n self.popupTest1.setStyleSheet(self.boxStyle)\n\n # Populate layouts\n self.popupTest1.mainLayout.addWidget(self.popupTest1.mainText, alignment = Qt.AlignCenter)\n self.popupTest1.mainLayout.addWidget(self.popupTest1.OKButton, alignment = Qt.AlignCenter)\n self.popupTest1.setLayout(self.popupTest1.mainLayout)\n\n self.popupTest1.exec_()", "title": "" }, { "docid": "af462e7302c6c23b8bca310522485924", "score": "0.568617", "text": "def message_box_warning(title, message, optional=False):\n from avalon.vendor.Qt import QtWidgets\n\n opt_btn = QtWidgets.QMessageBox.NoButton\n if optional:\n opt_btn = QtWidgets.QMessageBox.Cancel\n\n respond = QtWidgets.QMessageBox.warning(None,\n title,\n message,\n QtWidgets.QMessageBox.Ok,\n opt_btn)\n if optional:\n return respond == QtWidgets.QMessageBox.Ok", "title": "" }, { "docid": "ade76365785832bcb3890b0068b59c5d", "score": "0.56819016", "text": "def run_quit_dialog(self, procs, tab):\n # Stop an open \"close tab\" dialog from obstructing a quit\n if self.prompt_dialog is not None:\n self.prompt_dialog.destroy()\n self.prompt_dialog = PromptQuitDialog(self.window, procs, tab)\n response = self.prompt_dialog.run() == gtk.RESPONSE_YES\n self.prompt_dialog.destroy()\n self.prompt_dialog = None\n # Keep Guake focussed after dismissing tab-close prompt\n if tab == -1:\n self.window.present()\n return response", "title": "" } ]
a5910b8a2b1611b02a3d90d9189dc480
Test valid json file of a single dict can be used to update value of existing object in database. First modify one author's url and then recover it.
[ { "docid": "0ea34ceb45587c36eb03dc85ca739ece", "score": "0.8090758", "text": "def test_valid_one_dict_update(self):\n path = JSON_PATH + \"legal_one_author.json\"\n _, author_db = connect_to_mongo()\n with open(path, \"r+\") as file:\n author_dic = json.load(file)\n\n author_id = author_dic[\"_id\"]\n true_author_url = author_dic[\"author_url\"]\n query_key = {\"_id\": author_id}\n update_val = {\"$set\": {\"author_url\": \"duckduckgo.com\"}}\n self.assertTrue(list(author_db.find(query_key)) != [])\n\n # modify author url to duckduckgo\n author_db.update_one(query_key, update_val)\n author_url_modified = author_db.find_one(query_key)[\"author_url\"]\n self.assertEqual(author_url_modified, \"duckduckgo.com\")\n\n # recover author url\n insert_into_db(path, db_type=\"author\")\n author_url_recovered = author_db.find_one(query_key)[\"author_url\"]\n self.assertEqual(author_url_recovered, true_author_url)", "title": "" } ]
[ { "docid": "0bc629ea2a9577bca28d1265639fd4c9", "score": "0.6717318", "text": "def test_malformatted_json(self):\n path = JSON_PATH + \"malformed_author.json\"\n try:\n insert_into_db(path, db_type=\"author\")\n self.assertTrue(False) # An error should be thrown\n except:\n self.assertTrue(True)", "title": "" }, { "docid": "8107b67f75f6cd3f4530296773fe0b05", "score": "0.6704165", "text": "def test_valid_single_dict_create(self):\n path = JSON_PATH + \"legal_one_author.json\"\n _, author_db = connect_to_mongo()\n with open(path, \"r+\") as file:\n author_dic = json.load(file)\n\n author_id = author_dic[\"_id\"]\n self.assertTrue(list(author_db.find({\"_id\": author_id})) != [])\n author_db.delete_one({\"_id\": author_id})\n self.assertTrue(list(author_db.find({\"_id\": author_id})) == [])\n\n insert_into_db(path, db_type=\"author\")\n self.assertTrue(list(author_db.find({\"_id\": author_id})) != [])", "title": "" }, { "docid": "956638a67d432611092ba846793a36ee", "score": "0.6556344", "text": "def test_valid_many_dict_update(self):\n path = JSON_PATH + \"legal_many_books.json\"\n book_db, _ = connect_to_mongo()\n with open(path, \"r+\") as file:\n book_dics = json.load(file)\n\n true_book_urls = []\n # first modify all target book_urls in db\n for book_dic in book_dics:\n book_id = book_dic[\"_id\"]\n true_book_urls.append(book_dic[\"book_url\"])\n query_key = {\"_id\": book_id}\n update_val = {\"$set\": {\"book_url\": \"duckduckgo.com\"}}\n book_db.update_one(query_key, update_val)\n self.assertTrue(list(book_db.find(query_key)) != [])\n\n # recover book url\n insert_into_db(path, db_type=\"book\")\n for i, book_dic in enumerate(book_dics):\n book_id = book_dic[\"_id\"]\n true_url = true_book_urls[i]\n query_key = {\"_id\": book_id}\n self.assertTrue(book_db.find_one(query_key)[\"book_url\"], true_url)", "title": "" }, { "docid": "80e45f8e06e1a74fc59277250dbbb841", "score": "0.651416", "text": "def test_update(self):\n original_data = {\"id\": 5, \"name\": \"John\", \"age\": 33, \"instrument\": \"guitar\",\n \"skillLevel\": \"advanced\", \"lessonPlace\": \"home\", \"lessonDuration\": \"60 mins\"}\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, msg=response.content.decode())\n list_response = sorted(response.json(), key=operator.itemgetter('id'))\n dict_data = list_response[-1]\n self.assertEqual(original_data, dict_data)\n new_data = {\"age\": 35, \"lessonPlace\": \"online\"}\n original_data.update(new_data)\n response = self.client.put(self.url + '5/', data=json.dumps(new_data), content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, msg=response.content.decode())\n response = self.client.get(self.url)\n list_response = sorted(response.json(), key=operator.itemgetter('id'))\n dict_data = list_response[-1]\n self.assertEqual(original_data, dict_data)", "title": "" }, { "docid": "9115b4e36d878c932579b0599a166778", "score": "0.6409345", "text": "def test_simple_replacement():\n json = dict(\n key='value',\n object=dict(\n type='material_run',\n uids={'my_id': '1', 'id': '17'}\n )\n )\n replaced_json = replace_objects_with_links(json)\n assert replaced_json == {'key': 'value',\n 'object': {'type': 'link_by_uid', 'scope': 'id', 'id': '17'}}", "title": "" }, { "docid": "66ec554d4830abd4e26cf1064b023ee0", "score": "0.63263977", "text": "def test_json_field_load(self):\n\n json_obj_1 = {'a': 1, 'b': 2}\n\n obj = JsonModel.objects.create(json=json_obj_1)\n\n new_obj = JsonModel.objects.get(id=obj.id)\n\n 
self.failUnlessEqual(new_obj.json, json_obj_1)", "title": "" }, { "docid": "3ae238e4931cd57f5e490ca3df4ba264", "score": "0.6294463", "text": "def update_json_file(self):", "title": "" }, { "docid": "460ec2e11826fdb1948f3f92495e2365", "score": "0.6278214", "text": "def test_put_asset_makes_the_changes():\n\n asset_to_edit = pages[1]\n\n query_url = paths.get_resource_path(ASSET, asset_to_edit[NAME])\n\n \"\"\" Get the data first \"\"\"\n r = requests.get(query_url, verify=False, auth=credentials)\n data = load_json(r)\n\n assert data[DESCRIPTION] == asset_to_edit[DESCRIPTION]\n\n updated_data = {\n NAME: asset_to_edit[NAME],\n DESCRIPTION: \"This should be seen as a description after the update.\",\n CONNECTED_TO: asset_to_edit[CONNECTED_TO],\n TAGS: asset_to_edit[TAGS]\n }\n\n r = requests.put(query_url, verify=False, auth=credentials, json=updated_data)\n\n print(r.content)\n\n retrieved_data = r.json()\n assert r.status_code == 200\n\n print(\"{} {}\".format(type(retrieved_data),retrieved_data))\n print(\"{} {}\".format(type(updated_data), updated_data))\n\n assert retrieved_data[NAME] == updated_data[NAME]\n assert retrieved_data[DESCRIPTION] == updated_data[DESCRIPTION]\n assert retrieved_data[CONNECTED_TO] == updated_data[CONNECTED_TO]\n assert retrieved_data[TAGS] == updated_data[TAGS]\n\n \"\"\" After the put the page should show the edited text\n and the edited data should have been returned by the\n put request.\n \"\"\"\n\n r = requests.get(query_url, verify=False, auth=credentials, data=updated_data)\n\n received_data = r.json()\n\n # verify that the updated asset returns same asset\n assert updated_data[NAME] == asset_to_edit[NAME]\n\n # verify that the updated description can be found\n # from the rendered html page\n # assert bytes(updated_data['description'], r.encoding) in r.content\n\n # verify that the received data matches sent data.\n assert received_data[DESCRIPTION] == updated_data[DESCRIPTION]", "title": "" }, { "docid": "5df1b3a4936f1d644c190e7b535b473d", "score": "0.615172", "text": "def test_json_field_modify(self):\n\n json_obj_1 = {'a': 1, 'b': 2}\n json_obj_2 = {'a': 3, 'b': 4}\n\n obj = JsonModel.objects.create(json=json_obj_1)\n\n self.failUnlessEqual(obj.json, json_obj_1)\n\n obj.json = json_obj_2\n\n self.failUnlessEqual(obj.json, json_obj_2)\n\n obj.save()\n\n self.failUnlessEqual(obj.json, json_obj_2)\n\n self.assert_(obj)", "title": "" }, { "docid": "9116f2bbc7a71065b54c0fc4f80c873a", "score": "0.6133538", "text": "def test_json_field_load(self):\r\n json_obj_1 = {'a': 1, 'b': 2}\r\n obj = self.json_model.objects.create(json=json_obj_1)\r\n new_obj = self.json_model.objects.get(id=obj.id)\r\n\r\n self.assertEqual(new_obj.json, json_obj_1)", "title": "" }, { "docid": "7be97f37bacf773fa7b47bbd83d89378", "score": "0.61077017", "text": "def test_api_can_update_entry(self):\n\t\tentry = Entry.objects.get()\n\t\tchange_entry = {'weight': 123,'calories':2112,'date':'2001-12-12'}\n\t\tres = self.client.put(\n reverse('details', kwargs={'pk': entry.id}),\n change_entry, format='json'\n )\n\t\tself.assertEqual(res.status_code, status.HTTP_200_OK)", "title": "" }, { "docid": "167bb81f78b83c97954a035504d8afcc", "score": "0.6066595", "text": "def post_one_author_info():\n # new_dict = json.loads(request.data)\n if args.infile is None:\n new_dict = json.loads(request.data)\n else:\n new_dict = json.load(args.infile[0])\n try:\n res = post_helper(False, new_dict)\n if res is None:\n return response_helper(\"cannot post author\", 500)\n print(\"success\")\n return 
response_helper(res, 200)\n except Exception:\n return response_helper(\"internal error\", 500)", "title": "" }, { "docid": "f33bb92881899f91cf94c0d702f1a9c7", "score": "0.60658985", "text": "def test_update_object(self):\n bad_url = \"/broken/internal/link\"\n good_url = \"/public/\"\n author = Author.objects.create(name=\"John Smith\", website=bad_url)\n self.assertEqual(\n Link.objects.filter(ignore=False, url__status=False).count(),\n 1\n )\n self.assertEqual(\n Link.objects.filter(ignore=False, url__status=True).count(),\n 0\n )\n self.assertEqual(Url.objects.all().count(), 1)\n self.assertEqual(Url.objects.all()[0].url, bad_url)\n # Fix the link\n author.website = good_url\n author.save()\n self.assertEqual(\n Link.objects.filter(ignore=False, url__status=False).count(),\n 0\n )\n self.assertEqual(\n Link.objects.filter(ignore=False, url__status=True).count(),\n 1\n )\n self.assertEqual(Url.objects.all().count(), 1)\n self.assertEqual(Url.objects.all()[0].url, good_url)", "title": "" }, { "docid": "544885975827dc371435d1df4286e6d2", "score": "0.60267025", "text": "def test_json_field_modify(self):\r\n json_obj_1 = {'a': 1, 'b': 2}\r\n json_obj_2 = {'a': 3, 'b': 4}\r\n\r\n obj = self.json_model.objects.create(json=json_obj_1)\r\n self.assertEqual(obj.json, json_obj_1)\r\n obj.json = json_obj_2\r\n\r\n self.assertEqual(obj.json, json_obj_2)\r\n obj.save()\r\n self.assertEqual(obj.json, json_obj_2)\r\n\r\n self.assertTrue(obj)", "title": "" }, { "docid": "f411c811308271e8252742ebd8d7e874", "score": "0.60092205", "text": "async def test_update_with_json_attrs_bad_json(\n hass: HomeAssistant,\n mqtt_mock_entry: MqttMockHAClientGenerator,\n caplog: pytest.LogCaptureFixture,\n) -> None:\n await help_test_update_with_json_attrs_bad_json(\n hass,\n mqtt_mock_entry,\n caplog,\n light.DOMAIN,\n DEFAULT_CONFIG,\n )", "title": "" }, { "docid": "768ea015edb74e6595bfcb6840b04c20", "score": "0.59918255", "text": "def patch_json(main, url, json_dict):\n access_token = create_access_token('[email protected]')\n headers = {'Authorization': 'Bearer {}'.format(access_token)}\n return main.patch(url, data=json.dumps(json_dict), content_type='application/json', headers=headers)", "title": "" }, { "docid": "c6848f39b75efef8686f409acd97af4e", "score": "0.59723175", "text": "async def test_update_with_json_attrs_bad_json(\n hass: HomeAssistant,\n mqtt_mock_entry: MqttMockHAClientGenerator,\n caplog: pytest.LogCaptureFixture,\n) -> None:\n await help_test_update_with_json_attrs_bad_json(\n hass,\n mqtt_mock_entry,\n caplog,\n fan.DOMAIN,\n DEFAULT_CONFIG,\n )", "title": "" }, { "docid": "cf00154a139e988473243602b59bdf99", "score": "0.5917673", "text": "def test_put_broken_json(self):\n\n resp = req.put(settings.get_base_url()+\"/hello/Basil\", data = '{ dateOfBirth\": \"2014-05-01\" }', headers={'Content-type': 'application/json'})\n assert 400==resp.status_code", "title": "" }, { "docid": "c00f2b87d397f3478f2aab083d158881", "score": "0.5914497", "text": "def test_file(self):\n\n b3 = BaseModel()\n b3.save()\n with open(\"file.json\", \"r\") as f:\n self.assertIn(b3.id, f.read())", "title": "" }, { "docid": "41a70daecade9f12ef811351da5fa26e", "score": "0.59062517", "text": "def check_url(self):\n changed = False\n with open('records.json', 'r') as json_file:\n data = json.load(json_file)\n\n url_from_data = data[self.cat][self.name][self.URL_domain]['info']['url']\n\n if url_from_data == '':\n data[self.cat][self.name][self.URL_domain]['info']['url'] = self.short_url\n changed = True\n elif not self.short_url 
== url_from_data:\n data[self.cat][self.name][self.URL_domain]['info']['url_2'] = self.short_url\n changed = True\n\n if changed:\n with open('records.json', 'w') as json_file:\n json.dump(data, json_file, indent=2)", "title": "" }, { "docid": "08bba0738175a17f61d90e4ff04fa31e", "score": "0.58934826", "text": "def test_failed_replacement():\n json = dict(object=dict(\n some_field='material_run',\n uids={'my_id': '1', 'id': '17'}\n ))\n assert json == replace_objects_with_links(json) # no type field\n\n json = dict(object=dict(\n type='material_run',\n uids='a uid string'\n ))\n assert json == replace_objects_with_links(json) # uids is not a dictionary\n\n json = dict(object=dict(\n type='material_run',\n some_field={'my_id': '1', 'id': '17'}\n ))\n assert json == replace_objects_with_links(json) # no uids field\n\n json = dict(object=dict(\n type='material_run',\n uids={}\n ))\n assert json == replace_objects_with_links(json) # uids is an empty dictionary", "title": "" }, { "docid": "783be12bd1e1ded3f6bd798d70dec1e6", "score": "0.5893285", "text": "def test_author_missing_key(self):\n path = JSON_PATH + \"book_miss_book_url.json\"\n try:\n insert_into_db(path, db_type=\"book\")\n self.assertTrue(False) # An error should be thrown\n except:\n self.assertTrue(True)", "title": "" }, { "docid": "36a98c81274425e39f164827a6ecec11", "score": "0.5873386", "text": "def update_from_json_file(self, file_path_or_obj):\n return self._update_from_file(file_path_or_obj, json.load)", "title": "" }, { "docid": "107e34b5c09b355c13a79d83bfc9960a", "score": "0.58409196", "text": "def test_requester_put(self):\n co = Requester.objects.all()[0]\n self.api_client.client.login(username=username, password=password)\n co_data = self.deserialize(self.api_client.get(\n self.detail_url % co.pk))\n new_data = co_data.copy()\n new_data['slug'] = 'john'\n\n self.assertEqual(Requester.objects.count(), 2)\n self.assertHttpAccepted(self.api_client.put(\n self.detail_url % co.pk, format='json', data=new_data))\n self.assertEqual(Requester.objects.count(), 2)\n\n self.assertEqual(Requester.objects.get(pk=co.pk).slug, \n 'john')", "title": "" }, { "docid": "ba8ea3c813291dc585e6760b5f1d1d12", "score": "0.5830192", "text": "def __save_to_database(self, json):\n data_id = HashService.num_md5(self.url.url_string)\n self.parser_service.update_item(data_id, json)", "title": "" }, { "docid": "4917347bb94645f897e79fc2dc2fcd4e", "score": "0.57983047", "text": "def test_successful_links_update(self, obj_payload, app):\n obj_id = obj_payload['id']\n updated_obj = self.model.get(obj_id)\n links = updated_obj.links\n assert len(links) == 2\n update_link = links[0]\n\n payload = {\n 'links': [{\n 'id': update_link.id,\n 'type': update_link.type,\n 'url': 'https://url.com',\n 'state': update_link.state\n }]\n }\n request = app.put_json('{base}/{id}'.format(base=self.base_path, id=obj_id),\n payload, headers=self.headers, status=200)\n result = request.json\n assert 'application/json' == request.content_type\n\n assert 'links' in result.keys()\n obj_results = result.get('links')\n assert len(obj_results) == 1\n obj_result = obj_results[0]\n result_keys = list(obj_result.keys())\n result_values = list(obj_result.values())\n\n for key, value in payload.get('links')[0].items():\n assert key in result_keys\n assert str(value) in result_values\n\n updated_obj = self.model.get(obj_id)\n assert updated_obj.links is not None\n assert len(updated_obj.links) == 1", "title": "" }, { "docid": "6dc0eb96dc992c05f2450b7cdd9ba30b", "score": "0.5787787", 
"text": "def test_save(self):\n\n self.auto_continue.database = self.our_dataset.copy()\n self.auto_continue.save()\n\n expected = True\n actual = PyFunceble.helpers.File(self.storage_file).exists()\n\n self.assertEqual(expected, actual)\n\n expected = self.our_dataset.copy()\n actual = PyFunceble.helpers.Dict().from_json_file(self.storage_file)\n\n self.assertEqual(expected, actual)", "title": "" }, { "docid": "1109cf72a938fc3bab773b908dcdb384", "score": "0.577649", "text": "def test_format_put(self):\n co = Format.objects.all()[0]\n self.api_client.client.login(username=username, password=password)\n co_data = self.deserialize(self.api_client.get(\n self.detail_url % co.pk))\n new_data = co_data.copy()\n new_data['name'] = 'jpeg'\n\n self.assertEqual(Format.objects.count(), 2)\n self.assertHttpAccepted(self.api_client.put(\n self.detail_url % co.pk, format='json', data=new_data))\n self.assertEqual(Format.objects.count(), 2)\n\n self.assertEqual(Format.objects.get(pk=co.pk).name, \n 'jpeg')", "title": "" }, { "docid": "c99758377e89e60b8655aff35950610a", "score": "0.57735336", "text": "def test_post_put(cls):\n client = Client()\n new_post_id = uuid.uuid4()\n url = reverse(\"Post\", kwargs={\"author_id\":cls.author_id_2, \"post_id\":new_post_id})\n\n new_title = \"Test Post 2\"\n new_source = \"SomeTestWebsite.com/posts/\"\n new_origin = \"SomeOtherTestWebsite.com/posts/\"\n new_description = \"A changed test post\"\n new_content_type = \"text/markdown\"\n new_content = \"Some different body text.\"\n new_visibility = \"FRIENDS\"\n new_unlisted = True\n json = {\n \"title\":new_title,\n \"source\":new_source,\n \"origin\":new_origin,\n \"description\":new_description,\n \"contentType\":new_content_type,\n \"content\":new_content,\n \"visibility\":new_visibility,\n \"unlisted\":new_unlisted\n }\n\n # Test unauthenticated request\n response = client.put(url, json, content_type=\"application/json\")\n cls.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n # Test incorrectly authenticated request\n client.force_login(Author.objects.get(pk=cls.author_id_1))\n response = client.put(url, json, content_type=\"application/json\")\n cls.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n # Test correctly authenticated request\n client.force_login(Author.objects.get(pk=cls.author_id_2))\n response = client.put(url, json, content_type=\"application/json\")\n cls.assertEqual(response.status_code, status.HTTP_200_OK)\n\n # Try GET on updated object and see if they match\n response = client.get(url)\n cls.assertEqual(response.status_code, status.HTTP_200_OK)\n cls.assertEqual(response.json(), PostToJSON(Post.objects.get(pk=new_post_id)))\n\n # Test a request on an object that doesn't exist (author doesn't exist)\n url = reverse(\"Post\", kwargs={\"author_id\":new_post_id, \"post_id\":new_post_id})\n response = client.put(url, json, content_type=\"application/json\")\n cls.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n\n # Test a request with an invalid ID (bad author ID)\n url = reverse(\"Post\", kwargs={\"author_id\":\"abc\", \"post_id\":new_post_id})\n response = client.put(url, json, content_type=\"application/json\")\n cls.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n # Test a request with an invalid ID (bad post ID)\n url = reverse(\"Post\", kwargs={\"author_id\":cls.author_id_2, \"post_id\":\"abc\"})\n response = client.put(url, json, content_type=\"application/json\")\n cls.assertEqual(response.status_code, 
status.HTTP_400_BAD_REQUEST)", "title": "" }, { "docid": "50857312fa99d74dc89c13c01c158578", "score": "0.57726353", "text": "def validate_json():\n for filename in os.listdir('data/fortune-100-json'):\n if filename.endswith('.json'):\n with open(f'data/fortune-100-json/{filename}') as f:\n company_json = json.load(f)\n\n # Ensure no tweets were truncated\n assert not any([tweet['truncated'] for tweet in company_json])\n # Ensure no IDs were rounded\n assert all([str(tweet['id']) == tweet['id_str'] for tweet in company_json])", "title": "" }, { "docid": "62829b1608bc6f45faf124e5a0ee5163", "score": "0.5771278", "text": "def test_can_reformat_json_object(self):\n data = requests.get('https://jsonplaceholder.typicode.com/todos/1').json()\n self.assertEqual(type(data), dict)\n self.assertEqual(data['userId'], 1)\n self.assertEqual(str(data.keys()), \"dict_keys(['userId', 'id', 'title', 'completed'])\")", "title": "" }, { "docid": "058cfc46b8dd39aa26251e1797d781ad", "score": "0.5761838", "text": "def test_update_book(self):\n\n book3 = {\n\n \"title\": \"book3\",\n \"description\": \"Another awesome read\",\n \"category\": \"fiction\",\n \"price\": 110,\n \"quantity\": 50,\n \"minimum\": 4,\n \"image_url\": \"new_url\",\n \"updated_by\": 0\n }\n access_token = self.get_token(self.test_owner)\n self.client().post(self.url + 'products',\n headers={\"Authorization\": \"Bearer \" + access_token}, json=self.test_book)\n response = self.client().put(self.url + 'products/1',\n headers={\"Authorization\": \"Bearer \" + access_token}, json=book3)\n json_data = json.loads(response.data)\n self.assertTrue(json_data.get('Message'))\n self.assertEqual(json_data.get('Message'),\n \"Success! Book details updated!\")\n self.assertEqual(response.status_code, 201)", "title": "" }, { "docid": "a0670b26f77552221251d675ff6e5f29", "score": "0.5753021", "text": "def test_update_single_todos_succesfully(self):\n todo_1 = Todo(title=\"test\", user_id=self.user_1.id)\n todo_2 = Todo(title=\"test2\", user_id=self.user_1.id)\n todo_1.save()\n todo_2.save()\n\n todo_update = {\"title\": \"new title\"}\n response = self.client.put(f'api/todos/{todo_1.id}', json=todo_update, headers=self.headers_1)\n json_data = response.get_json()\n self.assert200(response)\n self.assertEqual(json_data[\"message\"], \"todo updated\")\n self.assertEqual(json_data[\"todo\"][\"title\"], todo_update[\"title\"])", "title": "" }, { "docid": "c8399878c0ce91aa859e61c3100c6487", "score": "0.57476807", "text": "def test_string_in_json_field(self):\r\n json_obj = 'blah blah'\r\n obj = self.json_model.objects.create(json=json_obj)\r\n new_obj = self.json_model.objects.get(id=obj.id)\r\n\r\n self.assertEqual(new_obj.json, json_obj)", "title": "" }, { "docid": "7096c5ae01a25efe60c6472124b0ed03", "score": "0.57440233", "text": "def test_api_can_update_an_entry(self):\n response = self.client.post(\n '/api/v1/entries',\n json=self.entry\n )\n self.assertEqual(response.status_code, 201)\n response = self.client.put(\n '/api/v1/entries/1',\n json={\n 'title': 'just edit',\n 'journal': 'thats not what really happened'\n }\n )\n self.assertEqual(response.status_code, 201)", "title": "" }, { "docid": "b721840b0c35b689180db2ecc78beb8e", "score": "0.5740454", "text": "def test_load_json_file(self):\n anonymize_json.load_json_file('raw/foo/foo.json')\n self.assertTrue(os.path.exists('raw/foo/foo.json'))\n self.assertTrue(os.path.isfile('raw/foo/foo.json'))", "title": "" }, { "docid": "6eea01d8d06a43ab816818709740205d", "score": "0.57260627", "text": "def 
test_update_profile(self):\n self.client.login(username=self.username, password=self.password)\n url = '/author/1/'\n user_json = {\n \"type\": \"author\",\n \"id\": host + \"author/1\",\n \"host\": \"https://chatbyte.herokuapp.com/chat/author/2/\",\n \"displayName\": \"asdfasdfasdfadsfasd\",\n \"url\": \"https://chatbyte.herokuapp.com/chat/author/2/profile/\",\n \"github\": \"https://github.com/Jeremy0818\"\n }\n response = self.client.post(url, user_json, format='json', **{'HTTP_X_SERVER': host})\n self.assertEqual(response.status_code, 201)\n self.assertJSONEqual(\n str(response.content, encoding='utf8'),\n user_json\n )", "title": "" }, { "docid": "bd5a4acbd47ba028701be8223b4a569a", "score": "0.57085645", "text": "def test_update_data(self):\n user = User.objects.get(email=self.login_data['email'])\n self.assertIsNone(user.instructor.bio_title)\n self.assertIsNone(user.instructor.bio_description)\n self.assertIsNone(user.instructor.music)\n data = {\"bioTitle\": \"An amateur musician\",\n \"bioDescription\": \"I'm an amateur musician instructor\",\n \"music\": [\"flute\", \"pan flute\"]\n }\n response = self.client.put(self.url, data=json.dumps(data), content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, msg=response.content.decode())\n user.refresh_from_db()\n self.assertEqual(user.instructor.bio_title, data['bioTitle'])\n self.assertEqual(user.instructor.bio_description, data['bioDescription'])\n self.assertEqual(user.instructor.music, data['music'])", "title": "" }, { "docid": "6216b28ba3164623b0d0644eaad82112", "score": "0.5694046", "text": "def test_source_put(self):\n co = Source.objects.all()[0]\n self.api_client.client.login(username=username, password=password)\n co_data = self.deserialize(self.api_client.get(\n self.detail_url % co.pk))\n new_data = co_data.copy()\n new_data['slug'] = 'eu-jrc'\n\n self.assertEqual(Source.objects.count(), 2)\n self.assertHttpAccepted(self.api_client.put(\n self.detail_url % co.pk, format='json', data=new_data))\n self.assertEqual(Source.objects.count(), 2)\n\n self.assertEqual(Source.objects.get(pk=co.pk).slug, \n 'eu-jrc')", "title": "" }, { "docid": "4d86ec3d930de972f7879c3881a3d8ec", "score": "0.569166", "text": "def test_update_meal(self):\n response = self.client.put('/api/v1/meals/2', data = json.dumps(self.data) , content_type = 'application/json')\n result = json.loads(response.data)\n self.assertEqual(result[\"message\"], \"meal has been modified\")\n self.assertEqual(response.status_code, 200)", "title": "" }, { "docid": "36fd9b199547ce7b5e17199f7b7aafb0", "score": "0.5689623", "text": "def update_json(self, json_file, new_json_file):\n with open(new_json_file) as f:\n new_data = json.load(f)\n if os.path.exists(json_file):\n with open(json_file) as f:\n data = json.load(f)\n for element in new_data.keys():\n if element not in data.keys():\n data[element] = new_data[element]\n else:\n print(\"There are conflicts!! 
cannot update\")\n else:\n print(\"new file\")\n data = new_data\n self._write_json(json_file, data)", "title": "" }, { "docid": "53c4c96cc66748ad3a0199f4b7f3bb6b", "score": "0.56810564", "text": "def test_put_inplace(self):\n test = Test()\n test.url = self.prefix + '/api/person/1/'\n test.method = u'PUT'\n test.body = '{\"first_name\": \"Gaius\",\"id\": 1,\"last_name\": \"Baltar\",\"login\": \"gbaltar\"}'\n test.headers = {u'Content-Type': u'application/json'}\n test_response = resttest.run_test(test)\n self.assertEqual(True, test_response.passed)\n self.assertEqual(200, test_response.response_code)", "title": "" }, { "docid": "a81d07a17aafb6864bce683d2d8b7177", "score": "0.5672969", "text": "async def test_update_extracted_editor(standard_database: Tuple[CouchDB, dict], http_client: dict) -> None:\n session, objs = standard_database\n response = await http_client['put'](f'/api/jokes/{objs[\"jokes\"][\"two\"][\"_id\"]}',\n body={'type': 'jokes',\n 'id': objs['jokes']['two']['_id'],\n 'attributes': {\n 'actions': [\n {\n 'coordinates': [20, 13, 213, 55]\n },\n ],\n },\n 'relationships': {'source': {'data': {'type': 'sources',\n 'id': objs['sources']['one']['_id']}}}}, # noqa: E501\n token=auth_token(objs['users']['editor']))\n assert response.code == 200\n joke = json.load(response.buffer)['data']\n assert joke\n assert joke['attributes']['title'] == '[Untitled]'\n assert joke['attributes']['status'] == 'extraction-verified'\n assert joke['attributes']['coordinates'] == [20, 13, 213, 55]\n assert len(joke['attributes']['activity']) == 3\n assert joke['attributes']['activity'][2]['action'] == 'extraction-verified'\n assert joke['attributes']['activity'][2]['user'] == objs['users']['editor']['_id']\n assert 'auto' not in joke['attributes']['transcriptions']\n jokes_db = await session['jokes']\n db_joke = await jokes_db[joke['id']]\n image = Attachment(db_joke, 'image')\n with Image.open(BytesIO(await image.fetch())) as img:\n assert img\n assert img.size == (193, 42)", "title": "" }, { "docid": "70bb5d056070282609e46a5e8ab44804", "score": "0.56674474", "text": "def is_json_unique(db, json_object):\n q = Query()\n if len(db.search(q.url == json_object['url'])) == 0:\n return True\n return False", "title": "" }, { "docid": "6d42021515eb086f074ee3eecb3564e6", "score": "0.5666417", "text": "def update_dict(deps_lines, deps_ast, dict_node, git_revision):\n for key, value in zip(dict_node.keys, dict_node.values):\n if key.__class__ is ast.Str and key.s == 'url':\n return update_node(deps_lines, deps_ast, value, git_revision)", "title": "" }, { "docid": "a46779d32dbf4f2727180b48f550802e", "score": "0.5661213", "text": "def test_nested_replacement():\n json = dict(\n object=[dict(type='material_run', uids={'my_id': '1'}),\n dict(type='material_run', uids={'my_id': '2'})]\n )\n replaced_json = replace_objects_with_links(json)\n assert replaced_json == {'object': [{'type': 'link_by_uid', 'scope': 'my_id', 'id': '1'},\n {'type': 'link_by_uid', 'scope': 'my_id', 'id': '2'}]}", "title": "" }, { "docid": "d9c3ff6646fee1c3e315b96b9030a837", "score": "0.5624395", "text": "def test_update_404(client, url):\n data = {\"data\": 'not relevant for this test.'}\n res = client.put(url, data=data)\n assert res.status_code == 404\n assert json.loads(res.data) == {'status': 'Resource not found'}", "title": "" }, { "docid": "8c5e018ac9708db6358b48fbf3c21374", "score": "0.56228095", "text": "def test_valid_dict() -> None:\n tester.test_dict(schema=schema, data=data, reference='placeholder')", "title": "" }, { "docid": 
"97593d9ca10bcd1e250a428e1851be86", "score": "0.5619147", "text": "def handle_json(f, user):\n\n try:\n data = json.loads(f['file'].read().decode('utf-8'))\n except:\n return -1\n\n current = Creature.objects.filter(owner=user)\n current_name_url = [(x.name, x.img_url) for x in current]\n current_full = [(x.name, x.img_url, x.size, x.CR, x.creature_type) for x in current]\n size_map = {v: k for k, v in dict(Creature.CREATURE_SIZE_CHOICES).items()}\n creature_type_map = {v: k for k, v in dict(Creature.CREATURE_TYPE_CHOICES).items()}\n skip = 0\n obj_list = []\n for k,i in data.items():\n # mandatory fields\n try:\n name = i['name']\n img_url = i['img_url']\n name_url = (name,img_url)\n except:\n skip += 1\n continue\n\n # fix illegal size (default to medium)\n try:\n short_size = size_map[i['creature_size']]\n except:\n short_size = Creature.MEDIUM\n\n # fix illegal types (default to undefined)\n try:\n short_type = creature_type_map[i['creature_type']]\n except:\n short_type = Creature.UNDEFINED\n\n # fix illegal CRs (default to 0)\n try:\n cr = float(Fraction(i['CR']))\n if cr < 0 or cr > 1000: cr = 0\n except:\n cr = 0\n\n # check if unique\n if name_url in current_name_url:\n full_tup = (name, img_url, short_size, cr, short_type)\n if full_tup in current_full:\n # excact duplicate\n skip += 1\n continue\n else:\n # updated attributes\n # this is kinda slow :(\n Creature.objects.filter(owner=user, name=name, img_url=img_url).update(size=short_size, CR=cr, creature_type=short_type)\n current_full.append(full_tup)\n continue\n\n current_name_url.append(name_url)\n\n # if everything is ok, generate the object and store it\n obj = Creature(owner=user, name=i['name'], size=short_size, img_url=i['img_url'], CR=cr, creature_type=short_type)\n obj_list.append(obj)\n\n if len(obj_list) > 0:\n # MUCH faster than one query per entry!\n Creature.objects.bulk_create(obj_list)\n return skip", "title": "" }, { "docid": "5cb37fa8d962f49b3b0e615658dee244", "score": "0.56105316", "text": "def update_cleans(res):\r\n\r\n for isbn, info in res:\r\n path = os.path.join('jsons', 'clean', f'{isbn}.json')\r\n\r\n if os.path.exists(path) is False:\r\n print('No association for', isbn)\r\n continue\r\n\r\n print(path)\r\n with open(path, encoding='utf') as stream:\r\n content = json.load(stream)\r\n\r\n content['url'] = info['url']\r\n content['image'] = info['image']\r\n content['files'] = info['files']\r\n content['root'] = info['root']\r\n write_json(path, content)", "title": "" }, { "docid": "fb8b1eb66c3d0b8b88a1b5834d730922", "score": "0.5609468", "text": "def test_update_book(client, sample_book):\n data = {\n 'new_title': 'An updated book name', 'new_author': 'V. E. 
Schwab', 'new_isbn': '0765376466', 'new_date_of_publication': '2016-01-19 00:00:00'\n }\n res = client.put(f\"/books/{sample_book['data']['id']}\", data=data)\n loaded_data = json.loads(res.data)\n sample_book['data']['title'] = 'An updated book name'\n clone = deepcopy(sample_book)\n del clone['data']['id']\n verify_model(loaded_data['data'], clone['data'], 'book')\n assert res.status_code == 200\n assert loaded_data['status'] == 'success'", "title": "" }, { "docid": "5283c1951bd7aaf7ef0bac31e119c689", "score": "0.5608563", "text": "async def test_update_with_json_attrs_not_dict(\n hass: HomeAssistant,\n mqtt_mock_entry: MqttMockHAClientGenerator,\n caplog: pytest.LogCaptureFixture,\n) -> None:\n await help_test_update_with_json_attrs_not_dict(\n hass,\n mqtt_mock_entry,\n caplog,\n fan.DOMAIN,\n DEFAULT_CONFIG,\n )", "title": "" }, { "docid": "4ccd30c22b141b282e798d4084ffb690", "score": "0.56053996", "text": "def test_update_not_owner(self) -> None:\n\n self.assertEqual(first=self.post.title, second='first title for posts')\n url = reverse('post:post-detail', args=(self.post.pk,))\n data = {\n 'title': 'new title'\n }\n json_data = json.dumps(data)\n self.client.credentials(HTTP_AUTHORIZATION=self.token_1)\n response = self.client.patch(path=url, data=json_data,\n content_type='application/json')\n self.assertEqual(first=status.HTTP_403_FORBIDDEN, second=response.status_code)\n self.post.refresh_from_db()\n self.assertEqual(first=self.post.title, second='first title for posts')", "title": "" }, { "docid": "50f8e58da7ea7bc44d8c629a6cb02219", "score": "0.56043154", "text": "async def test_update_with_json_attrs_not_dict(\n hass: HomeAssistant,\n mqtt_mock_entry: MqttMockHAClientGenerator,\n caplog: pytest.LogCaptureFixture,\n) -> None:\n await help_test_update_with_json_attrs_not_dict(\n hass,\n mqtt_mock_entry,\n caplog,\n light.DOMAIN,\n DEFAULT_CONFIG,\n )", "title": "" }, { "docid": "1b56d5c8561388468ef60ce6eeda3115", "score": "0.56003255", "text": "def test_json_load_ok(self, copy_base_file, tested_class_obj):\n data_test_func = tested_class_obj.json_load()\n assert type(data_test_func) == type(dict())\n with open(os.path.abspath(\"shopDataBase.json\")) as file:\n data_to_check = simplejson.load(file)\n assert data_test_func == data_to_check", "title": "" }, { "docid": "14273231bb278b328ddf96cf57857f57", "score": "0.55908644", "text": "def test_put_information_assert_other_information_not_updated(self):\n self.client.put(self.url, self.data)\n information2 = Information.objects.get(id=2)\n\n self.assertNotEqual(information2.text, self.data[\"text\"])", "title": "" }, { "docid": "e19e8fd72b27153682ed648f3d69a693", "score": "0.5589484", "text": "def test_save(self):\n storage = FileStorage()\n new_dict = {}\n instance = BaseModel()\n instance_key = instance.__class__.__name__ + \".\" + instance.id\n new_dict[instance_key] = instance\n save = FileStorage._FileStorage__objects\n FileStorage._FileStorage__objects = new_dict\n storage.save()\n FileStorage._FileStorage__objects = save\n for key, value in new_dict.items():\n new_dict[key] = value.to_dict()\n string = json.dumps(new_dict)\n with open(\"file.json\", \"r\") as f:\n js = f.read()\n self.assertEqual(json.loads(string), json.loads(js))", "title": "" }, { "docid": "f30a4398f1e4cda82154e00577caa86f", "score": "0.55803996", "text": "async def test_update_extracted_same_user(standard_database: Tuple[CouchDB, dict], http_client: dict) -> None:\n session, objs = standard_database\n response = await 
http_client['put'](f'/api/jokes/{objs[\"jokes\"][\"two\"][\"_id\"]}',\n body={'type': 'jokes',\n 'id': objs['jokes']['two']['_id'],\n 'attributes': {\n 'actions': [\n {\n 'coordinates': [20, 13, 213, 55]\n },\n ],\n },\n 'relationships': {'source': {'data': {'type': 'sources',\n 'id': objs['sources']['one']['_id']}}}}, # noqa: E501\n token=auth_token(objs['users']['one']))\n assert response.code == 200\n joke = json.load(response.buffer)['data']\n assert joke\n assert joke['attributes']['title'] == '[Untitled]'\n assert joke['attributes']['status'] == 'extracted'\n assert joke['attributes']['coordinates'] == [20, 13, 213, 55]\n assert len(joke['attributes']['activity']) == 2\n assert joke['attributes']['activity'][1]['action'] == 'extracted'\n assert joke['attributes']['activity'][1]['user'] == objs['users']['one']['_id']\n assert 'auto' not in joke['attributes']['transcriptions']\n jokes_db = await session['jokes']\n db_joke = await jokes_db[joke['id']]\n image = Attachment(db_joke, 'image')\n with Image.open(BytesIO(await image.fetch())) as img:\n assert img\n assert img.size == (193, 42)", "title": "" }, { "docid": "d8fc8b45ba4d4ac49f19da7b0d76e4bd", "score": "0.5579403", "text": "def test_update():\n data = {\n \"first_name\": \"vickey\"\n }\n response = requests.post(\"http://127.0.0.1:5000/edit/1\", json=json.dumps(data))\n \n assert 200== response.status_code", "title": "" }, { "docid": "1674c71b3e22244196d1d12cecd0babb", "score": "0.5569923", "text": "def test_file_json(self):\n my_model = BaseModel()\n my_model.save()\n here = os.path.exists('file.json')\n self.assertEqual(here, True)", "title": "" }, { "docid": "33e35d0a1e117515be941760093673ce", "score": "0.55650264", "text": "def test_app_json_update(self):\n url = '/app.json'\n self.sign_in(self.peter)\n response = self.admin_client.put(url, \"\"\"\\\n{\n\"title\": \"My Application\",\n\"tags\": [\"test\", \"myapp\"]\n}\n\"\"\", content_type='text/plain')\n self.assertContains(response, '\"statusText\": \"Saved\"')\n # Retrieve updated meta info.\n response = self.admin_client.get(url)\n decoded = json.loads(response.content)\n self.assertEqual(decoded['title'], \"My Application\")\n self.assertTrue('myapp' in decoded['tags'])\n self.assertTrue('test' in decoded['tags'])", "title": "" }, { "docid": "f31c40b1ef038bef7c81bce9bbf899f7", "score": "0.5559278", "text": "def test_book_missing_key(self):\n path = JSON_PATH + \"author_miss_related_authors.json\"\n try:\n insert_into_db(path, \"author\")\n self.assertTrue(False) # An error should be thrown\n except:\n self.assertTrue(True)", "title": "" }, { "docid": "a5dd85d106ad1e948cd27413e194c595", "score": "0.55547696", "text": "def test_put_request_remapped(self):\n client = self._create_client('basic.json')\n res = client.put('/user/1')\n expected = self._read_json('basic/user/userid_put.json')\n self.assertEqual(to_unicode(res.data), expected)", "title": "" }, { "docid": "88c00b37080abafbea068190147ec609", "score": "0.5551972", "text": "def test_json_to_dict(self):\n original_dict = {\"key\": \"my_value\"}\n json_content = \"\"\"\n {\n \"key\" : \"my_value\"\n }\n \"\"\"\n\n with open(\"j_file.json\", \"w\") as j_file:\n j_file.write(json_content)\n\n myparams = rtpy.json_to_dict(\"j_file.json\")\n os.remove(\"j_file.json\")\n\n if myparams != original_dict:\n message = \"Created dictionary doesn't match original dictionary !\"\n raise self.RtpyTestError(message)", "title": "" }, { "docid": "d25ad91049405136467a1a7edfbb1a98", "score": "0.55396926", "text": "async def 
test_update_with_json_attrs_bad_JSON(hass, mqtt_mock, caplog):\n assert await async_setup_component(hass, vacuum.DOMAIN, {\n vacuum.DOMAIN: {\n 'platform': 'mqtt',\n 'name': 'test',\n 'state_topic': 'test-topic',\n 'json_attributes_topic': 'attr-topic'\n }\n })\n\n async_fire_mqtt_message(hass, 'attr-topic', 'This is not JSON')\n await hass.async_block_till_done()\n\n state = hass.states.get('vacuum.test')\n assert state.attributes.get('val') is None\n assert 'Erroneous JSON: This is not JSON' in caplog.text", "title": "" }, { "docid": "dc992f8bd434260e14c99f80a45bcb37", "score": "0.5530304", "text": "def test_put_created(self):\n test = Test()\n test.url = self.prefix + '/api/person/100/'\n test.method = u'PUT'\n test.expected_status = [200, 201, 204]\n test.body = '{\"first_name\": \"Willim\",\"last_name\": \"Adama\",\"login\":\"theadmiral\", \"id\": 100}'\n test.headers = {u'Content-Type': u'application/json'}\n test_response = resttest.run_test(test)\n self.assertEqual(True, test_response.passed)\n self.assertEqual(201, test_response.response_code)\n\n # Test it was actually created\n test2 = Test()\n test2.url = test.url\n test_response2 = resttest.run_test(test2)\n self.assertTrue(test_response2.passed)\n self.assertTrue(\n u'\"last_name\": \"Adama\"' in test_response2.unicode_body())\n self.assertTrue(\n u'\"login\": \"theadmiral\"' in test_response2.unicode_body())", "title": "" }, { "docid": "9fc488571946624bf256cba0285591c5", "score": "0.55254894", "text": "def test_03_update(self):\n\n resp = yield self.client.insert(self.test_fixture)\n obj = yield self.client.find_one_by_id(resp)\n obj[test_attr] = test_val\n resp = yield self.client.update(resp, obj)\n ok_(resp.get(\"updatedExisting\"), \"Update did not succeed.\")", "title": "" }, { "docid": "6108b8dba78f948f7a0e080059dead73", "score": "0.5514404", "text": "def test_put_post(self):\n self.client.login(username=self.username, password=self.password)\n url = self.user.profile.id + '/posts/789789'\n post_json = {\n \"type\": \"post\",\n \"id\": \"789789\",\n \"title\": \"fffffffffff\",\n \"source\": \"https://chatbyte.herokuapp.com/\",\n \"origin\": \"https://chatbyte.herokuapp.com/\",\n \"description\": \"asdf\",\n \"contentType\": \"text\",\n \"content\": \"asdf\",\n \"author\": {\n \"type\": \"author\",\n \"id\": \"1\",\n \"host\": \"https://chatbyte.herokuapp.com/\",\n \"displayName\": \"test\",\n \"url\": \"https://chatbyte.herokuapp.com/\",\n \"github\": \"https://chatbyte.herokuapp.com/\"\n },\n \"categories\": \"text/plain\",\n \"count\": 1,\n \"size\": 1,\n \"comment_url\": \"1\",\n \"comments\": [],\n \"published\": \"2021-03-26T19:04:53Z\",\n \"visibility\": \"public\",\n \"unlisted\": \"False\"\n }\n response = self.client.put(url, post_json, format='json')\n self.assertEqual(response.status_code, 201)", "title": "" }, { "docid": "6201375b31c0093992057916f440a9b3", "score": "0.5505808", "text": "def test_json_list(self):\r\n json_obj = [\"my\", \"list\", \"of\", 1, \"objs\", {\"hello\": \"there\"}]\r\n\r\n obj = self.json_model.objects.create(json=json_obj)\r\n new_obj = self.json_model.objects.get(id=obj.id)\r\n self.assertEqual(new_obj.json, json_obj)", "title": "" }, { "docid": "2a8cbe32ff73e4e25b2dcd34be4ec612", "score": "0.55031574", "text": "def test_save_method(self):\n prev_all_objs = storage.all()\n\n obj = BaseModel()\n key = \"{}.{}\".format(type(obj).__name__, obj.id)\n\n self.assertIn(key, prev_all_objs.keys())\n\n with open('file.json', mode='r', encoding='utf-8') as file:\n dict_loaded = 
json.load(file)\n\n self.assertIs(type(dict_loaded), dict)\n self.assertNotIn(key, dict_loaded.keys())\n\n storage.save()\n\n with open('file.json', mode='r', encoding='utf-8') as file:\n dict_loaded = json.load(file)\n\n self.assertIs(type(dict_loaded), dict)\n self.assertIn(key, dict_loaded.keys())", "title": "" }, { "docid": "b25542d18e247e5899700c56bd5ca8e4", "score": "0.55023897", "text": "def test_patching_things(self):\n response = self.app.patch(\n \"/api/1.0/news/{}\".format(int(self.news_ids[0])),\n data=json.dumps([\n {\"op\": \"add\", \"path\": \"/title\", \"value\": \"Multi title\"},\n {\"op\": \"copy\", \"from\": \"/contents\", \"path\": \"/author\"},\n {\"op\": \"remove\", \"path\": \"/updated\"},\n ]),\n content_type=\"application/json\",\n headers={\n 'User': self.user_id,\n 'Authorization': self.access_token\n }\n )\n data = json.loads(response.data.decode())\n self.assertEqual(200, response.status_code)\n self.assertEqual(\"Multi title\", data[\"title\"])\n self.assertEqual(\"UnitTest Contents\", data[\"author\"])\n self.assertEqual(None, data[\"updated\"])", "title": "" }, { "docid": "5f6064f17cce7d2e4b273e27a723469d", "score": "0.5502168", "text": "async def help_test_update_with_json_attrs_bad_JSON(\n hass, mqtt_mock, caplog, domain, config\n):\n assert await async_setup_component(hass, domain, config,)\n\n async_fire_mqtt_message(hass, \"attr-topic\", \"This is not JSON\")\n\n state = hass.states.get(f\"{domain}.test\")\n assert state.attributes.get(\"val\") is None\n assert \"Erroneous JSON: This is not JSON\" in caplog.text", "title": "" }, { "docid": "d138858ff71247912dd8188d3d11520f", "score": "0.5498867", "text": "def test_photo_info_updated(self):\n self.assertEquals(self.photo1.title, 'photo1')\n self.assertEquals(self.photo1.description, 'my photo')\n self.client.post(self.url, self.data)\n photo = Photo.objects.filter(user=self.user).first()\n self.assertEqual(photo.title, 'photo1 updated')\n self.assertEqual(photo.description, 'my photo updated')", "title": "" }, { "docid": "24b2e1d7f614c30976a957ffceb6ad88", "score": "0.54971737", "text": "def test_put_request(self):\n client = self._create_client('basic.json')\n res = client.put('/user')\n expected = self._read_json('basic/user_put.json')\n self.assertEqual(to_unicode(res.data), expected)", "title": "" }, { "docid": "698eeb685dbfeac81a316899e85174a5", "score": "0.54742324", "text": "def test_json_field_create(self):\r\n json_obj = {\r\n \"item_1\": \"this is a json blah\",\r\n \"blergh\": \"hey, hey, hey\"}\r\n\r\n obj = self.json_model.objects.create(json=json_obj)\r\n new_obj = self.json_model.objects.get(id=obj.id)\r\n\r\n self.assertEqual(new_obj.json, json_obj)", "title": "" }, { "docid": "add9bf57921654af8f386cef46534059", "score": "0.54733855", "text": "def test_author_post(cls):\n client = Client()\n url = reverse(\"Author\", kwargs={\"author_id\":cls.author_id_2})\n\n # Modify the author's information via POST\n new_username = \"TestAuthor3\"\n new_github = \"github.com/testauthor3\"\n json = {\n \"displayName\":new_username,\n \"github\":new_github\n }\n \n # Test unauthenticated request\n response = client.post(url, json, content_type=\"application/json\")\n cls.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n # Force an incorrect test login and test incorrectly authenticated request\n client.force_login(Author.objects.get(pk=cls.author_id_1))\n response = client.post(url, json, content_type=\"application/json\")\n cls.assertEqual(response.status_code, 
status.HTTP_401_UNAUTHORIZED)\n\n # Force a test login as the second author and test authenticated request\n client.force_login(Author.objects.get(pk=cls.author_id_2))\n response = client.post(url, json, content_type=\"application/json\")\n cls.assertEqual(response.status_code, status.HTTP_200_OK)\n\n # Try GET on updated object and see if they match\n response = client.get(url)\n cls.assertEqual(response.status_code, status.HTTP_200_OK)\n cls.assertEqual(response.json(), AuthorToJSON(Author.objects.get(pk=cls.author_id_2)))\n\n # Test a request on an object that doesn't exist\n url = reverse(\"Author\", kwargs={\"author_id\":uuid.uuid4()})\n response = client.post(url, json, content_type=\"application/json\")\n cls.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n\n # Test a request with an invalid ID\n url = reverse(\"Author\", kwargs={\"author_id\":\"abc\"})\n response = client.post(url, json, content_type=\"application/json\")\n cls.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "title": "" }, { "docid": "3331870519ca752f2a59b99d0d6f65b0", "score": "0.54614437", "text": "def test_that_books_are_updated_correctly(client):\n user = User.query.first()\n access_token = create_access_token(identity=user)\n\n client.set_cookie('localhost', 'access_token', access_token)\n # create new book\n data = book_data()\n response = client.post(base_route, json=data)\n\n # update created book\n response_data = convert_to_dict(response.data)\n data['title'] = 'The Art of Warring'\n response = client.put(\n '{}/{}'.format(base_route, response_data.get('id')), json=data)\n\n assert_that(response.status_code).is_equal_to(200)\n assert_that(response.headers['Content-Type']).contains('application/json')\n\n response_data = convert_to_dict(response.data)\n keys = ('id', 'isbn', 'title', 'num_of_pages',\n 'publisher', 'publication_date',)\n\n assert_that(response_data).contains_key(*keys)", "title": "" }, { "docid": "c86dea4ab8a3e3cf899ecc432f6be98a", "score": "0.54607284", "text": "def test_update_from_dict_param(json_):\n map1 = gnome.map.ParamMap((5, 5), 12000, 40)\n serial = map1.serialize(json_)\n map2 = gnome.map.ParamMap((6, 6), 20000, 40)\n dict_ = gnome.map.ParamMap.deserialize(serial)\n map2.update_from_dict(dict_)\n\n assert map1 == map2", "title": "" }, { "docid": "60826b25de8200a412b3648abe28a061", "score": "0.5455588", "text": "def upload_file(request):\n data = {}\n f = request.FILES['upload_file']\n # Check if file type is correct and get the content\n if f.content_type == 'application/json':\n try:\n content = json.load(f)\n except json.JSONDecodeError:\n message = (\n 'Upload failed. '\n 'File content was either wrong formatted or empty. '\n 'Must be a JSON array of objects with harvester data.'\n )\n messages.warning(request, message)\n return HttpResponseRedirect(reverse('hcc_gui'))\n else:\n message = (\n 'Upload failed. '\n 'File type could not been handled. '\n 'Must be a JSON file!'\n )\n messages.warning(request, message)\n return HttpResponseRedirect(reverse('hcc_gui'))\n\n required_keys = ('name', 'notes', 'url', 'enabled')\n for harvester_data in content:\n # 'content' should be a list of dictionaries\n if not isinstance(harvester_data, collections.Mapping):\n message = (\n 'Validation failed. 
'\n 'File content could not been handled.'\n 'Should be a list of dictionaries!'\n )\n messages.warning(request, message)\n return HttpResponseRedirect(reverse('hcc_gui'))\n\n # The json file should contain the required harvester data\n if not all(key in harvester_data for key in required_keys):\n message = (\n 'Validation failed. '\n 'Key missmatch! Required: name, notes, url, enabled'\n )\n messages.warning(request, message)\n return HttpResponseRedirect(reverse('hcc_gui'))\n\n data = harvester_data.copy()\n if Harvester.objects.filter(name=harvester_data['name']).exists():\n # Harvester already exists -> update harvester\n harvester = Harvester.objects.get(name=harvester_data['name'])\n data['notes'] = harvester.notes # Notes should not be updated\n if ((harvester.url == harvester_data['url']\n and harvester.enabled == harvester_data['enabled'])):\n continue\n elif not harvester.url == harvester_data['url']:\n if Harvester.objects.filter(\n url=harvester_data['url']).exists():\n # The url should be unique. Leave the existing harvester data\n # and ignore the new one.\n continue\n # Create new Harvester with new url\n harvester = Harvester(owner=request.user)\n counter = 1\n while True:\n # Loop until the harvester name is not already used\n postfix = '_{}'.format(counter)\n temp_name = harvester_data['name'] + postfix\n if not Harvester.objects.filter(name=temp_name).exists():\n data['name'] = temp_name\n break\n counter += 1\n elif Harvester.objects.filter(url=harvester_data['url']).exists():\n # The url should be unique. Leave the existing harvester data\n # and ignore the new one\n continue\n else:\n # Create a new harvester\n harvester = Harvester(owner=request.user)\n\n form = ValidateFileForm(data, instance=harvester)\n if form.is_valid():\n form.save()\n else:\n message = (\n 'Validation failed. 
'\n 'Content data could not been saved.'\n )\n messages.warning(request, message)\n return HttpResponseRedirect(reverse('hcc_gui'))\n\n messages.success(request, 'Upload successful!')\n return HttpResponseRedirect(reverse('hcc_gui'))", "title": "" }, { "docid": "f6886adfd2d0a181161b7f926a60acaa", "score": "0.5449913", "text": "def json_file_handler(json_dictionary, choice = 2):\n\n os.makedirs(\"./Json-schema/sample\", exist_ok=True) \n\n if choice == 1:\n # uploading document into mongoDB\n try:\n mongoDB.flight.insert_one(json_dictionary)\n return True\n except Exception as e:\n print(\"An exception occurred ::\", e)\n return False\n elif choice == 2:\n # writing document appending time for unique name\n with open('./Json-schema/sample/sample' + str(time.time()) + '.json', 'w') as fp:\n json.dump(json_dictionary, fp)\n \n return True\n else:\n # writing temporary file for validation\n with open('./Json-schema/sample/temp.json', 'w') as fp:\n json.dump(json_dictionary, fp)\n f = open('./Json-schema/sample/temp.json',) \n data = json.load(f) \n\n # try to validate json file\n res = json_validator(json_schema, data)\n\n # Removing the temporary document\n os.remove(\"./Json-schema/sample/temp.json\")\n \n return res", "title": "" }, { "docid": "e3296114903ef0b00aed36ee262d51e1", "score": "0.54477036", "text": "def _update(self, data: dict[str, Any]) -> None:", "title": "" }, { "docid": "b48f8fb54bc541c15690862d99e41a0f", "score": "0.5446561", "text": "def test_put_information_assert_information_updated(self):\n self.client.put(self.url, self.data)\n information_list = Information.objects.all()\n updated_information = Information.objects.filter(id=1)\n\n self.assertEqual(len(information_list), 2)\n self.assertEqual(len(updated_information), 1)\n self.assertEqual(updated_information[0].text, self.data[\"text\"])", "title": "" }, { "docid": "a8214138cd51f7d5f4870b79bfcc3b59", "score": "0.5443571", "text": "def test_change_user_image_with_valid_one(self):\n\n # Setup test\n data = {\n 'f_name': 'ibrahem',\n 's_name': 'amer',\n 'bio': 'bla bla bla',\n 'b_date': '1995-17-12',\n 'contact': {\n 'email': '[email protected]',\n 'phone': '01092053058',\n 'fb': 'ibrahem3amer',\n }\n }\n new_person = Person.objects.create()\n new_person.first_name = data['f_name']\n new_person.sur_name = data['s_name']\n new_person.bio = data['bio']\n new_person.birth_date = data['b_date']\n new_person.contacts = data['contact']\n new_person.save()\n db_result = Person.objects.first()\n\n # Exercise test\n db_result.photo = 'test.jpg'\n db_result.save()\n\n # Assert test\n self.assertIn('test.jpg', db_result.photo.path)", "title": "" }, { "docid": "8abb07ba9ed00dd4b10a26eae583b46a", "score": "0.54350185", "text": "def test_get_put(self):\n # Pull the object we'll deface\n obj = self.private_client.documents.get(\"15144-mitchrpt\")\n # Create random strings we will save to the editable attributes\n title = 'The Mitchell Report (%s)' % get_random_string()\n source = 'DLA Piper (%s)' % get_random_string()\n description = get_random_string()\n if obj.resources.related_article == 'http://documents.latimes.com':\n related_article = 'http://documentcloud.org'\n else:\n related_article = 'http://documents.latimes.com'\n if obj.resources.published_url == 'http://documents.latimes.com':\n published_url = 'http://documentcloud.org'\n else:\n published_url = 'http://documents.latimes.com'\n # Set the random strings our local object's attributes\n obj.title = title\n obj.source = source\n obj.description = description\n 
obj.resources.related_article = related_article\n obj.resources.published_url = published_url\n # Save the changes up to DocumentCloud\n obj.put()\n # Pull the object again and verify the changes stuck\n obj = self.private_client.documents.get(\"15144-mitchrpt\")\n self.assertEqual(obj.title, title)\n self.assertEqual(obj.source, source)\n self.assertEqual(obj.description, description)\n self.assertEqual(obj.resources.related_article, related_article)\n self.assertEqual(obj.resources.published_url, published_url)", "title": "" }, { "docid": "622fbaf2bf02f14209b5c82dbd547ab9", "score": "0.5425624", "text": "def test_modify_book_author_method(self):\n book_id = 3\n author = \"Chinua Achebe\"\n response = self.book.modify_book_author(book_id, author)\n self.assertEqual(response, [{\"Title\": \"The Storm\", \"Author\": \"Chinua Achebe\", \"Copies\": 3}])", "title": "" }, { "docid": "38d23beedcac969f329ea38faa3854bc", "score": "0.5424808", "text": "def test_update_user_proper_auth(client, sample_user):\n data = {\n 'new_first_name': 'Steve', 'new_last_name': 'Williams', 'new_email': '[email protected]', 'new_password': 'p4$$VV0RD','password': 'p4$$VV0RD'\n }\n res = client.put(f\"/users/{sample_user['data']['id']}\", data=data)\n loaded_data = json.loads(res.data)\n sample_user['data']['email'] = '[email protected]'\n clone = deepcopy(sample_user)\n del clone['data']['id']\n verify_model(loaded_data['data'], clone['data'], 'user')\n assert res.status_code == 200\n assert loaded_data['status'] == 'success'", "title": "" }, { "docid": "ffeeaacff675586efcbdc79ca32d9e9d", "score": "0.54231113", "text": "def test_json(self):\n tmp_file = tempfile.NamedTemporaryFile()\n self.result.to_json_file(tmp_file.name)\n prior_info_from_json = StructureOptResult.load_json(tmp_file.name)\n self.assertEqual(prior_info_from_json.as_dict(), self.result.as_dict())", "title": "" }, { "docid": "803bcad6255bcb6f8cc496bd5bd29708", "score": "0.5417519", "text": "def test_dict_with_fd():\n fd = open(__file__, 'r')\n fd.close()\n obj = {'fd': fd}\n jsonstr = jsonpickle.encode(obj)\n newobj = jsonpickle.decode(jsonstr)\n assert newobj['fd'] is None", "title": "" }, { "docid": "b55f21990ff1b61da6d405f52ae66117", "score": "0.54134893", "text": "def test_invalid_json(self):\n tap_lines = test_utils.get_test_tap_lines('invalid-json.json')\n with self.assertRaises(json.decoder.JSONDecodeError):\n self.persist_lines_with_cache(tap_lines)", "title": "" }, { "docid": "239beb130e31ba3517293b26b929dcdc", "score": "0.5396998", "text": "def test_certificate_slug_academy_id_syllabus_version_put(self):\n model = self.generate_models(authenticate=True, profile_academy=True,\n capability='crud_syllabus', role='potato', syllabus=True,\n certificate=True)\n url = reverse_lazy('admissions:certificate_slug_academy_id_syllabus_version',\n kwargs={'certificate_slug': model['certificate'].slug, 'academy_id': 1,\n 'version': model['syllabus'].version})\n data = {\n 'json': {\n 'ova': 'thus spoke kishibe rohan'\n }\n }\n response = self.client.put(url, data, format='json')\n json = response.json()\n\n self.assertDatetime(json['created_at'])\n self.assertDatetime(json['updated_at'])\n del json['updated_at']\n\n expected = {\n 'academy_owner': model['syllabus'].academy_owner_id,\n 'certificate': model['syllabus'].certificate_id,\n 'created_at': self.datetime_to_iso(model['syllabus'].created_at),\n 'github_url': model['syllabus'].github_url,\n 'id': model['syllabus'].id,\n 'json': data['json'],\n 'private': model['syllabus'].private,\n 'version': 
model['syllabus'].version\n }\n\n self.assertEqual(json, expected)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(self.all_syllabus_dict(), [{\n 'academy_owner_id': model['syllabus'].academy_owner_id,\n 'certificate_id': model['syllabus'].certificate_id,\n 'github_url': model['syllabus'].github_url,\n 'id': model['syllabus'].id,\n 'json': data['json'],\n 'private': model['syllabus'].private,\n 'version': model['syllabus'].version\n }])", "title": "" }, { "docid": "1462a2358a5aae8ea7bac8c547bf47c5", "score": "0.5396549", "text": "def test_update(self):\n feed_name = 'modified'\n meta_temp_dir = tempfile.mkdtemp(dir=_TEMP_DATA_DIR)\n\n meta = JSONFeedMetadata(feed_name=feed_name,\n data_dir=meta_temp_dir)\n\n future = asyncio.ensure_future(meta.fetch())\n metadata: JSONFeedMetadata = _EVENT_LOOP.run_until_complete(future)\n data: str = metadata.data\n\n self.assertIsInstance(data, str)\n self.assertTrue(data)\n\n # should not be ready\n self.assertFalse(meta.is_ready())\n\n _EVENT_LOOP.run_until_complete(meta.update())\n\n # should be ready now\n self.assertTrue(meta.is_ready())\n\n # metadata present in the directory\n self.assertTrue(meta.filename in os.listdir(meta_temp_dir))", "title": "" }, { "docid": "f3853726c5e0a5fd0e90bc088b4fa8ea", "score": "0.53955674", "text": "def test_dict_update(self):\n manager = ExtentionSettingsManager(KEY_PREFIX)\n manager.update(TEST_DATA)\n self.assertEqual(manager.data, TEST_DATA)", "title": "" }, { "docid": "075334b950805eade89cdd5a8bdda459", "score": "0.53892374", "text": "def test_json_init(self):\n ar = LcArchive('/my/file')\n ar._entities['21d34f33-b0af-3d82-9bef-3cf03e0db9dc'] = None # need to avoid KeyError with nsuuid\n ar.load_from_dict(from_json(test_file), jsonfile='/my/test/json')\n self.assertIn(test_json['dataReference'], ar.catalog_names)\n self.assertEqual(ar.ref, 'local.my.file')\n self.assertSequenceEqual([k for k in ar.get_sources(test_json['dataReference'])], [None])\n uuid = 'cec3a58d-44c3-31f6-9c75-90e6352f0934'\n nsuuid = ar._ref_to_nsuuid('ha')\n self.assertNotEqual(uuid, nsuuid)\n ent = ar['ha']\n ent1 = ar[nsuuid]\n self.assertIs(ent, ent1)\n self.assertEqual(ar[nsuuid].uuid, uuid)", "title": "" }, { "docid": "6912bf45030462daadfaeae5bb70a630", "score": "0.5388745", "text": "def testJson(self):\n self.jsonTest.writeJson('testfiles/testJson.json', 'w', self.listTest)\n with open('testfiles/testJson.json', 'r') as out:\n textToTest = StringIO(out.read()).getvalue()\n with open('testfiles/correctJson.json', 'r') as out:\n textCorrect = StringIO(out.read()).getvalue()\n self.assertEqual(textToTest, textCorrect)", "title": "" }, { "docid": "62d4d8fcfccd67cca71d1b12c2ae717d", "score": "0.53826165", "text": "def test_indesign_update_photos(scandal, scandal_photo, staff_client):\n data = {'bilete': [{'bildefil': 'scandal.22.jpg', 'bildetekst': 'one'}, ]}\n url = f'{api_url}{scandal.pk}/'\n response = staff_client.patch(url, format='json', data=data)\n assert response.status_code == status.HTTP_200_OK\n\n captions = scandal.images.values_list('caption', flat=True)\n assert sorted(captions) == ['one']\n\n updated_data = staff_client.get(url).data\n assert updated_data['bilete'][0]['bildefil'] == 'scandal.00022.jpg'", "title": "" }, { "docid": "027b642e3b7a288e6cab1eb48c49000f", "score": "0.53770804", "text": "def test_update_un_authorize(self) -> None:\n\n self.assertEqual(first=self.post.title, second='first title for posts')\n url = reverse('post:post-detail', args=(self.post.pk,))\n data = {\n 'title': 
'new title'\n }\n json_data = json.dumps(data)\n response = self.client.patch(path=url, data=json_data,\n content_type='application/json')\n self.assertEqual(first=status.HTTP_401_UNAUTHORIZED, second=response.status_code)", "title": "" }, { "docid": "3fa9ab497a75810bd9e1684ec313a0d6", "score": "0.53744614", "text": "def test_save(self):\n self.assertTrue(os.path.exists(\"file.json\"))", "title": "" }, { "docid": "4789d4160abc06353010883f7eb1161f", "score": "0.5372456", "text": "def test_put_information_assert_return_information(self):\n response = self.client.put(self.url, self.data)\n\n self.assertEqual(len(response.data), 6)\n\n self.assertEqual(response.data[\"id\"], self.information1.id)\n self.assertEqual(response.data[\"codex\"], self.codex1.id)\n self.assertEqual(response.data[\"text\"], self.data[\"text\"])\n self.assertEqual(\n response.data[\"initial_hash\"], java_string_hashcode(self.data[\"text\"])\n )\n self.assertEqual(\n response.data[\"creation_date\"],\n self.information1.creation_date.strftime(\"%Y-%m-%dT%H:%M:%SZ\"),\n )\n self.assertIsNotNone(response.data[\"update_date\"])", "title": "" }, { "docid": "626dd3ed636124e174fb85c765da4507", "score": "0.53711414", "text": "def test_int_in_json_field(self):\r\n json_obj = 1234567\r\n obj = self.json_model.objects.create(json=json_obj)\r\n new_obj = self.json_model.objects.get(id=obj.id)\r\n\r\n self.assertEqual(new_obj.json, json_obj)", "title": "" }, { "docid": "8c96cf6c8ebc49ff223d09937cb4b512", "score": "0.5370629", "text": "def test_torque_json_update_2(self):\n TorqueTest.g.putj('{\"_id\" : \"2\", \"name\" : \"dani\", \"friends\" : [{ \"name\":\"bob\", \"contact\": {\"phone\" :\"888-888-8888\"}}, {\"name\" : \"emily\"}]}')\n expected = {'result': [{'id': 'emily'}, {'id': 'bob'}]}\n actual = TorqueTest.g.v().has(\"name\", \"dani\").out(\"friends\").out().out(\"name\").all()\n self.assertTrue(ordered(expected) == ordered(actual))\n\n # update the json object\n TorqueTest.g.updatej('{\"_id\" : \"2\", \"name\" : \"dani\", \"friends\": [{ \"name\":\"alice\", \"contact\": { '\n '\"phone\" : '\n '\"555-555-5555\"} }, {\"name\" : \"joe\", \"contact\": { \"phone\" : \"111-222-3333\"}}, '\n '{\"name\" : \"fred\"}]}')\n\n expected = {'result': [{'id': 'fred'}, {'id': 'joe'}, {'id': 'alice'}]}\n actual = TorqueTest.g.v(\"dani\").inc().out(\"friends\").out().out(\"name\").all()\n self.assertTrue(ordered(expected) == ordered(actual))\n\n actual = TorqueTest.g.v().has(\"name\", \"dani\").all()\n self.assertTrue({'result': [{'id': '_:_id_2'}]} == actual)\n\n #update the same object again\n TorqueTest.g.updatej('{\"_id\" : \"2\", \"name\" : \"charlie\"}')\n\n actual = TorqueTest.g.v(\"charlie\").inc().out(\"_id\").all()\n self.assertTrue(actual['result'][0]['id'] == '2')\n\n actual = TorqueTest.g.v(\"dani\").inc().out(\"_id\").all()\n self.assertTrue(len(actual['result']) == 0)", "title": "" } ]
0534129d8f69459003afa67094aa5eab
Get a line from syslog throw meaningless log Make a log to form as list
[ { "docid": "e94aecd27f026bc21f7a641c848dd732", "score": "0.58972514", "text": "def preprocess_line(line):\n\n global hostname\n\n temp_log = []\n\n # Remove space\n line_data = line.strip().split(hostname)\n \n # Time creater\n if(line_data[0] is not None):\n time = []\n date = line_data[0].split(' ')\n\n for item in date:\n if (item is not ''):\n time.append(item)\n\n time = time[0] + ' ' + time[1] + ' ' + time[2]\n datetime_object = datetime.strptime('2017 '+time, '%Y %b %d %H:%M:%S')\n \n temp_log.append(str(datetime_object)) \n \n # Content classifier\n #print(\"line : \",line_data,len(line_data))\n if(line_data[1] is not None):\n \n content = str(line_data[1]).split(' ')\n process = content[1]\n \n temp_log.append(process)\n if process == \"charon:\" or process == \"charon-custom:\":\n category = content[2]\n temp_log.append(category)\n\n message = content[3:]\n temp_log.append(message)\n # Do not Analyze kernel log\n else:\n #print(\"process : \",process)\n return ''\n return temp_log", "title": "" } ]
[ { "docid": "8a651a87029f6443409d75cf50639614", "score": "0.6557162", "text": "def log_splitter(log):\n if log is None:\n return []\n return log.splitlines()", "title": "" }, { "docid": "f78b4f63143ca02ed06004b9da286fc8", "score": "0.63221747", "text": "def parse_log(path_to_logfile):\n logfile = open(path_to_logfile, \"r\")\n loglist = []\n for line in logfile:\n line = line.replace(\"[\", \"\")\n line = line.replace(\"]\", \"\")\n line = line.replace(\"'\", \"\")\n line = line.replace(\"(\", \"\")\n line = line.replace(\")\", \"\")\n line = line.replace(\" \", \"\")\n line = line.split(\",\")\n if \"KTakepic\" in line:\n try:\n loglist.append(log_infos(datetime.datetime.fromtimestamp(float(line[0])), line[1],\n datetime.datetime.fromtimestamp(float(line[5])), int(line[3]), line[6][2:],\n int(line[4])))\n\n except:\n print(\"parse error\")\n logfile.close()\n return loglist", "title": "" }, { "docid": "3c90b638acc4fe494ab2064edae6550d", "score": "0.6211441", "text": "def get_log_list(self) -> List[LogListItem]:", "title": "" }, { "docid": "9d663863a169750a6488c0af90807458", "score": "0.59767", "text": "def _parse_log_line(line, strict=True):\n regex=_default_log_line_rec\n m=re.match(regex,line)\n if m is None:\n return None\n g=dict(zip([\"time\",\"level\",\"kind\",\"origin\",\"message\"],m.groups()))\n g[\"full\"]=line\n if g[\"time\"] is not None:\n try:\n g[\"time\"]=datetime.strptime(g[\"time\"],_default_log_line_time_fmt)\n except ValueError:\n if strict:\n return None\n if g[\"level\"] is not None:\n try:\n g[\"level\"]=int(g[\"level\"])\n except ValueError:\n if strict:\n return None\n return g", "title": "" }, { "docid": "4444c99f54bcecd5a16faf5b5c0d8eb8", "score": "0.58745515", "text": "def readlog(self, val):\n path = str(val)\n combatlines = []\n print str(path)\n logfile = open(path, \"r\")\n for line in logfile:\n event = Attack()\n try:\n event.setVals_fromLine(line, path)\n except:\n event = 0\n if event != 0:\n combatlines.insert(-1, event)\n return combatlines\n # ###maybe use w/ engagement class\n # weapons = []\n # pilots = []\n # if event.asArray()[4] not in weapons:\n # weapons.insert(-1, event.asArray()[4])\n # if event.asArray()[0] not in pilots:\n # pilots.insert(-1, event.asArray()[0])", "title": "" }, { "docid": "19bb90c26b93ceda99b5cf2c6258082d", "score": "0.5862079", "text": "def syslog(self, num_entries=10):\n entries = int(num_entries)\n count = 0\n log_file = ''\n if os.path.exists('/var/log/syslog'):\n log_file = '/var/log/syslog'\n elif os.path.exists('/var/log/messages'):\n log_file = '/var/log/messages'\n else:\n print(\"Unable to find system log file!\")\n sys.exit(1)\n lines = [line.strip() for line in open(log_file, \"r\")]\n lines.reverse()\n print(\"Last %s manila syslog entries:-\" % (entries))\n for line in lines:\n if line.find(\"manila\") > 0:\n count += 1\n print(\"%s\" % (line))\n if count == entries:\n break\n\n if count == 0:\n print(\"No manila entries in syslog!\")", "title": "" }, { "docid": "da1e99a1d376282c048074429a370468", "score": "0.5849954", "text": "def grep_log(self, entry):\r\n return utils.grep_tuple(entry, self.log)", "title": "" }, { "docid": "4f721c42ab7772c9d723ba490de31c4e", "score": "0.5849785", "text": "def parse(self, logline):\r\n try:\r\n match = self.fieldselector.match(logline)\r\n except AttributeError, exc:\r\n raise AttributeError(\"%s needs a valid format string (--format)\" % \\\r\n self.__class__.__name__ )\r\n\r\n if match:\r\n data = self._logline_wrapper\r\n data.clear()\r\n for k, v in 
zip(self.fieldnames, match.groups()):\r\n data[k] = v\r\n return data \r\n else:\r\n raise ValueError(\"Could not parse log line: '%s'\" % logline)", "title": "" }, { "docid": "647be90307955390d90dd36aca05e4e6", "score": "0.58132845", "text": "def get_log(self):\n return '\\n'.join(self.log) + '\\n'", "title": "" }, { "docid": "695749d6da141968101f4c1e57ceb8da", "score": "0.58117986", "text": "def parse_log_line(str_line):\n log_line = str_line.split()\n\n log_event = {\n 'date': log_line[0],\n 'verbosity': log_line[1],\n 'thread': log_line[2],\n 'module': log_line[3],\n 'message': ' '.join(log_line[4:]),\n 'logs': [str_line]\n }\n\n return log_event", "title": "" }, { "docid": "fd061d46df389b561c6b682cef7338ea", "score": "0.58095086", "text": "def parse_logs(log_fh):\n log_list = []\n for line in log_fh:\n # Filter lines starting with '[' \n if line.startswith(\"[\"):\n errorMessage=[]\n # Consider only error log message\n if line.split(\" \")[3]==\"ERROR\":\n log = line.split(\" \")[0]+\" \"+line.split(\" \")[1]+\" \"+\\\n line.split(\" \")[2]+\" \"+line.split(\" \")[3]\n\n # capture error message\n for i in range(5,len(line.split(\" \"))):\n errorMessage.append(line.split(\" \")[i])\n err_log=\" \".join(errorMessage)\n \n # append error message with rest of log message\n # For ex. [2021-03-04 01:40:18,644] {{models.py:1760}} ERROR - Bash command failed\n parsed_log=log+\" - \"+err_log\n log_list.append(parsed_log)\n\n return log_list", "title": "" }, { "docid": "b2034bea4616c6971c4e582ad5153db3", "score": "0.5792789", "text": "def enlog(self, line):", "title": "" }, { "docid": "56abe1195d2decc9c83d140830ebc7fe", "score": "0.57786196", "text": "def read_single_line_log_file(log_file):\n log_lines = []\n\n if os.path.exists(log_file):\n with open(log_file) as f: \n log_lines = f.readlines() \n else:\n print (\"Log file not found: %s\" % str(log_file))\n\n return log_lines", "title": "" }, { "docid": "b09c2946be9f5ffbd4c64cd036c58205", "score": "0.57723457", "text": "def retreive_log_messages(self, min_level=0):\n query = \"\"\"\n SELECT message\n FROM p_log_provenance\n WHERE level >= ?\n \"\"\"\n messages = self.run_query(query, [min_level])\n return list(map(lambda x: x[0], messages))", "title": "" }, { "docid": "5ed609b612f830ab2b9ed3514857e374", "score": "0.57496053", "text": "def new_log():\n if os.path.exists(\"lnt.log\"):\n lines = {line for line in open(\"lnt.log\", 'r').readlines()}\n else:\n lines = set()\n with hide('warnings'):\n get('/srv/lnt/install/lnt.log', 'lnt.log', )\n new_lines = {line for line in open(\"lnt.log\", 'r').readlines()}\n for l in new_lines - lines:\n print ' '.join(l.split()[2:]),", "title": "" }, { "docid": "20253ebb93b0e5c33afbc4c3c06fcfd3", "score": "0.56959414", "text": "def lineReceived(self, line):\n code = line[0]\n if code is 'L': # Log\n line = line[1:].rstrip()\n self.log.debug(\"log: %s\" % line)\n try:\n method, uri, link_str, age_str, cc_str = line.split(None, 4)\n except ValueError:\n self.log.error(\"Misformatted squid log line received\")\n return\n links = self._extract_relations(\n self._parse_link(unquote(link_str))\n )\n if method in self.safe_methods \\\n and links.has_key('inv-by'):\n # remember dependencies so we can act on them later\n if age_str == '-':\n age = 0\n else:\n try:\n age = int(age_str)\n except ValueError:\n self.log.warning(\n \"Invalid response age (%s) for <%s>; assuming 0\" %\n (age_str, uri)\n )\n age = 0\n # TODO: implement complete ageing algorithm \n # (using date, transit times, etc.)\n cc = 
self._parse_cc(unquote(cc_str))\n try:\n max_age = int(cc.get('max-age', 0))\n except ValueError:\n self.log.warning(\n \"Invalid response max-age (%s) for <%s>; assuming 0\" %\n (cc.get('max-age', '-'), uri)\n )\n max_age = 0\n ttl = max(\n max_age - age + self.gc_ttl_fudge, \n self.gc_ttl_fudge, \n self.gc_ttl_min\n )\n self.mgr.set_groups(uri, links['inv-by'], ttl)\n elif method not in self.safe_methods:\n if links.has_key(\"invalidates\"):\n # this response says it invalidates something else\n r_scheme, r_host, r_port, r_path = client._parse(uri)\n for invalid_uri in \\\n [urljoin(uri, u) for u in links['invalidates']]:\n i_scheme, i_host, i_port, i_path = \\\n client._parse(invalid_uri)\n if i_scheme.lower() != r_scheme.lower() or \\\n i_host.lower() != r_host.lower() or \\\n i_port != r_port:\n self.log.warning(\n \"Not letting <%s> response invalidate <%s>.\" %\n (uri, invalid_uri)\n )\n continue\n # Use PURGE so that the invalidations are \n # propagated to other peers as CLRs\n self.mgr.purge(invalid_uri)\n # Then invalidate what we know is related with CLR\n # (which isn't propagated)\n self.mgr.clr_related(invalid_uri)\n elif code is 'R': # Rotate\n self.mgr.note_state()\n self.log.info(\"Rotating logs...\")\n for hdlr in self.log.handlers:\n try:\n hdlr.doRollover()\n except AttributeError:\n pass\n else:\n pass", "title": "" }, { "docid": "89a0658377d4b4502b9e519b48d4a4a9", "score": "0.5678213", "text": "def extract_log(cls,log_file): \n \n with open( log_file,'r') as f:\n content = f.readlines()\n msg = {}\n for line in content:\n # line = line.replace(\" \",\" \")\n if len(line) <= 3:\n continue\n a = line.split(\" \")\n if a[1] not in msg:\n msg[a[1]] = [1, a[2]]#,' '.join(a[4:])]\n else:\n msg[a[1]][0] +=1 \n return msg", "title": "" }, { "docid": "1d1a982fdd596e1190b56316a89af23d", "score": "0.56656677", "text": "def parse_line(line):\n match = LOG_LINE_MASK.findall(line)\n if not match:\n return None\n\n url = match[0][4]\n request_time = match[0][-1]\n if not (url and request_time):\n return None\n return LogLine(url, float(request_time))", "title": "" }, { "docid": "4dbfe68e1e412a9a45246fe1b794e513", "score": "0.56638336", "text": "def syslog():\n return Logger._syslog", "title": "" }, { "docid": "6479fd4a224f7d05d61bcb0fd5a98aef", "score": "0.5658789", "text": "def getLog(filepath):\n log = []\n try:\n with open(filepath) as f:\n for line in f:\n log.append(line.replace(\"\\n\", \"\"))\n except: \n return None\n return log", "title": "" }, { "docid": "776ef49b86e632ab929fe261acb956f7", "score": "0.5646108", "text": "def handleLogLine(self, event):\n timestamp = \"\"\n if self.addTimestamp:\n timestamp = datetime.datetime.fromtimestamp(\n event.ts).strftime('[%Y-%m-%d %H:%M:%S] ')\n\n # Write down the log line\n self.file.write(timestamp + event.line + \"\\n\")", "title": "" }, { "docid": "7c05630c6b58bbe6b6ebe047ec01e22b", "score": "0.56406826", "text": "def log_extract(self, log_data):\n for line in log_data:\n # check if user access is accepted or rejected\n match_accept = 'Accept for user' in line\n match_reject = 'Reject for user' in line\n\n try:\n time = line.split()[2].strip()\n except IndexError:\n # Skips blank line\n continue\n\n if match_reject:\n # Access is rejected for the user\n self.update_hour_array(self.rejects, time)\n if match_accept:\n # Access is accepted for the user\n self.update_hour_array(self.accepts, time)", "title": "" }, { "docid": "e648e3293b981a9562436fcfad942025", "score": "0.5609068", "text": "def __log_line(self, line):\n 
logger.info(\"CM: %s\" % line)\n self.log.append(line)\n if len(self.log) > self.log_lines:\n self.log = self.log[-(self.log_lines):]", "title": "" }, { "docid": "37d14cfa66fd7fceb6361484cee7d7cd", "score": "0.5597849", "text": "def parse_log(line):\n\n url_format = re.compile(r\"\"\"((?:(?<=PUT )|(?<=GET )|(?<=POST )|(?<=HEAD ))(.*)(?=\\ http))\"\"\", re.IGNORECASE)\n request_time_format = re.compile(r\"\"\"(([0-9]*[.])?[0-9]+(?!.*\\d))$\"\"\", re.IGNORECASE)\n url_data = re.search(url_format, line)\n request_time_data = re.search(request_time_format, line)\n if url_data:\n url_data = url_data.group()\n if request_time_data:\n request_time_data = request_time_data.group()\n return request_time_data, url_data", "title": "" }, { "docid": "a764a1e2df42893ba3f3ff901e64a3e8", "score": "0.55515385", "text": "def grab_it(self, line):\n elog_grab()", "title": "" }, { "docid": "1a80415a07cd79a9ce98a37ca3f21ebd", "score": "0.5520146", "text": "def logline(self) -> Optional[str]:\n return pulumi.get(self, \"logline\")", "title": "" }, { "docid": "4d347b1cc9f51f4c7b949a5f93b0decb", "score": "0.5501331", "text": "def readLog(iostatLog):\n \n #first line describes the system a little\n f=open(iostatLog)\n header=f.readline()\n data=[]\n \n \n tmp=f.readline()#skip empty line\n \n #save date line\n dateTimeStr=f.readline()\n \n \n tmp=f.readline()#skip cpu header\n cpuUsage=f.readline()\n \n tmp=f.readline()#skip empty line\n tmp=f.readline()#skip device header\n deviceUsage=f.readline()\n \n if (not cpuUsage) or (not dateTimeStr) or (not deviceUsage):\n return data\n \n dateTime=datetime.datetime.strptime(dateTimeStr.strip(),\"%m/%d/%Y %I:%M:%S %p\")\n \n data.append([dateTime])\n for item in cpuUsage.split():\n data.append([float(item)])\n for item in deviceUsage.split()[1:]:\n data.append([float(item)])\n \n while True:\n \n tmp=f.readline()#skip empty line\n \n #save date line\n dateTimeStr=f.readline()\n \n \n tmp=f.readline()#skip cpu header\n cpuUsage=f.readline()\n \n tmp=f.readline()#skip empty line\n tmp=f.readline()#skip device header\n deviceUsage=f.readline()\n \n if (not cpuUsage) or (not dateTimeStr) or (not deviceUsage):\n break\n \n dateTime=datetime.datetime.strptime(dateTimeStr.strip(),\"%m/%d/%Y %I:%M:%S %p\")\n \n data[0].append(dateTime)\n count=1\n for item in cpuUsage.split():\n data[count].append(float(item))\n count+=1\n for item in deviceUsage.split()[1:]:\n data[count].append(float(item))\n count+=1\n return data", "title": "" }, { "docid": "418732bacb6013f197d2da6f03852fbc", "score": "0.5493074", "text": "def parse_log_line(line):\n try:\n record = ujson.loads(line.strip())\n except ValueError:\n record = dict()\n if 'timestamp' in record:\n # convert timestamp\n try:\n tmp = [int(x) for x in re.split('T|-|\\\\:|\\\\.|\\\\+', record['timestamp'])]\n tz = record['timestamp'][-5:]\n ts = datetime.datetime(*tmp[:7])\n ts -= datetime.timedelta(hours=int(tz[2:3]), minutes=int(tz[-2:])) * int(tz[0:1] + '1')\n record['__timestamp__'] = ts\n return record\n except ValueError:\n pass\n\n return None", "title": "" }, { "docid": "d74f7bb11166f62d9c8b52cf238647d1", "score": "0.54869664", "text": "def get_tor_mcu_event_log(self, mcu_event_source):\n\n\n log_list = []\n\n log_size = 6\n event_end = 1\n ts_end = 5\n with self.lock.acquire_timeout(self.PORT_LOCK_TIMEOUT) as result:\n if result:\n # read log events\n log_dump = self.__read_tor_event_log(mcu_event_source, 0x1)\n if log_dump is not None and log_dump != self.EEPROM_ERROR and log_dump is not False:\n for i in range(0, 10):\n 
tor_mcu_event_log = cable_mcu_event_log_s()\n log_start_idx = log_size * i\n tor_mcu_event_log.event_type = log_dump[log_start_idx : log_start_idx+event_end]\n tor_mcu_event_log.timestamp = struct.unpack('<I', log_dump[log_start_idx + event_end : log_start_idx + ts_end])[0]\n tor_mcu_event_log.run_no = log_dump[log_start_idx + ts_end]\n\n log_list.append(tor_mcu_event_log)\n #print(\"In get_tor_mcu_event_log: adding run_no {} time_stamp {}\".format(tor_mcu_event_log.run_no, tor_mcu_event_log.timestamp))\n\n else:\n self.log(self.LOG_ERROR, \"Port Event-lock timed-out!\")\n return self.ERROR_PORT_LOCK_TIMEOUT, None\n\n return log_list", "title": "" }, { "docid": "fdd735181812307e37204f60ea20e4e5", "score": "0.5483156", "text": "async def extract_info(log_file):\n info = []\n marker = \"###\"\n info.append(f\"{marker} Logfile: {log_file}\")\n async for line in read_log(log_file):\n if line.startswith(marker):\n info.append(line)\n return info", "title": "" }, { "docid": "8dc28f9ac7950504414f3570749460a6", "score": "0.5475922", "text": "def collect_lines(bot, trigger):\n\n # Don't log things in PM\n if trigger.is_privmsg:\n return\n\n # Add a log for the channel and nick, if there isn't already one\n if trigger.sender not in bot.memory[log_key]:\n bot.memory[log_key][trigger.sender] = SopelMemory()\n if Identifier(trigger.nick) not in bot.memory[log_key][trigger.sender]:\n bot.memory[log_key][trigger.sender][Identifier(trigger.nick)] = list()\n\n # Create a temporary list of the user's lines in a channel\n templist = bot.memory[log_key][trigger.sender][Identifier(trigger.nick)]\n line = trigger.group()\n if line.startswith(\"s/\") or line.startswith(bot.config.core.help_prefix):\n # Don't remember substitutions or commands\n return\n elif line.startswith(\"\\x01ACTION\"): # For /me messages\n line = line[:-1]\n templist.append(line)\n else:\n templist.append(line)\n\n del templist[:-10] # Keep the log to 10 lines per person\n\n bot.memory[log_key][trigger.sender][Identifier(trigger.nick)] = templist", "title": "" }, { "docid": "0b6695fa1fbc5f45ece4db025f706918", "score": "0.54735553", "text": "def log_it(self, line):\n if line.rstrip() == '':\n post_elog()\n\n else:\n post_elog(line.strip())", "title": "" }, { "docid": "d24fd3a536eadf23bb594557f2504fd8", "score": "0.54598725", "text": "def parse_row(row):\n if row[3][:5] == \"DEBUG\":\n return [row[0], row[1], \"DEBUG\",\n row[2] + \": \" + row[3][5:]]\n elif row[3][:8] == \"CRITICAL\":\n return [row[0], row[1], \"CRITICAL\",\n row[2] + \": \" + row[3][8:]]\n elif row[3][:5] == \"ERROR\":\n return [row[0], row[1], \"ERROR\",\n row[2] + \": \" + row[3][5:]]\n elif row[3][:7] == \"WARNING\":\n return [row[0], row[1], \"WARNING\",\n row[2] + \": \" + row[3][7:]]\n elif row[3][:4] == \"INFO\":\n return [row[0], row[1], \"INFO\",\n row[2] + \": \" + row[3][4:]]", "title": "" }, { "docid": "3854fc89b17f8b7ab673fc224b184d69", "score": "0.54563475", "text": "def recordLogsToList(log):\n print log\n# global LOGLIST\n LOGLIST.append(log)", "title": "" }, { "docid": "977dcf8c6ec1dce0ac6d63760a6137bb", "score": "0.545554", "text": "def read_log(path, strict=True, required=None, bad_line_action=\"append\"):\n required=required or {\"message\",\"time\"}\n funcargparse.check_parameter_range(bad_line_action,\"bad_line_action\",{\"append\",\"ignore\"})\n log_lines=[]\n with open(path,\"r\") as f:\n for line in f:\n line=line.strip()\n if re.match(_default_log_skip_rec,line):\n continue\n parsed=_parse_log_line(line,strict=strict)\n if parsed is None:\n 
bad_line=True\n else:\n bad_line=False\n for r in required:\n if parsed.get(r) is None:\n bad_line=True\n break\n if bad_line:\n if bad_line_action==\"append\" and log_lines:\n log_lines[-1][\"message\"]=(log_lines[-1][\"message\"] or \"\")+line\n else:\n log_lines.append(parsed)\n return log_lines", "title": "" }, { "docid": "51831ae8d078684cbde0351f8840fb39", "score": "0.545253", "text": "def cable_get_nic_mcu_event_log(self, mcu_event_source):\n\n log_list = []\n\n # read all logs for all mcu event sources.\n log_size = 6\n event_end = 1\n ts_end = 5\n # read log events from given start index\n log_dump = self.__get_mcu_event_log(mcu_event_source)\n\n if log_dump is not None:\n for i in range(0, 10):\n nic_mcu_event_log = cable_mcu_event_log_s()\n log_start_idx = log_size * i\n nic_mcu_event_log.event_type = log_dump[log_start_idx : log_start_idx+event_end]\n nic_mcu_event_log.timestamp = struct.unpack('<I', log_dump[log_start_idx + event_end : log_start_idx + ts_end])[0]\n nic_mcu_event_log.run_no = log_dump[log_start_idx + ts_end]\n\n log_list.append(nic_mcu_event_log)\n #print(\"In get_nic_mcu_event_log: adding run_no {} time_stamp {} event_type {}\".format(nic_mcu_event_log.run_no, nic_mcu_event_log.timestamp, nic_mcu_event_log.event_type))\n\n return log_list", "title": "" }, { "docid": "7d5bbff7f5ae7a0e993fc7ea87037c85", "score": "0.54144156", "text": "def transform_log(log):\n log['id'] = int(str(log['id'])[:11])\n message = log.pop('message')\n splits = message.rstrip().split('\\t')\n log_type = splits[0].split(' ')[0]\n splits[0] = splits[0].replace(log_type + ' ', '')\n\n for item in splits:\n k, v = item.split(': ')\n if k == 'ItemLinks':\n v = v.split('\"')\n if len(v) == 1:\n v = v[0]\n else:\n v = '[' + \",\".join(v) + ']'\n else:\n v = v.split(' ')[0]\n k = k.replace(' ','')\n log.update({k:v})", "title": "" }, { "docid": "98320ad7a9aade612ab9504bd149e2bd", "score": "0.5391393", "text": "def parse_log(input_file, regexes):\n\n rejection_warnings = []\n\n report_legend = '\"Datestamp\",\"Remote Host\",\"Reason\",\"Claimed sender\",\"Recipient\",\"Helo greeting\"'\n\n if DEBUG_ON:\n print report_legend\n\n rejection_warnings.append(report_legend)\n\n try:\n input_fh = open(input_file,'r')\n except:\n print \"[!] Error accessing %s\" % input_file\n print sys.exc_info()[0]\n\n sys.exit()\n else:\n for line in input_fh:\n\n # We're only interested in reject warnings\n # FIXME: Replace this hard-coded value\n if 'reject_warning' in line:\n\n for regex in regexes:\n \n # The Regular Expression pattern we're going to use when examining\n # the log file\n pattern = re.compile(regex, re.X)\n\n try:\n matches = pattern.match(line).groups()\n except:\n pass\n else:\n if DEBUG_ON:\n print \"We found:\\n\\t%s,%s,%s,%s,%s,%s\" % matches\n\n # Build CSV string, add to list\n csv_string='\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\"' % matches\n rejection_warnings.append(csv_string)\n input_fh.close()\n\n return rejection_warnings", "title": "" }, { "docid": "4b6383977fc2ee97832c94bfd4478d52", "score": "0.5384521", "text": "def parse_job_result_from_log(\n self, lava_lines: list[dict[str, str]]\n ) -> list[dict[str, str]]:\n\n last_line = None # Print all lines. lines[:None] == lines[:]\n\n for idx, line in enumerate(lava_lines):\n if result := re.search(r\"hwci: mesa: (pass|fail)\", line):\n self.is_finished = True\n self.status = result.group(1)\n\n last_line = idx + 1\n # We reached the log end here. 
hwci script has finished.\n break\n return lava_lines[:last_line]", "title": "" }, { "docid": "8dd381e5b80a58d73e833aef5d15ec1e", "score": "0.53825194", "text": "def read_line(self, line, allow_session_start=True):\n matcher = my_im.match(line)\n if matcher is None:\n if allow_session_start:\n ##### Check for session start\n session_start_match = self.session_start_matcher.match(line)\n if session_start_match:\n self.previous = dateutil.parser.parse(\n session_start_match.group(1),\n ignoretz=True)\n return None\n\n sender = util.simplify_SN(matcher.group(2))\n storage = None # which person's list to store this line (IM) in\n ##### determine sender\n for (aliases, lines) in zip(self.both_aliases, self.both_lines):\n if sender in aliases:\n storage = lines\n break\n else:\n return None # didn't match either set of aliases\n ##### make sure it's not empty\n processed = util.strip_punctuation(matcher.group(3).lower()).strip()\n if processed == '':\n return None\n ##### parse timestamp. strptime works the vast majority of the time\n raw_timestamp = matcher.group(1)\n try:\n line_time = datetime.datetime.strptime(raw_timestamp,\n STANDARD_TIMESTAMP)\n except ValueError:\n try:\n line_time = dateutil.parser.parse(raw_timestamp, ignoretz=True)\n except ValueError:\n return None # rare edge case from weird pastes, etc\n ##### Compute datetime from timestamp (time) and previous (date)\n timestamp = datetime.datetime.combine(self.previous.date(),\n line_time.time())\n # handle conversations that span multiple days\n if util.crossed_day(self.previous, line_time):\n timestamp += ONE_DAY\n\n if timestamp < self.previous:\n return None\n\n ##### Store the timestamp and word counters for the message text\n storage['line_dates'].append(timestamp)\n storage['line_counters'].append(util.make_ngram_counter(self.ngram_N, matcher.group(3)))\n\n words = util.strip_punctuation(matcher.group(3).lower()).split()\n word_dates = [timestamp] * len(words)\n # intervals = [0] * len(words)\n # intervals[0] = (timestamp - self.previous).total_seconds()\n\n storage['words'].extend(words)\n storage['word_dates'].extend(word_dates)\n #storage['intervals'].extend(intervals)\n\n self.previous = timestamp", "title": "" }, { "docid": "dd871aba26a25c474fb2ef58dd16516b", "score": "0.53718174", "text": "def read_min_log(gromacs_log, unit):\n ## Make empty lists for each of the energy types\n bond_lines = []\n angle_lines = []\n prop_dihed_lines = []\n imp_dihed_lines = []\n lj_14_lines = []\n coul_14_lines = []\n lj_sr_lines = []\n disper_corr_lines = []\n coul_sr_lines = []\n coul_recip_lines = []\n pos_rest_lines = []\n potential_lines = []\n pres_dc_bar_lines = []\n pres_bar_lines = []\n constr_rmsd_lines = []\n pattern = '(?i)Energies \\(kJ/mol\\)' # case insensitive, escape parentheses\n\n s_line = 0\n for line in open(gromacs_log).readlines():\n # re.search is anywhere, re.match is explicitly beginning of line\n if re.search(pattern, line):\n # If it matches \"Energies\", start counter\n s_line += 1\n elif s_line == 1:\n s_line += 1\n elif s_line == 2:\n sl = line.split()\n bond_lines.append(sl[0])\n angle_lines.append(sl[1])\n prop_dihed_lines.append(sl[2])\n imp_dihed_lines.append(sl[3])\n lj_14_lines.append(sl[4])\n s_line += 1\n elif s_line == 3:\n s_line += 1\n elif s_line == 4:\n sl = line.split()\n coul_14_lines.append(sl[0])\n lj_sr_lines.append(sl[1])\n disper_corr_lines.append(sl[2])\n coul_sr_lines.append(sl[3])\n coul_recip_lines.append(sl[4])\n s_line += 1\n elif s_line == 5:\n s_line += 1\n elif s_line == 6:\n 
sl = line.split()\n pos_rest_lines.append(sl[0])\n potential_lines.append(sl[1])\n pres_dc_bar_lines.append(sl[2])\n pres_bar_lines.append(sl[3])\n constr_rmsd_lines.append(sl[4])\n s_line = 0\n\n df = pd.DataFrame(list(zip(\n bond_lines, angle_lines, prop_dihed_lines, imp_dihed_lines, lj_14_lines,\n coul_14_lines, lj_sr_lines, disper_corr_lines, coul_sr_lines,\n coul_recip_lines,\n pos_rest_lines, potential_lines, pres_dc_bar_lines, pres_bar_lines,\n constr_rmsd_lines)),\n columns=[\"Bond\", \"Angle\", \"Proper Dih.\", \"Improper Dih.\", \"LJ-14\",\n \"Coulomb-14\", \"LJ (SR)\", \"Disper. corr.\", \"Coulomb (SR)\", \"Coul. recip.\",\n \"Position Rest.\", \"Potential\", \"Pres. DC (bar)\", \"Pressure (bar)\",\n \"Constr. rmsd\"])\n ## Save data file\n df.to_csv(outfile, sep='\\t', index=True, encoding='utf8', header=True)\n return df", "title": "" }, { "docid": "b7f32ca6df05058967adb27d8b23916b", "score": "0.5356041", "text": "def logs(self):\n with lcd(self.path):\n logs = local('hg log', capture=True).split(\"\\n\\n\")\n return [\n dict([\n (a.strip(),b.strip()) \n for a,b in [\n line.split(':',1) for line in chunk.split('\\n')\n ]\n ]) for chunk in logs\n ]", "title": "" }, { "docid": "5758fdf49d749889e1a2467f6558ed69", "score": "0.5352138", "text": "def _parse_log(expression, loglist):\n i = 1\n count = 0\n while count < _loglim+1:\n if loglist[-i]['type'] == 'PRIVMSG':\n match = loglist[-i]['message']\n if expression.findall(match):\n return True, loglist[-i]\n i += 1\n count += 1\n else:\n i += 1\n continue\n else:\n return False, None", "title": "" }, { "docid": "06fa149a165e5b27dd1db737e5e6ae24", "score": "0.5350359", "text": "def syslog(value):\n if value == Logger._syslog:\n # Same value, let's silently return\n return\n\n if value is None:\n # Stop sending logs to IOS XE logger and to syslog server\n # Uninstall IOS logging\n commands = [\n 'no logging host %s' % Logger._syslog,\n 'no logging discriminator ztp',\n ]\n elif Logger._syslog is None:\n # Start sending logs to IOS XE logger and to syslog server\n # Install IOS logging\n commands = [\n 'logging discriminator ztp msg-body includes Message from|HA_EM|INSTALL',\n 'logging host %s discriminator ztp' % value,\n ]\n else:\n # Syslog server changes\n # Update IOS logging\n commands = [\n 'logging host %s discriminator ztp' % value,\n 'no logging host %s' % Logger._syslog,\n ]\n\n for command in commands:\n # noinspection PyBroadException\n try:\n cli.configure(command)\n except Exception as ex:\n Logger.error('Error while sending command \"%s\" to IOS' % command, _skip_syslog=True)\n Logger.debug(ex, _skip_syslog=True)\n time.sleep(2)\n\n Logger._syslog = value", "title": "" }, { "docid": "5b868712616a550dd0b0e0b7d936e4e1", "score": "0.53374255", "text": "def read_file():\n\n file_data = open(\"/var/log/syslog\",\"r\")\n \n return file_data", "title": "" }, { "docid": "996231cf648610ff6a8765972d7d86af", "score": "0.5335419", "text": "def parse_log_line(self, line: str) -> tuple[DateTime | None, str]:\n split_at = line.find(\" \")\n if split_at == -1:\n self.log.error(\n \"Error parsing timestamp (no timestamp in message %r). \"\n \"Will continue execution but won't update timestamp\",\n line,\n )\n return None, line\n timestamp = line[:split_at]\n message = line[split_at + 1 :].rstrip()\n try:\n last_log_time = cast(DateTime, pendulum.parse(timestamp))\n except ParserError:\n self.log.error(\"Error parsing timestamp. 
Will continue execution but won't update timestamp\")\n return None, line\n return last_log_time, message", "title": "" }, { "docid": "df85d00fd091db374bb2610a1e364a05", "score": "0.53307575", "text": "def update_log_rows(self, widget=None):\n self.log_rows[:] = []\n\n try:\n for line in open(self.logfile_entertainer, 'r'):\n try:\n line_table = line.split()\n message = ' '.join(line_table[3:])\n row = line_table[:3] + [message]\n parsed_row = parse_row(row)\n self.log_rows.append(parsed_row)\n except IndexError:\n print \"Cannot parse log line: \", line\n except IOError:\n print \"Cannot find logfile: \", self.logfile_entertainer\n\n # Reverse so that the latest message is at top\n self.log_rows.reverse()\n # Filter unwated message types\n self.filter_messages()", "title": "" }, { "docid": "86a4e034564d0b2566c810c9904280f8", "score": "0.53297335", "text": "def get_tor_peer_mcu_event_log(self, mcu_event_source):\n\n log_list = []\n\n log_size = 6\n event_end = 1\n ts_end = 5\n with self.lock.acquire_timeout(self.PORT_LOCK_TIMEOUT) as result:\n if result:\n # read log events\n log_dump = self.__read_tor_event_log(mcu_event_source, 0x2)\n if log_dump is not None and log_dump != self.EEPROM_ERROR and log_dump is not False:\n #if log_dump is not None and log_dump != -1:\n for i in range(0, 10):\n tor_peer_mcu_event_log = cable_mcu_event_log_s()\n log_start_idx = log_size * i\n tor_peer_mcu_event_log.event_type = log_dump[log_start_idx : log_start_idx+event_end]\n tor_peer_mcu_event_log.timestamp = struct.unpack('<I', log_dump[log_start_idx + event_end : log_start_idx + ts_end])[0]\n tor_peer_mcu_event_log.run_no = log_dump[log_start_idx + ts_end]\n\n log_list.append(tor_peer_mcu_event_log)\n #print(\"In get_tor_peer_mcu_event_log: adding run_no {} time_stamp {}\".format(tor_peer_mcu_event_log.run_no, tor_peer_mcu_event_log.timestamp))\n else:\n self.log(self.LOG_ERROR, \"Port Event-lock timed-out!\")\n return self.ERROR_PORT_LOCK_TIMEOUT, None\n\n return log_list", "title": "" }, { "docid": "3e9fa872572472eaab53821621787b5f", "score": "0.5329484", "text": "def read_logfile(file_name):\n with codecs.open(file_name, 'r', encoding='utf-8', errors='ignore') as fp:\n data = fp.readlines()\n data = [dd.strip() for dd in data]\n return data", "title": "" }, { "docid": "77077cd37e986ac763e35de84d31bc47", "score": "0.53261966", "text": "def log(self):\n return []", "title": "" }, { "docid": "97805a58ddd5dd282cf72dfee73ea3a2", "score": "0.5309138", "text": "def GetLogInfoByTimeStamp(ManualO2Ologfilename,StartTime,StopTime):\n #Open the log:\n ManualO2Olog=open(ManualO2Ologfilename,'r')\n #Loop to extract only the wanted lines\n PrintLines=False\n WantedLines=[]\n for line in ManualO2Olog:\n if 'Tmax:' in line and StopTime in line:\n PrintLines=True\n if PrintLines:\n if 'Tmin:' in line:\n break\n else:\n print(line.strip())\n WantedLines.append(line)\n ManualO2Olog.close()\n return WantedLines", "title": "" }, { "docid": "c45d33416b72700ac73571a4f8ff738d", "score": "0.52976763", "text": "def _read_log_timestamp(self, item_id: int, timestamp: int, cur=None) -> Union[list, None]:\n\n params = {'item_id': item_id, 'timestamp': timestamp}\n query = \"SELECT * FROM log WHERE item_id = :item_id AND time = :timestamp;\"\n return self._fetchall(query, params, cur=cur)", "title": "" }, { "docid": "6d83f6d10d2cb8bc16326be5314aad16", "score": "0.52914166", "text": "def cable_get_phy_chip_event_log(self):\n\n log_list = []\n\n # read all 24 logs. 
6*4\n for k in range(0, 6):\n start_idx = k*4\n log_size = 15\n in_fault_end = 5\n out_fault_end = 10\n ts_end = 14\n\n # read log events from given start index\n log_dump = self.__get_phy_chip_event_log(start_idx)\n if log_dump is not None:\n for i in range(0, 4):\n phy_chip_event_log = cable_phy_chip_event_log_s()\n log_start_idx = log_size * i\n phy_chip_event_log.in_fault = log_dump[log_start_idx : log_start_idx+in_fault_end]\n phy_chip_event_log.out_fault = log_dump[log_start_idx+in_fault_end : log_start_idx+out_fault_end]\n phy_chip_event_log.timestamp = struct.unpack('<I', log_dump[log_start_idx + out_fault_end : log_start_idx + ts_end])[0]\n\n phy_chip_event_log.run_no = log_dump[log_start_idx + ts_end]\n log_list.append(phy_chip_event_log)\n #print(\"In get_phy_chip_event_log: adding run_no {} time_stamp {}\".format(phy_chip_event_log.run_no, phy_chip_event_log.timestamp))\n\n return log_list", "title": "" }, { "docid": "bd497bca439ca4532e6814b09b779cdf", "score": "0.5273196", "text": "def readLog(self):\n self.H.__sendByte__(CP.COMMON)\n self.H.__sendByte__(CP.READ_LOG)\n log = self.H.fd.readline().strip()\n self.H.__get_ack__()\n return log", "title": "" }, { "docid": "846fc2da5ccf3082360dae99a04c8672", "score": "0.5272596", "text": "def get(self, request, format=None, param=None):\r\n filedebug = 'temp\\logs\\log%s.txt' % (param)\r\n LOG_FILE = os.path.join(settings.BASE_DIR, filedebug)\r\n\r\n list = []\r\n try:\r\n\r\n audit = None\r\n try:\r\n audit = open(LOG_FILE, 'r')\r\n except (OSError, IOError) as e:\r\n print(\"%s no se encuentra\" % filedebug)\r\n pass\r\n if audit:\r\n try:\r\n for row in reversed(audit.readlines()):\r\n\r\n data = row.split(']')\r\n print (data)\r\n\r\n list.append({\r\n \"date\": data[0].strip().strip('['),\r\n \"type\": data[1].strip().strip('['),\r\n \"mod\": data[2].strip().strip('['),\r\n \"path\": data[3].strip().strip('['),\r\n \"ip\": data[4].strip().strip('['),\r\n \"user\": data[5].strip().strip('['),\r\n \"method\": data[6].strip().strip('['),\r\n\r\n \"desc\": data[7].strip(),\r\n })\r\n except:\r\n pass\r\n\r\n if audit:\r\n audit.close()\r\n\r\n except Exception as e:\r\n print(e)\r\n\r\n return Response(list)", "title": "" }, { "docid": "1ad77d5aa740f2de34b82874ff562b8d", "score": "0.5255908", "text": "def parse_experience(log):\r\n # list of tuple of shape [(dev, lines_add, lines_removed), ...]\r\n exp = []\r\n\r\n # entry lines were zero separated with -z\r\n entry_lines = log.split('\\0')\r\n\r\n current_entry = []\r\n \r\n for entry_line in entry_lines:\r\n if not entry_line.strip():\r\n # blank entry line marks the end of an entry, we're ready to process\r\n local_entry = current_entry\r\n current_entry = []\r\n if len(local_entry) < 2:\r\n print >> sys.stderr, \"Weird entry, cannot parse: %s\\n-----\" % '\\n'.join(local_entry)\r\n continue\r\n author, changes = local_entry[:2]\r\n author = safe_author_name(author)\r\n try:\r\n changes_split = re.split(r'\\s+', changes)\r\n # this can be two fields if there were file renames\r\n # detected, in which case the file names are on the\r\n # following entry lines, or three fields (third being\r\n # the filename) if there were no file renames\r\n lines_added, lines_removed = changes_split[:2]\r\n lines_added = int(lines_added)\r\n lines_removed = int(lines_removed)\r\n\r\n # don't record revisions that don't have any removed or\r\n # added lines...they mean nothing to our algorithm\r\n if lines_added or lines_removed:\r\n exp.append((author, lines_added, lines_removed))\r\n except 
ValueError:\r\n print >> sys.stderr, \"Weird entry, cannot parse: %s\\n-----\" % '\\n'.join(local_entry) \r\n continue\r\n else:\r\n # continue to aggregate the entry\r\n lines = entry_line.split('\\n')\r\n current_entry.extend([line.strip() for line in lines])\r\n\r\n # we need the oldest log entries first.\r\n exp.reverse()\r\n return exp", "title": "" }, { "docid": "e61bbecce1236e5cc94a536b31c091a9", "score": "0.52489215", "text": "def line_processor(self, line):\n line = line.replace('\\n', '').split('###')\n if line and line != ['']:\n try:\n line = [line[0], line[1] if 'None' in line[2] else '%s %s' % (line[1],\n ' '.join(ast.literal_eval(line[2])))]\n except UnicodeDecodeError:\n logging.debug('#TODO: UnicodeDecodeError needs handling')\n array.append({'id': line[0], 'line': line[1]})", "title": "" }, { "docid": "6d12f83a055162b8e8d03070f915db66", "score": "0.52423435", "text": "async def get_logs_by_id(request: web.Request) -> web.Response:\n ident = request.match_info['syslog_identifier']\n if ident == 'api.log':\n ident = 'opentrons-api'\n elif ident == 'serial.log':\n ident = 'opentrons-api-serial'\n opts = _get_options(request.query, 500000)\n return await _get_log_response(\n ident, opts['records'], opts['format'])", "title": "" }, { "docid": "7b0d6e9acb678682015ce55945941a2d", "score": "0.5239609", "text": "def parse_trace(trace_line):\n trace_line_list = trace_line.split(\" \")\n num_re = re.compile(r\"(^[1-9]$|^[1-9][0-9]$)\")\n ip_re = re.compile(r\"[0-9]+\\.[0-9]+\\.[0-9]+\\.[0-9]+\")\n\n for trace_num_item in trace_line_list[:3]:\n match_num = num_re.search(trace_num_item)\n if match_num:\n break\n\n if match_num:\n ip_list = [match_num.group()]\n for trace_item in trace_line_list:\n match_ip = ip_re.search(trace_item)\n if match_ip:\n ip_list.append(match_ip.group(0))\n return ip_list\n else:\n for trace_item in trace_line_list:\n match_ip = ip_re.search(trace_item)\n if match_ip:\n return match_ip.group(0)", "title": "" }, { "docid": "bd1b7dc3c556c6d2ec3920a1a6d934b1", "score": "0.523158", "text": "def log(msg):\n\n if type(msg) is str:\n logging.info(msg)\n print(msg)\n else:\n origin = msg_origin(msg)\n message_type = msg_type(msg)\n\n log_str = \"[\" + dt.datetime.fromtimestamp(int(msg[\"date\"])).strftime('%Y-%m-%d %H:%M:%S') + \"] \"\n log_str += msg[\"from\"][\"first_name\"] + \"(\" + str(msg[\"from\"][\"id\"]) + \")\" + \" enviou \" + message_type + \" \"\n\n # Suportando supergroups e outros no log\n if origin == \"group\" or origin == \"supergroup\" or origin == \"channel\":\n log_str += \"em \\\"\" + msg[\"chat\"][\"title\"] + \"\\\" (\" + str(msg[\"chat\"][\"id\"]) + \")\"\n elif origin == \"private\":\n log_str += \"em PRIVADO\"\n\n if message_type == \"text\":\n log_str += \": \" + msg[\"text\"]\n\n #if message_type == \"audio\":\n # print(msg)\n\n logging.info(log_str)\n print(log_str)", "title": "" }, { "docid": "0ddb26a2bde11017ad03553ec414e21b", "score": "0.52232254", "text": "def get_line(self, row):\n try:\n return self.lines[row]\n except:\n return [[]]", "title": "" }, { "docid": "7a0bab9bffb86f5d6b9ff11ec223cd02", "score": "0.5222653", "text": "def parse(self, line):\n expected_min_no_fields = 5\n if len(line) < expected_min_no_fields:\n raise LineParserException('line too short')\n\n pid = line[1]\n log_level = line[2].lstrip(\"[\").rstrip(\"]\")\n timezone = 'UTC'\n\n return {\n '@timestamp': self.compose_timestamp(line[0], timezone),\n 'log_level': log_level,\n 'process_id': int(pid),\n 'message': ' '.join(map(str, line[3:]))\n }", "title": "" 
}, { "docid": "fe6c706f046f4cda66a2fad4f9087d13", "score": "0.52205706", "text": "def parse_it(self, line):\r\n\r\n reg_1 = r\"(\\w\\w\\w \\d\\d \\d\\d:\\d\\d:\\d\\d).*\\\\\\\\\\?\\\\(.*)\\(\\\\\\\\\\?\\\\.*\\\\\\\\\\?\\\\(.*)\"\r\n reg = re.findall(reg_1, line)\r\n if reg:\r\n return list(reg[0])\r\n # return \"TIME: {reg[0][0]}\\nFILE: {reg[0][1]}\\nPROCESS: {reg[0][2]}\\n\\n\"\r", "title": "" }, { "docid": "2770bf0d5b6610025da4d7319fe30368", "score": "0.5219169", "text": "def __call__(self, line):\n if b\"GLib-GObject-CRITICAL\" in line:\n return\n if line:\n if self.state < OutputHandlerState.AFTER_HANDLER_START:\n self.line_buffer.append(line)\n return\n data = line.decode(\"utf8\", \"replace\")\n if self.stack_fixer:\n data = self.stack_fixer(data)\n if self.lsan_handler:\n data = self.lsan_handler.log(data)\n if data is not None:\n self.logger.process_output(self.pid,\n data,\n command=\" \".join(self.command))", "title": "" }, { "docid": "14132db35afd70ed78fa685ebd26d33f", "score": "0.5200606", "text": "def extract_sld_from_log(log_content):\n data_block_list = extract_multi_sld_from_log(log_content)\n if data_block_list is not None and len(data_block_list) > 0:\n return data_block_list[0][1]\n return None", "title": "" }, { "docid": "7ea7747b46d85ceacb98f9afa1e62998", "score": "0.5191766", "text": "def loadLog(self):\r\n print('Loading logs...')\r\n headers, regex = self.generate_logformat_regex(self.para.logformat)\r\n self.df_log = self.log_to_dataframe(os.path.join(self.para.path, self.logname), regex, headers,\r\n self.para.logformat)\r\n for idx, line in self.df_log.iterrows():\r\n line = line['Content']\r\n if self.para.rex:\r\n for currentRex in self.para.rex:\r\n line = re.sub(currentRex, '', line)\r\n\r\n wordSeq = line.strip().split()\r\n self.wordLL.append(tuple(wordSeq))", "title": "" }, { "docid": "f7d2e2708978e68ac3e7d752298de648", "score": "0.5187679", "text": "def _read_log(self):\n data = [] # format: [energy, status, temperature, ediff]\n f = open(os.path.join(self._rundirectory, self._logname), 'r')\n lines = f.read().splitlines()\n f.close()\n step_almost_over = False\n step_over = False\n for line in lines:\n if line.startswith('msg: Molecular dynamics:'):\n status = 'performing MD'\n elif line.startswith('msg: Optimization:'):\n status = 'performing QN'\n elif line.startswith('ene:'):\n status = 'local optimum reached'\n energy = floatornan(line.split()[1])\n elif line.startswith('msg: Accepted new minimum.'):\n status = 'accepted'\n step_almost_over = True\n elif line.startswith('msg: Found previously found minimum.'):\n status = 'previously found minimum'\n step_almost_over = True\n elif line.startswith('msg: Re-found last minimum.'):\n status = 'previous minimum'\n step_almost_over = True\n elif line.startswith('msg: Rejected new minimum'):\n status = 'rejected'\n step_almost_over = True\n elif line.startswith('par: '):\n temperature = floatornan(line.split()[1])\n ediff = floatornan(line.split()[2])\n if step_almost_over:\n step_over = True\n step_almost_over = False\n if step_over:\n data.append([energy, status, temperature, ediff])\n step_over = False\n if data[-1][1] != status:\n data.append([np.nan, status, temperature, ediff])\n self._data = data", "title": "" }, { "docid": "99afe7d9601fc4074915e207c73268d6", "score": "0.5185275", "text": "def parse_line(self, currentDate, line_str):\n node_number = \"-1\" \n sensor_type = \"nosensor\"\n sensor_value = \"-1.0\"\n\n coma_list = line_str.split(',')\n keys1 = []\n keys2 = []\n\n if (len(coma_list) < 
2):\n sensor_type = \"blogmessage\"\n message = Message(currentDate, node_number, sensor_type, sensor_value)\n return message\n else:\n keys1 = coma_list[0].split(':')\n keys2 = coma_list[1].split(':')\n\n if (len(keys1) > 1):\n #if keys[0] == \"nodeid\"\n node_number = keys1[1] \n else:\n # No node id found\n node_number = \"-2\"\n\n if (len(keys2) > 1):\n sensor_type = keys2[0]\n sensor_value = keys2[1]\n # else sensor_type = \"nosensor\"\n \n message = Message(currentDate, node_number, sensor_type, sensor_value)\n return message", "title": "" }, { "docid": "83bfe2bc46fbdd518491040cba1bd36d", "score": "0.51840943", "text": "def log_reader(file_path):\n lines = i_get_unicode_lines(file_path)\n\n return multi_line_records(lines, is_line_start=is_log_start_line)", "title": "" }, { "docid": "ab508e2a23b4a66b79181ca368d5e492", "score": "0.51764065", "text": "def process_log_entries(self, cmd_output):\n pass", "title": "" }, { "docid": "02e8039689b0f7fde7a70fb5bc5f2ffc", "score": "0.5174318", "text": "def read_log(self, jail):\n self.data = []\n try:\n with open(self.log_file, encoding=\"utf-8\") as file_data:\n self.data = self.ip_regex[jail].findall(file_data.read())\n\n except (IndexError, FileNotFoundError, IsADirectoryError, UnboundLocalError):\n _LOGGER.warning(\"File not present: %s\", os.path.basename(self.log_file))", "title": "" }, { "docid": "73e0625543d69a8e66c412d393ae7146", "score": "0.5167468", "text": "def extract_multi_sld_from_log(log_content):\n # Parse out the portion we need\n data_started = False\n data_content = []\n data_block_list = []\n model_names = []\n\n for line in log_content.split('\\n'):\n if line.startswith(\"SIMULTANEOUS\"):\n clean_str = line.replace(\"SIMULTANEOUS \", \"\")\n model_names = json.loads(clean_str)\n if line.startswith(\"SLD_START\"):\n data_started = True\n elif line.startswith(\"SLD_END\"):\n data_started = False\n if len(data_content) > 0:\n data_path = ''\n if len(model_names) > len(data_block_list):\n data_path = model_names[len(data_block_list)]\n data_block_list.append([data_path, '\\n'.join(data_content)])\n data_content = []\n elif data_started is True:\n data_content.append(line)\n return data_block_list", "title": "" }, { "docid": "b95c8973c936b2cc2981132408a1cbb9", "score": "0.5162459", "text": "def get_event_log(self):\n stdout, stderr = self.__execute_command_with_retries([\"sel\", \"list\"])\n return [SystemEvent(line) for line in stdout.splitlines()]", "title": "" }, { "docid": "eae41e9a9a7699cc8abdd28afa5503e9", "score": "0.51601064", "text": "def fetchLogList(self):\n loglist = None\n loglist = self.read_query( \\\n \"SELECT ID, CALLSIGN FROM LOGHEADER WHERE 1\")\n return loglist", "title": "" }, { "docid": "8b0b9bdb218874f1516d344509da3b05", "score": "0.51524407", "text": "def extract_data_from_log(log_content):\n data_block_list = extract_multi_data_from_log(log_content)\n if data_block_list is not None and len(data_block_list) > 0:\n return data_block_list[0][1]\n return None", "title": "" }, { "docid": "dc9fe82f03a4c2601e76897a8e60e5ff", "score": "0.5149587", "text": "def log_info(message):\n syslog.syslog(message)", "title": "" }, { "docid": "03a97798d0e65e6f744498fddddc23e5", "score": "0.5149518", "text": "def _format_log_entries(self):\n rows = []\n cols = [\"Date\", \"Entry\"]\n if self.logging and self.log_file is not None:\n self.comment = _LOG_LIST\n log = open(self.log_file, \"r\")\n for row in log.readlines():\n rows.append(\n (row[0:_DATE_LEN], row[_DATE_LEN + 1:].strip('\\n')))\n log.close()\n self.start_list = 
0\n self.end_list = len(rows)\n\n self.report_mode = 'L'\n return(cols, rows)", "title": "" }, { "docid": "c952301b56f569d12c670c6f8bd31d92", "score": "0.51414716", "text": "def setVals_fromLine(self, logline, logpath):\n lvs = combat_regexp.match(logline).groups()\n self.timestamp = lvs[0]\n self.damageAmmount = lvs[1]\n if str(lvs[2]) == \"to\":\n self.attacked = lvs[3]\n self.attacker_name = \"You\"\n if str(lvs[2]) == \"from\":\n self.attacked = \"You\"\n self.attacker_name = lvs[3]\n self.attacker_weapon = lvs[4]\n self.hitType = lvs[5]\n self.logpath = \"\".join(logpath)", "title": "" }, { "docid": "c4cad8f2c948b5cce4aa8c664c820603", "score": "0.51368576", "text": "def getlog(username):\n try:\n reqs = RequestLog.objects.filter(username=username).order_by('-datetime')\n\n return reqs\n except RequestLog.DoesNotExist:\n return None", "title": "" }, { "docid": "ce951c80ed5350df66b9d6ff65878f73", "score": "0.5129132", "text": "def process_single_log(logfilename):\r\n global rows_to_insert\r\n\r\n with open(logfilename, 'r') as in_file:\r\n for _ in range(1):\r\n next(in_file)\r\n for line in in_file:\r\n modifiedline = re.sub(' +', ',', line)\r\n if re.match(r',', modifiedline):\r\n modifiedline = modifiedline[1:]\r\n mod_line_arr = modifiedline.split(\",\")\r\n if str(mod_line_arr[5]) == str('1'):\r\n mod_line_arr[6] = mod_line_arr[6][:1] + ',' + mod_line_arr[6][1:]\r\n #print(len(mod_line_arr))\r\n modifiedline = ','.join(str(e) for e in mod_line_arr)\r\n mod_line_arr = modifiedline.split(\",\")\r\n if len(mod_line_arr) == 12:\r\n #print(len(mod_line_arr))\r\n #print(mod_line_arr)\r\n mod_line_arr = mod_line_arr[:-1]\r\n mod_line_arr[0] = int(mod_line_arr[0])\r\n mod_line_arr[1] = datetime.datetime.strptime(mod_line_arr[1], '%m/%d/%Y').date()\r\n mod_line_arr[2] = datetime.datetime.strptime(mod_line_arr[2], '%H:%M:%S').time()\r\n mod_line_arr[3] = int(mod_line_arr[3])\r\n mod_line_arr[4] = datetime.datetime.strptime(mod_line_arr[4], '%H:%M:%S').time()\r\n mod_line_arr[5] = int(mod_line_arr[5])\r\n mod_line_arr[6] = int(mod_line_arr[6])\r\n mod_line_arr[9] = int(mod_line_arr[9])\r\n mod_line_arr[10] = int(mod_line_arr[10])\r\n rows_to_insert.append(tuple(mod_line_arr))\r\n else:\r\n mline = rg.search(line)\r\n ml.append(int(mline.group(1)))\r\n ml.append(datetime.datetime.strptime(mline.group(2), '%m/%d/%Y').date())\r\n ml.append(datetime.datetime.strptime(mline.group(3), '%H:%M:%S').time())\r\n ml.append(int(mline.group(4)))\r\n ml.append(datetime.datetime.strptime(mline.group(5), '%H:%M:%S').time())\r\n ml.append(int(mline.group(6)))\r\n ml.append(int(mline.group(7)))\r\n ml.append(mline.group(8))\r\n ml.append(mline.group(9))\r\n ml.append(int(mline.group(10)))\r\n ml.append(int(mline.group(11)))\r\n rows_to_insert.append(tuple(ml))", "title": "" }, { "docid": "8d9aa249affb0dea84c06de70209c6c5", "score": "0.5119175", "text": "def _strip_logfile_line(self, logfile_line):\n # strip off any trailing linefeed or newline hidden characters\n working_logfile_line = logfile_line.rstrip('\\r\\n')\n\n # strip off the preceding 24 characters (the DCL time) of the log line\n stripped_logfile_line = self._strip_time(working_logfile_line)\n\n return stripped_logfile_line", "title": "" }, { "docid": "c86843252f3965ee082d951bd2c8e577", "score": "0.5118174", "text": "def log(self, line):\n self.body.append(line)", "title": "" }, { "docid": "20a859acdf083d20f755a90fd01546c4", "score": "0.51162595", "text": "def get_logs():\n return Log.select()", "title": "" }, { "docid": 
"c8e5ae4c8978990f1697bec5fbacb46e", "score": "0.51112723", "text": "def retrieve_logs(lines=200):\n fp = open(os.path.join(LOGGING_DIR, 'audio_play.logs'))\n return \"<pre>{}</pre>\".format(fp.read())", "title": "" }, { "docid": "e2c82803f532ee3deaff6c4e714c0077", "score": "0.51074487", "text": "def get_record(file_path):\n with open(file_path, 'r', encoding='utf8') as log_file:\n for line in log_file:\n yield line", "title": "" }, { "docid": "1f5a232f6ddc1f680c4132c98b369c41", "score": "0.5099618", "text": "def process_log_record(self, log_record):\n return log_record", "title": "" }, { "docid": "2d4e2ac0e47d2038a07b6ea0aba20586", "score": "0.50947297", "text": "def _base_line(self):\n\n if len(self.raw) != 0:\n wp = 1.5 * 2 / 512\n ws_ = 0.2 * 2 / 512\n devel = 0.005\n rp = 20 * math.log10((1 + devel) / (1 - devel))\n rs = 20\n n, wn = ellipord(wp, ws_, rp, rs, True)\n sos = ellip(n, rp, rs, wn, 'high', output='sos')\n res = sosfilt(sos, self.raw)\n return res\n else:\n return []", "title": "" }, { "docid": "230ef083de293e13135ff3bb4aeb5731", "score": "0.5082878", "text": "def sales_reports(log_file):\n for line in log_file:\n # loop through each item in log_file\n line = line.rstrip()\n # removes trailing characters at the end of each line\n day = line[0:3]\n # sets variable day as the first 3 characters of line\n if day == \"Mon\":\n # if the day is Monday\n print(line)\n #prints the log", "title": "" }, { "docid": "af29c17aa64abaf4a29d4af265fe5e49", "score": "0.5081204", "text": "def get_line_list(line):\r\n\r\n temp = line.strip('*\\n')\r\n line_list = temp.split('|')\r\n return line_list", "title": "" }, { "docid": "149a6ec43332b93de18b0dbf139d2d3d", "score": "0.50807154", "text": "def get_log_line(req, res, trans_time, additional_info):\r\n\r\n return '%s - - [%s] \"%s %s\" %s %s \"%s\" \"%s\" \"%s\" %.4f \"%s\"' % (\r\n req.remote_addr,\r\n time.strftime('%d/%b/%Y:%H:%M:%S +0000', time.gmtime()),\r\n req.method, req.path, res.status.split()[0],\r\n res.content_length or '-', req.referer or '-',\r\n req.headers.get('x-trans-id', '-'),\r\n req.user_agent or '-', trans_time, additional_info or '-')", "title": "" }, { "docid": "edba53cd9f2de629df725eb495cc9582", "score": "0.50692433", "text": "def _parse_getevent_line(line):\n device_name, event_info = line.split(\":\", 1)\n integers = [int(x, 16) for x in event_info.strip().split()[:3]]\n return \"sendevent {} {} {} {}\".format(device_name, *integers)", "title": "" }, { "docid": "9abba4abbc2b13832908834fd81de38a", "score": "0.5067945", "text": "def log_lines(self, *lines):\n pass", "title": "" }, { "docid": "362ab01a3f0e62cc03b1cb4a92941a39", "score": "0.5065212", "text": "def tracelog( data ) :\n\n msg = ctypes.cast( data, ctypes.POINTER (Ra_TraceLog_Message))[0]\n print( \"%s: %s %s %s\" % ( msg.entry_type\n , msg.zgt_stamp\n , msg.component_id\n , msg.data ) )", "title": "" }, { "docid": "0a9471e767a8131af8763d0514330348", "score": "0.5063036", "text": "def prepare_log(logfile_name, firstLine=\"here we go:\\n\"):\n completed_students_list = open(logfile_name, \"w\")\n completed_students_list.write(firstLine)\n completed_students_list.close()", "title": "" }, { "docid": "0a9471e767a8131af8763d0514330348", "score": "0.5063036", "text": "def prepare_log(logfile_name, firstLine=\"here we go:\\n\"):\n completed_students_list = open(logfile_name, \"w\")\n completed_students_list.write(firstLine)\n completed_students_list.close()", "title": "" }, { "docid": "e02a7b08d6ee8d83d96c2a9fe3e74f77", "score": "0.5051403", "text": "def 
retr_lines(line):\n# m_line = re_expr.match(CONV_FROM_FTP(line))\n m_line = re_expr.match(line)\n if m_line != None:\n if m_line.group(2) not in ['.', '..']:\n l_new_files.append(\n (m_line.group(1), os.path.join(s_dir, m_line.group(2)))\n )", "title": "" }, { "docid": "1140ac42f191a46cd4fc957fae1dc5ed", "score": "0.5048404", "text": "def genSysLogLine(self,\n logInfoObj):\n # Format:\n # [<prefix>:]<date> <prio> <host> <location> <msg>\n if (self.getSysLogPrefix() == \"\"):\n prefix = \"\"\n else:\n prefix = self.getSysLogPrefix() + \":\"\n hostName = os.uname()[1]\n logEntry = prefix + logInfoObj.getDate() + \" \" +\\\n logNo2DfsName(logInfoObj.getType()) + \" \" +\\\n hostName + \" \" + logInfoObj.getLocObj() + \" \" +\\\n logInfoObj.getMessage()\n return logEntry", "title": "" }, { "docid": "37f3826e7c2af03fc3d37852d7554be3", "score": "0.50451636", "text": "def server_logs():", "title": "" }, { "docid": "944dacded11e4e8d8428b0ccb3c7bc84", "score": "0.5036465", "text": "def get_line(self):\n\t\tline = None\n\t\twhile not line:\n\t\t\tline = self.debug.readline().decode('utf-8').strip()\n\t\t\tself.parse_line(line)\n\t\treturn line", "title": "" }, { "docid": "0fe9895aef8f1641036476b8424bc1e3", "score": "0.5035837", "text": "def get_log(self):\n log_path = self.meta_data['logs_resource']\n conn=Qubole.agent()\n r=conn.get_raw(log_path)\n return r.text", "title": "" } ]
e1f21db3ba0f5c708ddcdb84c7e9bdf3
Rotate with a given angle around x axis
[ { "docid": "818e1330d879aaadfbfd054c753eef39", "score": "0.0", "text": "def rx(self, angle: float) -> \"Mate\":\n a = angle / 180 * pi\n self.y_dir = Mate._rotate(self.y_dir, self.x_dir, a)\n self.z_dir = Mate._rotate(self.z_dir, self.x_dir, a)\n return self", "title": "" } ]
[ { "docid": "d20982e96b47fde51e7401a4cb1b9bff", "score": "0.8066317", "text": "def rotate_x(self, angle):\n angle *= np.pi / 180\n return self.transform(np.matrix([[1, 0, 0],\n [0, np.cos(angle), -np.sin(angle)],\n [0, np.sin(angle), np.cos(angle)]]))", "title": "" }, { "docid": "da1aed469640a693b5e60155ae6a2180", "score": "0.79214704", "text": "def rotate(self, angle=0.0):\n # TODO: Implement the rotate function. Remember to record the value of\n # rotation degree.\n self.rotDegree = angle\n self.x = rotate(self.x, angle = angle, axes=(0, 1), reshape=False, \n output=None, order=3, mode='constant', cval=0.0, prefilter=True)\n # This rotation isn't working correctly. Get shit for non right anlge rotatations\n # raise NotImplementedError\n #######################################################################\n # #\n # #\n # TODO: YOUR CODE HERE #\n # #\n # #\n #######################################################################", "title": "" }, { "docid": "477e2daafb75f4c11d62db6fb5fc9c36", "score": "0.77660215", "text": "def rotate(self, angle):\n self.call('rotate', angle)", "title": "" }, { "docid": "1b06522e1c67bb003fd7dea2100a91cf", "score": "0.7701052", "text": "def rotate(x: torch.Tensor, angle: int) -> torch.Tensor:\n # B C H W\n h_dim = 2\n w_dim = 3\n\n if angle == 0:\n return x\n elif angle == 90:\n return x.flip(w_dim).transpose(h_dim, w_dim)\n elif angle == 180:\n return x.flip(w_dim).flip(h_dim)\n elif angle == 270:\n return x.flip(h_dim).transpose(h_dim, w_dim)\n else:\n raise NotImplementedError(\"Must be rotation divisible by 90 degrees\")", "title": "" }, { "docid": "e287d02ad5556f6d9c9cb15aece4319f", "score": "0.76847064", "text": "def rotate(self, angle):\n n, a = Vector.polar([self.x, self.y])\n a += angle\n self.x = n * cos(a)\n self.y = n * sin(a)", "title": "" }, { "docid": "a726ac310a9e6d456a49acb28ddc1d0e", "score": "0.7682423", "text": "def rotate_x(angle):\n log.dev(\"lib.mathp.rotate_x is deprecated. 
Use lib.rotation.R1 instead.\")\n\n cosA = np.cos(angle)\n sinA = np.sin(angle)\n R = np.array([[1, 0, 0], [0, cosA, sinA], [0, -sinA, cosA]])\n return R", "title": "" }, { "docid": "984d3c60025d32872bd7085038fb828a", "score": "0.75961083", "text": "def rot_x(angle):\n sangle = math.sin(angle)\n cangle = math.cos(angle)\n rx = np.array([[1.0, 0.0, 0.0],\n [0.0, cangle, sangle],\n [0.0, -sangle, cangle]])\n return rx", "title": "" }, { "docid": "0d55337da74fa7e81fef8db96cffa50b", "score": "0.75421506", "text": "def rotate(self, angle):\n old_angle, tilt = self.rotation\n new_angle = old_angle + angle\n while new_angle > 90:\n new_angle = new_angle - 90\n while angle < -90:\n new_angle = new_angle + 90\n self.rotation = (new_angle, tilt)", "title": "" }, { "docid": "f643faf980a8c510846b1598e24038c8", "score": "0.75354624", "text": "def _rotate(self, angle):\n angle *= self._degreesPerAU\n self._orient = self._orient.rotate(angle)", "title": "" }, { "docid": "2f39baa39836e0a5a5d811b25fcfddec", "score": "0.7450419", "text": "def XXRotation(angle):\r\n return SuperPosition([IGate * IGate, XGate * XGate],\r\n [np.cos(angle), np.sin(angle) * 1j],\r\n 'R_XX({})'.format(angle))", "title": "" }, { "docid": "596cba7af26dcb382b7d99122e3fd110", "score": "0.7405082", "text": "def rotate(x, y, angle):\n return x * cos(angle) - y * sin(angle), y * cos(angle) + x * sin(angle)", "title": "" }, { "docid": "1c78343cf49805fdb161449afd23bbf9", "score": "0.74049246", "text": "def rotate_x(self, angle: float):\n self.vertices = list(\n Matrix44.x_rotate(angle).transform_vertices(self.vertices)\n )\n return self", "title": "" }, { "docid": "96fef99f6baff17d4f4ece080771d832", "score": "0.72892493", "text": "def rotateDegrees(angle):\n rotate(angle *2*math.pi / 360)", "title": "" }, { "docid": "e48b53f64d947553bffbedf2b7f793a7", "score": "0.7270533", "text": "def rotate(X):\n return X", "title": "" }, { "docid": "cca19f56aa442d1960d3301bf5ea236a", "score": "0.7241288", "text": "def srotate(self, angle):\n\n self.angle = self.angle + angle", "title": "" }, { "docid": "93c57f53a9e64be8307ae841acc20b3c", "score": "0.7240693", "text": "def rotateX(self, angleInRadians) -> None:\n ...", "title": "" }, { "docid": "24327fd9551fcc781f7d24893c651ae8", "score": "0.7237308", "text": "def rotate(self, angle):\r\n radians = angle*pi/180\r\n return Vector(self.x*cos(radians) - self.y*sin(radians),\r\n self.x*sin(radians) + self.y*cos(radians))", "title": "" }, { "docid": "3f94ba971ba6a8dc29e02bde1e015058", "score": "0.722882", "text": "def _rotate(self, angle):\n if self.undobuffer:\n self.undobuffer.push((\"rot\", angle, self._degreesPerAU))\n angle *= self._degreesPerAU\n neworient = self._orient.rotate(angle)\n tracing = self.screen._tracing\n if tracing == 1 and self._speed > 0:\n anglevel = 3.0 * self._speed\n steps = 1 + int(abs(angle)/anglevel)\n delta = 1.0*angle/steps\n for _ in range(steps):\n self._orient = self._orient.rotate(delta)\n self._update()\n self._orient = neworient\n self._update()", "title": "" }, { "docid": "ad8e5f167879c411472790217c0740e6", "score": "0.72254646", "text": "def rotateX(self, angle):\r\n if angle:\r\n c = cos(radians(angle))\r\n s = sin(radians(angle))\r\n self.mtrx = dot([[1, 0, 0, 0],\r\n [0, c, s, 0],\r\n [0, -s, c, 0],\r\n [0, 0, 0, 1]], self.mtrx)\r\n self.rtn[0] = angle\r\n self.was_moved = True", "title": "" }, { "docid": "01d969ce99f6a6145d9e891bf002f604", "score": "0.72239614", "text": "def rotatedBy(self, angle):\n\t\tx, y = self.x, self.y\n\t\tc, s = cos(angle), 
sin(angle)\n\t\treturn Vector((c * x) - (s * y), (s * x) + (c * y))", "title": "" }, { "docid": "2f898086f45d02af68ea0e4fa1e6e437", "score": "0.7146165", "text": "def rotate(self, angle, axis):\r\n R=self.rotation(angle, axis)\r\n self.mlist = (self*R).mlist\r\n return self", "title": "" }, { "docid": "0526af6f37e18f0b3cd46bd4894eb930", "score": "0.7114516", "text": "def RotationX(theta):\n\n return Rotation([1., 0., 0.], theta)", "title": "" }, { "docid": "b3701d825ecdeec6654f8f762d01b717", "score": "0.71098155", "text": "def rotate_axis(self, axis: \"Vertex\", angle: float):\n self.vertices = list(\n Matrix44.axis_rotate(axis, angle).transform_vertices(self.vertices)\n )\n return self", "title": "" }, { "docid": "e9a062ec720eb4f41f5bb019a7bb17e9", "score": "0.710595", "text": "def rotate(self,center, angle):\n \n self.coord = [x-np.repeat([[center[0],center[1]]],[x.shape[0]],axis = 0) for x in self.coord]\n\n alpha = angle\n R = np.array([[np.cos(alpha),-np.sin(alpha)],[np.sin(alpha),np.cos(alpha)]])\n \n for i in range(len(self.coord)):\n self.coord[i] = np.squeeze([np.dot([x],R) for x in self.coord[i]])\n\n self.coord = [x+np.repeat([[center[0],center[1]]],[x.shape[0]],axis = 0) for x in self.coord]\n\n return self", "title": "" }, { "docid": "db256974cb9c45c2c73adde55b291890", "score": "0.7043385", "text": "def rotate(self, angle):\n rotmat = rotation_matrix_2d(angle)\n rotated = np.dot(rotmat.T, [self.pix_x.value, self.pix_y.value])\n self.pix_x = rotated[0] * self.pix_x.unit\n self.pix_y = rotated[1] * self.pix_x.unit\n self.pix_rotation -= angle", "title": "" }, { "docid": "da5f29739985b635fdf75c6bfb95e113", "score": "0.69998366", "text": "def rotate_rad(self, angle):\n self.beam_angle += angle\n self.xy = rotate(self.xy, angle)\n self.angle += angle", "title": "" }, { "docid": "940302e900f954da714734ded3e06172", "score": "0.6981311", "text": "def rotate(self, angle):\n\t\tif not isinstance(angle, Angle):\n\t\t\tangle = Angle(angle)\n\t\treturn angle.matrix() * self", "title": "" }, { "docid": "871cac9c4d8003554be101bd8cb77c9d", "score": "0.6956875", "text": "def rotation(self, x, omega):\n \n x0, y0 = x.T[0], x.T[1]\n c, s = np.cos(omega), np.sin(omega)\n x1 = c*x0 - s*y0\n y1 = s*x0 + c*y0\n x_1 = np.array([x1, y1])\n return x_1", "title": "" }, { "docid": "f0456622fa37b56f6e111a7094e429ed", "score": "0.6941352", "text": "def rotate(self, axis, theta):\n return NotImplemented", "title": "" }, { "docid": "52a98c5233080768b0beabd0baf42cb8", "score": "0.6938393", "text": "def _rotate_coordinate(self, x, y, angle):\n\n sin = math.sin(angle)\n cos = math.cos(angle)\n\n x_ = x * cos - y * sin\n y_ = x * sin + y * cos\n\n return (x_, y_)", "title": "" }, { "docid": "51fd03b1a1019a82bcaa7190bd26d4cd", "score": "0.688909", "text": "def rotator(angle):\n c = np.cos(angle)\n s = np.sin(angle)\n return np.array([[c,-s],[s,c]])", "title": "" }, { "docid": "6ccb484a59ba4a1355cb38286fa1c088", "score": "0.6878394", "text": "def rotate(self, angle, axis, position=None):\n if position is not None:\n pos = np.array(position)\n self.translate(-pos)\n self._rotate_about_origin(angle, axis)\n self.translate(pos)\n else:\n self._rotate_about_origin(angle, axis)", "title": "" }, { "docid": "4d3a3d46affd349bc71ae2c9fb65f106", "score": "0.68746996", "text": "def rotate(self,X):\n alpha = random.rand() * 2*pi\n R = Rotator.rotation_matrix(alpha,0.0,0.0)\n return np.dot(R,X)", "title": "" }, { "docid": "c4c06ed9f70c5c5f6fbbdfe4549090b8", "score": "0.686439", "text": "def rotate(self, axis, theta):\n v = 
Vector3(self) # ensure vector\n k = Vector3(axis.uv())\n return type(self)(\n cosd(theta) * v\n + sind(theta) * k.cross(v)\n + (1 - cosd(theta)) * k * (k.dot(v))\n )", "title": "" }, { "docid": "19972d209664069325f06ec8b27975f3", "score": "0.6864177", "text": "def rotate(self,X):\n alpha = random.rand() * 2*pi\n\n beta = np.arccos(1.0-2*random.rand())\n psi = random.rand() * 2*pi\n\n R = Rotator.rotation_matrix(alpha,beta,psi)\n return np.dot(R,X)", "title": "" }, { "docid": "45e156f252658f1d937d42fcf6ba3f86", "score": "0.684865", "text": "def rotate(self, angle_radians):\n cos = math.cos(angle_radians)\n sin = math.sin(angle_radians)\n x = self.x*cos - self.y*sin\n y = self.x*sin + self.y*cos\n self.x = x\n self.y = y", "title": "" }, { "docid": "74464fb7653100ae30f2a43ef4bd742b", "score": "0.6842638", "text": "def rotate_global(self, angle, axis=(0., 0., 1.)):\n self.rotation = aa2q(angle, glm.vec3(axis)) * self.rotation", "title": "" }, { "docid": "3852150e9efcec055bdd4e3b3c8327b9", "score": "0.68218243", "text": "def rotate((x, y), theta):\n\n return math.cos(theta) * x + math.sin(theta) * y, -math.sin(theta) * x + math.cos(theta) * y", "title": "" }, { "docid": "5a341ad9d61354adc303afd9c425c832", "score": "0.6798374", "text": "def rotate_by(self, angle, degrees = False):\n\t\ttarget = angle * pi / 180 if degrees else angle\n\t\tif self.inv:\n\t\t\ttarget = -target\n\n\t\tif target > 0:\n\t\t\tn = int(target // self.step_size) + 1\n\t\t\tfor _ in range(n):\n\t\t\t\tself.step_c()\n\n\t\telse:\n\t\t\tn = int(-target // self.step_size) + 1\n\t\t\tfor _ in range(n):\n\t\t\t\tself.step_cc()\n\n\t\tif self.inv:\n\t\t\tdiff = -diff", "title": "" }, { "docid": "0d5f244284a10c04b03970b2353a2f1f", "score": "0.6797492", "text": "def rotate(xy, theta):\n sin_theta, cos_theta = sin(theta), cos(theta)\n R = np.array([[cos_theta, -sin_theta], [sin_theta, cos_theta]])\n return np.dot(R, xy)", "title": "" }, { "docid": "97e83cbd4ea284834f4bac24ad60f837", "score": "0.67916346", "text": "def rotate(self,angle):\n radians = (angle * math.pi)/180\n self.direction += angle\n for object in self.objects:\n y = object.position[0]\n x = object.position[1]\n\n object.position[0] = x * math.sin(radians) + y * math.cos(radians)\n object.position[1] = x * math.cos(radians) - y * math.sin(radians)", "title": "" }, { "docid": "d63ffb5a71e16c67449a70c8aa2ff8d9", "score": "0.6791417", "text": "def rotate(self, x_angle, y_angle, z_angle, center=None):\n self._transform(\n Object.generate_rotation_matrix(x_angle, y_angle, z_angle),\n center)", "title": "" }, { "docid": "c08157b2c91898d8ba9ee7cd84848ae9", "score": "0.67909735", "text": "def rotateAroundAxis(self, rotation_axis, angle):\n # For the mathematics look for: Rodrigues rotation formula.\n # http://en.wikipedia.org/wiki/Rodrigues%27_rotation_formula\n unit_rotation_axis = rotation_axis.getNormalizedVector()\n\n rotated_vector = self.scalarMultiplication(np.cos(angle))\n\n tmp_vector = unit_rotation_axis.crossProduct(self)\n tmp_vector = tmp_vector.scalarMultiplication(np.sin(angle))\n rotated_vector = rotated_vector.addVector(tmp_vector)\n\n scalar_factor = self.scalarProduct(unit_rotation_axis) * (1.0 - np.cos(angle))\n tmp_vector = unit_rotation_axis.scalarMultiplication(scalar_factor)\n rotated_vector = rotated_vector.addVector(tmp_vector)\n\n return rotated_vector", "title": "" }, { "docid": "b5e355618696772140cfa6b9bf036067", "score": "0.67841715", "text": "def rotate (vect, angle, axis):\n\n cosine = np.cos (angle)\n sine = np.sin (angle)\n\n return (vect * 
cosine + \\\n sine * np.cross (axis, vect) + \\\n np.dot (axis, vect) * (1 - cosine) * axis)", "title": "" }, { "docid": "44e5327a9870dce1d709cd97b45c6ca6", "score": "0.6779984", "text": "def _rotate_xy(self, xyz, angle):\n theta = np.deg2rad(angle)\n x = xyz[:,0].copy()\n y = xyz[:,1].copy()\n xyz[:,0] = x * np.cos(theta) - y * np.sin(theta)\n xyz[:,1] = x * np.sin(theta) + y * np.cos(theta)\n xyz[:,2] = xyz[:,2]\n return xyz", "title": "" }, { "docid": "ae183d55d628beb0a84233f2f49b493a", "score": "0.6770631", "text": "def rotate_axis(self):\n try:\n self.obj.rotate(angle=self.rotation_speed * self.time_scale / self.refresh_rate, axis=vector(0, 1, 0))\n except ZeroDivisionError:\n print(\"ERROR: REFRESH_RATE is 0\")\n except (AttributeError, TypeError):\n print(\"ERROR: wrong arguments type while initializing!!\")", "title": "" }, { "docid": "89197d24ac32b545689b989ebf6d5f05", "score": "0.6770141", "text": "def rotate_clockwise(self, angle):\r\n angle = degrees_to_radians(angle)\r\n current_angle = atan(self.x / self.y)\r\n angle += current_angle\r\n\r\n length = self.length\r\n self.x = length*sin(angle)\r\n self.y = length*cos(angle)", "title": "" }, { "docid": "b8413c334d3c9688132b11f4b37f476b", "score": "0.6767557", "text": "def rotate(img, angle, resample=False, expand=False, center=None):\r\n \r\n return img.rotate(angle, resample, expand, center)", "title": "" }, { "docid": "6c14a6113b3967705c18397a65863ddb", "score": "0.6746859", "text": "def rotate(angle: float) -> Callable:\n return lambda img: TF.rotate(img, angle)", "title": "" }, { "docid": "6a65e38220e3961ddadfe202c83a2f13", "score": "0.6746076", "text": "def rotation(self, angle, axis):\r\n\r\n sqr_a = axis.x*axis.x\r\n sqr_b = axis.y*axis.y\r\n sqr_c = axis.z*axis.z\r\n len2 = sqr_a+sqr_b+sqr_c\r\n\r\n k2 = math.cos(angle)\r\n k1 = (1.0-k2)/len2\r\n k3 = math.sin(angle)/math.sqrt(len2)\r\n k1ab = k1*axis.x*axis.y\r\n k1ac = k1*axis.x*axis.z\r\n k1bc = k1*axis.y*axis.z\r\n k3a = k3*axis.x\r\n k3b = k3*axis.y\r\n k3c = k3*axis.z\r\n\r\n return mat4( k1*sqr_a+k2, k1ab-k3c, k1ac+k3b, 0.0,\r\n k1ab+k3c, k1*sqr_b+k2, k1bc-k3a, 0.0,\r\n k1ac-k3b, k1bc+k3a, k1*sqr_c+k2, 0.0,\r\n 0.0, 0.0, 0.0, 1.0)", "title": "" }, { "docid": "307d58b35096a2203c59e9a6859657c0", "score": "0.67436576", "text": "def rotate(self, angle):\n perp = Vec2D(-self[1], self[0])\n angle = angle * math.pi / 180.0\n c, s = math.cos(angle), math.sin(angle)\n return Vec2D(self[0] * c + perp[0] * s, self[1] * c + perp[1] * s)", "title": "" }, { "docid": "b7b6771b4f8a350be10b76febe5b415b", "score": "0.67296505", "text": "def rotate90(self):", "title": "" }, { "docid": "80e85a72d8f6e2101e83a0e8a586009b", "score": "0.6718789", "text": "def rotateXYZ(self, angle):\n return self.transform(Matrix3(\n Vec3(1, 0, 0),\n Vec3(0, math.cos(angle), -math.sin(angle)),\n Vec3(0, math.sin(angle), math.cos(angle)),\n ) * Matrix3(\n Vec3(math.cos(angle), 0, math.sin(angle)),\n Vec3(0, 1, 0),\n Vec3(-math.sin(angle), 0, math.cos(angle)),\n ) * Matrix3(\n Vec3(math.cos(angle), -math.sin(angle), 0),\n Vec3(math.sin(angle), math.cos(angle), 0),\n Vec3(0, 0, 1),\n ))", "title": "" }, { "docid": "cc23e46c4dcf590d98e9207e5195da71", "score": "0.6714367", "text": "def rotate_local(self, angle, axis=(0., 0., 1.)):\n self.rotation *= aa2q(angle, glm.vec3(axis))", "title": "" }, { "docid": "027c52f9b19b812b833d76a2ae5ffd66", "score": "0.6706987", "text": "def rotate(self, angle=45, center=(0, 0)):\n self.position = _rotate_points(self.position, angle=angle, center=center)\n return self", 
"title": "" }, { "docid": "26891ed23a59e1ea1da7b7d90b4c14b1", "score": "0.6704838", "text": "def rotate_coordinate(x, y, angle, rotc_x, rotc_y):\n rad = math.radians(angle)\n return rotc_x + (x - rotc_x) * math.cos(rad) + (y - rotc_y) * math.sin(rad),\\\n rotc_y - (x - rotc_x) * math.sin(rad) + (y - rotc_y) * math.cos(rad)", "title": "" }, { "docid": "8bd03a9365d1d412676ccebad1967703", "score": "0.6704321", "text": "def rotate(self,amount):\n self.angle += amount\n if self.drawn == True:\n self.draw()", "title": "" }, { "docid": "50f1868171253b743017f7e80e764171", "score": "0.6682982", "text": "def rotate3(x, angle_x=0, angle_y=0, angle_z=0, origin=(0, 0, 0)):\n origin = np.asarray(origin)\n x = np.asarray(x) - origin\n r = rotation_matrix3(angle_x, angle_y, angle_z)\n return x.dot(r.T) + origin", "title": "" }, { "docid": "66c25a0029863152ab017d4ac9a30098", "score": "0.668132", "text": "def rotation_around_axis(self,axis,angle,**kwargs):\n xyz = self.get('x,y,z',**kwargs)\n\n # get the data\n ct,st = np.cos(angle),np.sin(angle)\n ux,uy,uz = axis\n\n # get the center of the molecule\n xyz0 = np.mean(xyz,0)\n\n # definition of the rotation matrix\n # see https://en.wikipedia.org/wiki/Rotation_matrix\n rot_mat = np.array([\n [ct + ux**2*(1-ct), ux*uy*(1-ct) - uz*st, ux*uz*(1-ct) + uy*st],\n [uy*ux*(1-ct) + uz*st, ct + uy**2*(1-ct), uy*uz*(1-ct) - ux*st],\n [uz*ux*(1-ct) - uy*st, uz*uy*(1-ct) + ux*st, ct + uz**2*(1-ct) ]])\n\n # apply the rotation\n xyz = np.dot(rot_mat,(xyz-xyz0).T).T + xyz0\n self.update('x,y,z',xyz,**kwargs)\n\n return xyz0", "title": "" }, { "docid": "4d7d82c7f29d8eac143999b89ebbebd0", "score": "0.6677299", "text": "def rotate_about_x(self, deg):\n if self.y is None:\n raise AttributeError('rotate_about_x: no Y component')\n if self.z is None:\n raise AttributeError('rotate_about_x: no Z component')\n\n sine = sin(radians(deg))\n cosine = cos(radians(deg))\n y_new = self.y*cosine - self.z*sine\n self.z = self.z*cosine + self.y*sine\n self.y = y_new", "title": "" }, { "docid": "7e1e1f69cc77ccf913b6a9aa24fc1bc5", "score": "0.66754246", "text": "def rotation(period=10):\r\n angle = (((time.time() - starttime) % period) / period) * 360\r\n glRotate(angle, 0, 1, 0)\r\n return angle", "title": "" }, { "docid": "73b7e69e992a838f15a079c76bb4ef04", "score": "0.66749406", "text": "def rotate(self, angle=pi, point=None):\n if not point: point = Point.origin(d=self.dimension)\n v = Vector.createFromTwoPoints(point, self)\n v.rotate(angle)\n self.components = v(point).components", "title": "" }, { "docid": "869f5b1b253f9dede2aba8ea1d90b572", "score": "0.66685045", "text": "def set_rotation(self, angle):\n self._rotation = angle\n self._reset_slot_bounds()", "title": "" }, { "docid": "9c12b1b3caa4b8b5130dc61e4e671ded", "score": "0.6666384", "text": "def _rotate_(self, x: np.array, m: np.array) -> (np.array, np.array):\n # get a random angle\n angle = np.random.randint(0, self.rotate)\n # get a random sign for the angle\n sign = np.random.randint(0, 2)\n x = rotate(x, -sign * angle, reshape=False)\n m = rotate(m, -sign * angle, axes=(0, 1),\n mode='nearest',\n reshape=False)\n return x, m", "title": "" }, { "docid": "8eec9b0ccc93963412ffa5a48ed71ff9", "score": "0.6664997", "text": "def rot_x(theta):\n theta_rad = np.radians(theta)\n rotation_matrix = [[1, 0, 0],\n [0, np.cos(theta_rad), -np.sin(theta_rad)],\n [0, np.sin(theta_rad), np.cos(theta_rad)]]\n return np.matrix(rotation_matrix)", "title": "" }, { "docid": "04615542a4dcf76b07017b966f7b801e", "score": "0.66578656", "text": "def 
apply_rotation_x(self, eta=0.0 ):\n \n eta = radians(eta)\n new_rotation_matrix = [[ 1 , 0 , 0 ],\n [ 0 , +cos(eta) , -sin(eta) ],\n [ 0 , +sin(eta) , +cos(eta) ]] \n \n self.rotation_matrix_exp = np.dot( new_rotation_matrix, self.rotation_matrix_exp )", "title": "" }, { "docid": "73dc54c3347b66b08f5306b34a800137", "score": "0.6652435", "text": "def rotate(self, angle, point=None):\n # Actually not working\n if not point:\n point = self.center\n for i in range(len(self.points)):\n self.points[i].rotate(angle, point)", "title": "" }, { "docid": "fb7822dfc1b220c5448304c34aa23292", "score": "0.66428083", "text": "def rotate(self, x=0, y=0, z=0):\n\t\tquaternion = R.from_euler('xyz', [x, y, z], degrees=True)\n\t\trotation_matrix = np.array(quaternion.as_matrix())\n\t\trotation_matrix = np.pad(rotation_matrix, [(0, 1), (0, 1)], mode='constant')\n\t\trotation_matrix[3,3] = 1\n\n\t\tself.matrix = np.matmul(self.matrix, rotation_matrix)", "title": "" }, { "docid": "aed0e1df3884cc3cb863099b6cf438d8", "score": "0.6641206", "text": "def rotate_degrees(self, angle_degrees):\n self.rotate(math.radians(angle_degrees))", "title": "" }, { "docid": "b4283109276ca84ab01a8db0e89cc011", "score": "0.66391414", "text": "def rotate(mat,angle):\n return np.dot(Mueller.rotator(angle), np.dot(mat, Mueller.rotator(-angle)))", "title": "" }, { "docid": "810df9a5e40f648423b3091811134711", "score": "0.663493", "text": "def rotate(self, angle=45, center=(0, 0)):\n if angle == 0:\n return self\n if hasattr(center, \"center\"):\n center = center.center\n self.rotation += angle\n self.origin = _rotate_points(self.origin, angle, center)\n if self.owner is not None:\n self.owner._bb_valid = False\n return self", "title": "" }, { "docid": "489d6eb0e7925c6bd0381369d8b3b5c9", "score": "0.6632804", "text": "def rotate(self,X):\n alpha = random.rand() * 2*pi\n beta = self.beta_sample()\n R = Rotator.rotation_matrix(alpha,beta,0.0)\n X = np.dot(R, X)\n if self.random_flip and (random.rand() > 0.5):\n X[2,:] = -X[2,:]\n X[1,:] = -X[1,:]\n return X", "title": "" }, { "docid": "84a12102b48a3226e64b994595087ad9", "score": "0.66250026", "text": "def rotate((x,y)):\n orientation = parameter('Orientation',90) # in degrees counter-clockwise\n if orientation == None: orienation = 0\n w,h = image_size()\n if orientation == 0: return (x,y)\n if orientation == -90: return (h-y,x)\n if orientation == 90: return (y,w-x)\n if orientation == 180: return (w-x,h-y)\n return (x,y)", "title": "" }, { "docid": "a8a82ab40974fc66b4ad10dc9fd5420a", "score": "0.6624428", "text": "def Rotate(angle):\n def rotate_img(img, angle=angle):\n img = Ft.rotate(img, angle, resample=BILINEAR)\n return img\n return rotate_img", "title": "" }, { "docid": "57ec0a4b1a5b717b21885c826b39d3b0", "score": "0.6620944", "text": "def rotate(x_or_y,degree):\r\n\r\n #axis=0 represents x-axis\r\n #axis=1 represents y-axis\r\n \r\n if x_or_y=='X' or x_or_y=='x':\r\n axis=0\r\n elif x_or_y=='Y' or x_or_y=='y':\r\n axis=1\r\n elif x_or_y==0:\r\n axis=0\r\n elif x_or_y==1:\r\n axis=1\r\n else:\r\n print(\"Illeagel argument in rotate_degree\")\r\n return\r\n\r\n #decide which pins to use accroding to the axis\r\n #info is for debug used it can be eliminated\r\n if axis==0:\r\n info=\"x-axis\"\r\n stepsPin=xCwPin;\r\n cwOrCcwPin=xCcwPin\r\n elif axis==1:\r\n info=\"y-axis\"\r\n stepsPin=yCwPin;\r\n cwOrCcwPin=yCcwPin\r\n\r\n if degree>0:\r\n info=info+\" rotate cw\"\r\n GPIO.output(cwOrCcwPin, True) #cw\r\n elif degree<0:\r\n info=info+\" rotate ccw\"\r\n GPIO.output(cwOrCcwPin, False) 
#ccw\r\n elif degree==0:\r\n return\r\n\r\n tmp=abs(degree)/0.36\r\n steps=round(tmp)\r\n\r\n info=info+\" for \"+str(degree)+\" degrees \"+str(steps)+\" steps\"\r\n\r\n i=0\r\n while i<steps:\r\n GPIO.output(stepsPin, True)\r\n time.sleep(0.001)\r\n GPIO.output(stepsPin, False)\r\n time.sleep(0.05)\r\n i=i+1\r\n #GPIO.output(cwOrCcwPin, True)\r\n\r\n if SHOW_ROTATE:\r\n print(info)", "title": "" }, { "docid": "ec2160113967908d5c0392028b7cf84b", "score": "0.6615168", "text": "def rotate(self, center, angle):\n center = self.center.rotate(center, angle)\n angle = self.angle + angle\n return self.copy(center=center, angle=angle)", "title": "" }, { "docid": "9f189b905ad3c1a032abbb22b5710c49", "score": "0.65974617", "text": "def rotateEuler(self,axis, angle):\n if(axis == 'Z'):\n return np.array([[cos(angle), -sin(angle),0,0],[sin(angle), cos(angle),0,0],[0,0,1,0],[0,0,0,1]])\n if(axis == 'Y'):\n return np.array([[cos(angle),0,sin(angle),0],[0,1,0,0],[-sin(angle),0,cos(angle),0],[0,0,0,1]])\n if(axis == 'X'):\n return np.array([[1,0,0,0],[0,cos(angle), -sin(angle),0],[0,sin(angle), cos(angle),0],[0,0,0,1]])", "title": "" }, { "docid": "25e6293e1f87142ece454e3223f944bd", "score": "0.6597233", "text": "def rotate(self, angle):\n\t\tself.currentPixbuf = self.currentPixbuf.rotate_simple(angle)\n\t\tself.scaleCache[1] = 0\n\t\tgc.collect()\n\t\tself.autoScale()", "title": "" }, { "docid": "a614753afbe5596b693210651b3fdddb", "score": "0.6595791", "text": "def rotate_shape(shape, xy_center, angle_degrees):", "title": "" }, { "docid": "ac8b7f748d6be30b7bc28e70a7b51cb3", "score": "0.65842026", "text": "def rotate_degree(x_degree, y_degree):\r\n rotate(0,x_degree)\r\n rotate(1,y_degree)\r\n\r\n \r\n \r\n global _x_degrees\r\n global _y_degrees\r\n global _x_last_degrees\r\n global _y_last_degrees\r\n global _x_rotate_degrees\r\n global _y_rotate_degrees\r\n \r\n \r\n _x_degrees=_x_degrees+x_degree\r\n _y_degrees=_y_degrees+y_degree\r\n _x_last_degrees=x_degree\r\n _y_last_degrees=y_degree\r\n _x_rotate_degrees=_x_rotate_degrees+round((x_degree)/0.36)*0.36\r\n _y_rotate_degrees=_y_rotate_degrees+round((y_degree)/0.36)*0.36", "title": "" }, { "docid": "1a8b3f741a0d57f775eacc8cb6146c17", "score": "0.6583555", "text": "def rotate(self, angle, axis, position=None):\n for bound in self._bounds:\n bound.rotate(angle, axis, position)", "title": "" }, { "docid": "fb27a064b3641d0511c7354ebe86d3c7", "score": "0.65799576", "text": "def rotation(axis, angle):\n axis = np.asarray(axis)\n try:\n angle = angle[:,None]\n except:\n pass\n return np.hstack([np.asarray(axis)*np.sin(angle/2.),np.cos(angle/2.)])", "title": "" }, { "docid": "7c3161c75295c74fc44bcbecfe355288", "score": "0.65792507", "text": "def _rotate_about_origin(self, angle, axis):\n matrix = rotation_matrix(angle, axis)\n self._center = matrix.dot(self._center)", "title": "" }, { "docid": "26e6b9fde1b721866af4c5e5e1553cee", "score": "0.6571457", "text": "def R_axis_angle(axis, angle):\n\n # Trig factors.\n ca = math.cos(angle)\n sa = math.sin(angle)\n C = 1 - ca\n\n # Depack the axis.\n x, y, z = axis\n\n # Multiplications (to remove duplicate calculations).\n xs = x * sa\n ys = y * sa\n zs = z * sa\n xC = x * C\n yC = y * C\n zC = z * C\n xyC = x * yC\n yzC = y * zC\n zxC = z * xC\n\n # Update the rotation matrix.\n matrix = np.zeros((3, 3))\n matrix[0, 0] = x * xC + ca\n matrix[0, 1] = xyC - zs\n matrix[0, 2] = zxC + ys\n matrix[1, 0] = xyC + zs\n matrix[1, 1] = y * yC + ca\n matrix[1, 2] = yzC - xs\n matrix[2, 0] = zxC - ys\n matrix[2, 1] = yzC + xs\n 
matrix[2, 2] = z * zC + ca\n return matrix", "title": "" }, { "docid": "00a34f025ad2cbdc1cc889d6a695e2d7", "score": "0.65665084", "text": "def rotator(angle):\n\n c = np.cos(2*angle)\n s = np.sin(2*angle)\n return np.array([[1,0,0,0],[0,c,-s,0],[0,s,c,0],[0,0,0,1]])", "title": "" }, { "docid": "f696cb7aec35237076de49aced3de18e", "score": "0.6557947", "text": "def rotate(self, angle, reshape=False):\n return IntensityMap.rotate(self, angle, reshape=reshape)", "title": "" }, { "docid": "9e09ed73e9aa63de1b0277d29230b269", "score": "0.65444297", "text": "def setPlotRotation(ang, x,y):\n dislin.trfrot(ang,x,y)", "title": "" }, { "docid": "0e2796e230b7c68d0c51cba413565174", "score": "0.65413463", "text": "def rotate(self, angle):\n perp = TwoDV(-self[1], self[0])\n angle = angle * math.pi / 180.0\n c, s = math.cos(angle), math.sin(angle)\n return TwoDV(self[0]*c+perp[0]*s, self[1]*c+perp[1]*s)", "title": "" }, { "docid": "af68f3ae9af735c5839569a0677f7dc7", "score": "0.65344244", "text": "def rotate(mat,angle):\n return np.dot(Jones.rotator(angle), np.dot(mat, Jones.rotator(-angle)))", "title": "" }, { "docid": "e802f4a351d57ffc97dc8e2e63e42707", "score": "0.6529892", "text": "def rotate_x(p, a=0):\n # turn value to radians\n a = math.radians(a)\n translation_mat = np.matrix([\n [1,0,0,0],\n [0,math.cos(a),math.sin(a),0],\n [0,-math.sin(a),math.cos(a),0],\n [0,0,0,1],\n ], dtype=\"float32\")\n\n new_p = p @ translation_mat\n\n return new_p", "title": "" }, { "docid": "9cc191e69f6a8f77e4c6d09ea8e289e5", "score": "0.6529437", "text": "def setRotation(self, angle=0.0):\n axis = (0, 0, 1)\n oldp = self.transform.pos\n newpos = oldp + glm.vec3(0, -40, 0)\n self.transform.setPos(newpos)\n self.transform.setRot(glm.angleAxis(glm.radians(angle),\n glm.vec3(axis)))\n self.transform.setPos(oldp)", "title": "" }, { "docid": "a9ea03fccf864c005f25fc96bedb6a88", "score": "0.6523584", "text": "def rotate( self, degrees, axis ):\n # copy and normalize axis\n axis = Vector3( axis ).normalize()\n\n # get stub of self projected onto axis\n stub = Vector3( self ).project( axis )\n\n # subtract stub from self\n self -= stub\n\n # get new vector crossed with axis\n crossed = Vector3( axis ).cross( self )\n\n # trigify self and crossed to account for rotation\n crossed *= math.sin( math.radians(degrees) )\n self *= math.cos( math.radians(degrees) )\n\n # add crossed and stub components to self\n self += crossed\n self += stub\n \n return self", "title": "" }, { "docid": "6c466b29b174eba6bfd18820c59f0838", "score": "0.65212995", "text": "def rotate(vector, angle):\n return np.cos(angle) * vector[0] + np.sin(angle) * vector[1], \\\n -np.sin(angle) * vector[0] + np.cos(angle) * vector[1]", "title": "" }, { "docid": "d82b347abe0351fbd77c985e2c13379b", "score": "0.6519837", "text": "def rotate2(x, angle, origin=(0, 0)):\n origin = np.asarray(origin)\n x = np.asarray(x) - origin\n r = rotation_matrix2(angle)\n return x.dot(r.T) + origin", "title": "" }, { "docid": "c4bd59e8d878d5ae893486c2f42180f5", "score": "0.6515658", "text": "def rot_x_deg(self):\n # TODO describe which axis this is\n return math.degrees(self._rot_x_rad)", "title": "" }, { "docid": "91a67c4069970f9caac0975200369597", "score": "0.6513588", "text": "def _rotate_about_origin(self, angle, axis):\n print 'Invoked abstract {}._rotate_about_origin({}, {})'.format(\n self, angle, axis)\n return", "title": "" }, { "docid": "e1ce4998ff8ba0d638c21e60e69bd12e", "score": "0.6497074", "text": "def left(self, angle):\r\n self.rotation -= angle", "title": "" }, { 
"docid": "d70fd08ac6cda48cbc99edceff28424d", "score": "0.6486738", "text": "def rotate(self, angle):\n self._surf = pygame.transform.rotate(self._surf, angle).convert_alpha()", "title": "" }, { "docid": "ea72df70c1cd7c4610ce7581ce3e6072", "score": "0.64845425", "text": "def rotate(self, angle: int):\n self._rotation = (self._rotation + angle) % 360\n # Rotate all sub-spinners recursively\n for item in self.sub_spinners:\n item.rotate(angle)", "title": "" }, { "docid": "dcae370c844e875b0ab19d55a0579b02", "score": "0.64796025", "text": "def rotateEuler(axis, angle):\n if(axis == 'Z'):\n return np.array([[cos(angle), -sin(angle),0,0],[sin(angle), cos(angle),0,0],[0,0,1,0],[0,0,0,1]])\n if(axis == 'Y'):\n return np.array([[cos(angle),0,sin(angle),0],[0,1,0,0],[-sin(angle),0,cos(angle),0],[0,0,0,1]])\n if(axis == 'X'):\n return np.array([[1,0,0,0],[0,cos(angle), -sin(angle),0],[0,sin(angle), cos(angle),0],[0,0,0,1]])", "title": "" }, { "docid": "a466e69d2c591acaabd9b65ae433f7c5", "score": "0.64790076", "text": "def rotate(self, rotation):\n self.coords = dot(rotation, self.coords)\n return self", "title": "" } ]
f41f95f0d47bf4698f1b3e6d929154ee
return dict to copy self when calling make_address(result)
[ { "docid": "6a1668551d2e929f85c5adc0a12f833c", "score": "0.0", "text": "def to_dict(self):\n raise NotImplementedError(\"derived class must override this method\")", "title": "" } ]
[ { "docid": "f98dc2ea3a10e34509c40b246be66dee", "score": "0.6085023", "text": "def copy(self):\n return dict(self)", "title": "" }, { "docid": "08bb84a713f287a2e3ba6a811ef324e3", "score": "0.5942571", "text": "def copy(self):\n return ndict(super().copy())", "title": "" }, { "docid": "abc7b87630baf8ab31be28ce490c2dd6", "score": "0.5904105", "text": "def to_dict(self, *args, **kwargs):\n copy = self.copy(deep=False)\n copy.data = _prepare_data(copy.data, kwargs.get(\"context\"))\n return super(LookupData, copy).to_dict(*args, **kwargs)", "title": "" }, { "docid": "8927d66d8a88163abb379d14809c565c", "score": "0.5819295", "text": "def result_as_dict(self, result):\n result_url = getattr(result, '_source_url', None)\n\n if result_url:\n return {'source_url': result_url}\n else:\n return {'value': result.pk}", "title": "" }, { "docid": "0975f0a1e30dbd5ed6ea832bc42d42c2", "score": "0.58143884", "text": "def to_digest(self):\n return {\n 'street_1': self.street_1,\n 'street_2': self.street_2,\n 'city': self.city,\n 'state': self.state,\n 'zip': self.zip,\n }", "title": "" }, { "docid": "ddefe30f79ad1734aeee069a60bd46a9", "score": "0.5555994", "text": "def _generate_address(self, key_iterator):\n # type: (KeyIterator) -> Address\n return self.address_from_digest(self._get_digest(key_iterator))", "title": "" }, { "docid": "e24e78fc649a7ae0fcd9a9ff04058f39", "score": "0.55336744", "text": "def __copy__(self):\n return self._copy(False, {})", "title": "" }, { "docid": "e06d176d63ec11f0f40aa061d1e3546f", "score": "0.5491245", "text": "def make_an_address(id_person):\n return {\n \"num_address\": \"0\",\n \"person_id_person\": str(id_person),\n \"is_current\": 1,\n \"street_num\": random.randint(1, 200),\n \"street\": FAKE_FRANCAIS.street_name(),\n \"city\": FAKE_FRANCAIS.city(),\n \"zip_code\": f\"{random.randint(1000, 99000):05}\",\n \"country\": \"France\",\n \"localization\": \"ASK_GG\",\n }", "title": "" }, { "docid": "7b80cd87c840ca47273e4660e17d3e55", "score": "0.54887056", "text": "def gotPhysAddress(self, results):\r\n self.portAddress = {}\r\n for oid in results:\r\n port = int(oid.split(\".\")[-1])\r\n if self.normPort is not None:\r\n port = self.normPort(port)\r\n if port not in self.ports:\r\n continue\r\n address = [ \"%x\" % ord(a) for a in str(results[oid])]\r\n if address and len(address) == 6:\r\n self.portAddress[port] = \":\".join(address)", "title": "" }, { "docid": "f2770bb4ddd42a32378bdfed51c4427e", "score": "0.5481426", "text": "def clone(self):\n # dict(self.params) -> Make a copy of the dict\n return self.__class__(self.name, dict(self.params), self.value)", "title": "" }, { "docid": "531dec446aa2de2353e76eba7c6a5ef7", "score": "0.54674196", "text": "def _copy(self):\r\n obj = OrderDict()\r\n obj.dic = self.dic.copy()\r\n obj.order_list = self.order_list.copy()\r\n\r\n return obj", "title": "" }, { "docid": "4adc34a3eef855d860df18a7d4d71575", "score": "0.54539573", "text": "def get_address(self, address):\n address, _ = Address.objects.get_or_create(\n hash=address\n )\n return address", "title": "" }, { "docid": "2ea9bc8b0673df67e83e2587a7aafc15", "score": "0.5444063", "text": "def to_dict(self):\n output = copy.deepcopy(self.__dict__)\n return output", "title": "" }, { "docid": "f17d13ba129460eeaf21aa245970a524", "score": "0.5428665", "text": "def json(self):\n # Response legacy data: allow for any column to be null.\n address = {\n }\n if self.street:\n address['street'] = self.street\n if self.street_additional:\n address['streetAdditional'] = self.street_additional\n if 
self.city:\n address['city'] = self.city\n if self.region:\n address['region'] = self.region\n if self.country:\n address['country'] = self.country\n if self.postal_code:\n address['postalCode'] = self.postal_code\n\n return address", "title": "" }, { "docid": "882112916a78d2e9eb162609196d144f", "score": "0.5415016", "text": "def add_result_to_self(self, result={}):\n self.__dict__.update(result)", "title": "" }, { "docid": "15a1f38596b5fba0e8656948c871ec9d", "score": "0.5404093", "text": "def create_from_dict(new_info: dict):\n address = Address()\n # API requests everything but streetAdditional is mandatory.\n address.street = new_info.get('street')\n address.street_additional = new_info.get('streetAdditional')\n address.city = new_info.get('city')\n address.region = new_info.get('region')\n # address.country = pycountry.countries.search_fuzzy(new_info.get('country'))[0].alpha_2\n address.country = new_info.get('country')\n address.postal_code = new_info.get('postalCode')\n address.format_postal_code()\n return address", "title": "" }, { "docid": "9a590ec4ba2468490798676fda917e8b", "score": "0.54016745", "text": "def store(self, result_dict):\n pass # Leave up to implementation", "title": "" }, { "docid": "313991ab47e8f2c5674c7df7b3e9feb6", "score": "0.5357627", "text": "def to_dict(self):\n output = copy.deepcopy(self.__dict__)\n return output", "title": "" }, { "docid": "df00289c694ef9f05bfffedc6a9b9649", "score": "0.5357419", "text": "def dictcopy(self):\n return dict(self.__store)", "title": "" }, { "docid": "90e64a631d6258eee93012d3ad3a47e7", "score": "0.5326074", "text": "def _proxy(self):\n if self._context is None:\n self._context = AddressContext(\n self._version,\n account_sid=self._solution['account_sid'],\n sid=self._solution['sid'],\n )\n return self._context", "title": "" }, { "docid": "784f47dd6edf786b6ec2c873c1bc1eef", "score": "0.5320322", "text": "def construct(self):\n return address.render(self.values)", "title": "" }, { "docid": "9cb2b127cf38ac41578612b98d8bdc2e", "score": "0.5288331", "text": "def __init__(self, results):\n addresses = []\n for index, result in enumerate(results):\n address = Address(result)\n addresses.append(address)\n self.index_lookup[address.index] = index\n if address.id:\n self.id_lookup[address.id] = index\n super(AddressCollection, self).__init__(addresses)", "title": "" }, { "docid": "9cf7336909050f9d89f29a9c2ad0c1cb", "score": "0.52818626", "text": "def copy(self):\n return self.__deepcopy__({})", "title": "" }, { "docid": "518f5bcef6e23f24424662bc95ea716b", "score": "0.5276979", "text": "def __deepcopy__(self, memo=None):\n my_cp = self.__local_dict['_cp']\n my_id = self.__local_dict['_id']\n request = Request('copy', {'id':my_id})\n return my_cp.make_request(request)", "title": "" }, { "docid": "5cab2df183e87b1f0ec60708ead738d2", "score": "0.5274421", "text": "def 
get_dict(self):", "title": "" }, { "docid": "856557643ae75018cfea9a2412d51cf3", "score": "0.5261632", "text": "def extend_network_dict(self, session, base_model, result):\n self._call_on_dict_driver(\"extend_network_dict\", session, base_model,\n result)", "title": "" }, { "docid": "3b12f7c3fce044f77821649d8c8cf39e", "score": "0.524867", "text": "def get_address(self) -> MultisigAddress:\n if not self._digests:\n raise ValueError(\n 'Must call ``add_digest`` at least once '\n 'before calling ``get_address``.',\n )\n\n if not self._address:\n address_trits = [0] * HASH_LENGTH\n self._sponge.squeeze(address_trits)\n\n self._address = MultisigAddress.from_trits(\n address_trits,\n digests=self._digests[:],\n )\n\n return self._address", "title": "" }, { "docid": "32ec98b5644bca1a201370576c9e300f", "score": "0.52385753", "text": "def test_NDB_todict2(self):\n A = self.aclass\n a = A(address={'line1': 'Juniusstraat'})\n self.assertEqual(a.to_dict(), {'akey': None,\n 'name': None,\n 'gender': None,\n 'address': {'line1': 'Juniusstraat'}})", "title": "" }, { "docid": "fd6bd3bccf188ee0f44722372b4394be", "score": "0.52223694", "text": "def __result_to_hash(r):\n return {\n \"store_id\" : r.get('tcin'),\n \"upc\" : r.get('upc'),\n \"title\" : r.get('title'),\n \"publisher\" : r.get('brand'),\n \"sale_price\" : str(r['offer_price']['price']).replace(\".\",\"\"),\n \"retail_price\" : str(r['list_price']['price']).replace(\".\",\"\"),\n \"store_url\" : \"http://www.target.com/p/-/-/A-%s\" % r.get('tcin'),\n \"img_url\" : None,\n }", "title": "" }, { "docid": "529ee4f5229f7852b754804d8cdb89c5", "score": "0.52097285", "text": "def as_dict(self):\n return copy.deepcopy(self._combined_dict)", "title": "" }, { "docid": "e9392060a8ebfeb4b587663d9b1f02bf", "score": "0.519813", "text": "def source_address_type_api_to_user_mapper(self) -> dict[int, str] | dict[str, str]:\n return reverse_dict(self.source_address_type_user_to_api_mapper)", "title": "" }, { "docid": "51446a56b5fa1fd44c920a62b37d0567", "score": "0.5197148", "text": "def to_dict(self):\n obj_dict = super().to_dict()\n obj_dict['T_ref'] = self.T_ref\n obj_dict['HoRT_ref'] = self.HoRT_ref\n return obj_dict", "title": "" }, { "docid": "6f9bab8c50ad8bd49790ef012195fd6d", "score": "0.5193242", "text": "def generate_dict(self):", "title": "" }, { "docid": "5437b5d47383eeaca19762006a6d75e1", "score": "0.51913846", "text": "def convert(instance) -> dict:", "title": "" }, { "docid": "ec77fab62cf9b1968975deae2c6820e6", "score": "0.51888263", "text": "def __deepcopy__(self, memo):\n return self.__class__().deserialize(self.serialize())", "title": "" }, { "docid": "42acd9f5f6f6cc830dd47252e538cfb9", "score": "0.5185012", "text": "def copy(self):\n return self.__class__(self.map, self.data)", "title": "" }, { "docid": "29e0b27cd396545f9081e197597732f7", "score": "0.51824313", "text": "def to_dict(self):\n\n return {\n 'passout_year': self.passout_year,\n 'current_address': self.current_address,\n 'permanent_address': self.permanent_address\n }", "title": "" }, { "docid": "27e25fac92adcae88c5d43052b76745c", "score": "0.51768976", "text": "def _as_dict(self,res):\n return self._zip_rows( res )", "title": "" }, { "docid": "6b9f625c96a20cffb7a679a3a30d6fe6", "score": "0.51759213", "text": "def info_extraction(r, res):\n 
name = \"N/A\"\n street = \"N/A\"\n house_number = \"N/A\"\n name = r.findAll(\"strong\")[1].get_text()\n print(name)\n fake_address = r.find(\"i\", {\n \"class\": \"fas fa-map-marker\"}) # variable representing the address regardless if it exists or not\n if fake_address != None:\n address = fake_address.next_sibling.strip()\n if address == \"Ort jährlich wechselnd\": # if the address is constantly changing the street and house number are as well\n street = \"Ort jährlich wechselnd\"\n house_number = \"Ort jährlich wechselnd\"\n else:\n street = r.findAll(\"strong\")[2].next_sibling.strip().replace(\"- \", \"\").split(\",\")[\n 0].strip() # gets the address and removing \"- \" and getting the first string after dividing the string with \",\" in order to obtain the street\n house_number = r.findAll(\"strong\")[2].next_sibling.strip().replace(\"- \", \"\").split(\",\")[\n 1].strip() # gets the address and removing \"- \" and getting the second string after dividing the string with \",\" in order to obtain the house number\n print(street)\n print(house_number)\n res = {'name': name,\n 'street': street,\n 'house_number': house_number,\n 'branche': 'health',\n 'category': 'care'}\n return (res)", "title": "" }, { "docid": "d7d77aa3d7ffc0115d88a37b3899d9ec", "score": "0.5170763", "text": "def gen_address():\n state_iso_code = get_random_state_iso_code()\n zip_code = get_random_area_code(state_iso_code)\n address = {'stateIsoCode': state_iso_code,\n 'zipCode': zip_code, }\n return address", "title": "" }, { "docid": "61c493105b806a904ce07aa2ea592f6d", "score": "0.51574785", "text": "def as_private_simple_dict(self) -> dict:\n\n return {\n \"device_id\": self.id,\n \"device_owner\": self.owner,\n \"power\": self.power,\n \"address\": self.address\n }", "title": "" }, { "docid": "1bafe326285f98164ec24b570fc9fea2", "score": "0.51456815", "text": "def to_map(self):\n\t\treturn {\n\t\t\t'id':self.id\n\t\t\t,'name':self.name\n\t\t\t,'last_name':self.last_name\n\t\t\t,'birth_date':utilsattr.parse_datetime_to_str(self.birth_date)\n\t\t\t,'telephones':[telephone.to_map() for telephone in self.telephones]\n\t\t\t,'addresses':[address.to_map() for address in self.addresses]\n\t\t}", "title": "" }, { "docid": "97a8079415fc765532356ca9d2dbce47", "score": "0.51438195", "text": "def copy_deep(self) -> \"SuperDict\":\n return pickle.loads(pickle.dumps(self, -1))", "title": "" }, { "docid": "77242759a1e4bb061fdc7a98d022acd0", "score": "0.5139085", "text": "def __init__(self):\n self.result_obj = {}", "title": "" }, { "docid": "2937f92c0794da327c28a4921c06c205", "score": "0.51377267", "text": "def to_address(self):\n return self._to_address", "title": "" }, { "docid": "af0c2588e2092c724331891c39c4089f", "score": "0.513759", "text": "def __call__(self):\n\t\treturn self.__dict__", "title": "" }, { "docid": "43841cf1987bc2960ae499b8591926b2", "score": "0.51367235", "text": "def get_address_history(self, addr: str) -> Dict[str, int]:\n h = {}\n # we need self.transaction_lock but get_tx_height will take self.lock\n # so we need to take that too here, to enforce order of locks\n with self.lock, self.transaction_lock:\n related_txns = self._history_local.get(addr, set())\n for tx_hash in related_txns:\n tx_height = self.get_tx_height(tx_hash).height\n h[tx_hash] = tx_height\n return h", "title": "" }, { "docid": "1ae993906a60bb86d8f57c4a1f5a6a2f", "score": "0.5135327", "text": "def copy() -> dict:\n return _CONTEXT.get().copy()", "title": "" }, { "docid": "fb8ac309be2fd41162fcddc502a7f02d", "score": "0.5132617", 
"text": "def address_worker(data: Tuple) -> Tuple:\n coordinates_as_string = data[0]\n original_city = data[1]\n location = reverse_coords(coordinates_as_string)\n country_code = location.raw[\"address\"][\"country_code\"].upper()\n\n if \"city\" in location.raw[\"address\"]:\n city = location.raw[\"address\"][\"city\"]\n elif \"town\" in location.raw[\"address\"]:\n city = location.raw[\"address\"][\"town\"]\n elif \"village\" in location.raw[\"address\"]:\n city = location.raw[\"address\"][\"village\"]\n else:\n city = original_city\n return location.address, country_code, city", "title": "" }, { "docid": "4483c010187124b70b5c961287371899", "score": "0.51317334", "text": "def get_function_addr_dict(self):\n ret_dict = {}\n for item in self.functions:\n ret_dict.update({self.functions[item][0]: item})\n return ret_dict", "title": "" }, { "docid": "82f2f809bd281eed4566e20bb9c7ac0c", "score": "0.51154554", "text": "def __copy__(self):\n copied = type(self)()\n copied.__dict__.update(self.__dict__)\n return copied", "title": "" }, { "docid": "82f2f809bd281eed4566e20bb9c7ac0c", "score": "0.51154554", "text": "def __copy__(self):\n copied = type(self)()\n copied.__dict__.update(self.__dict__)\n return copied", "title": "" }, { "docid": "82f2f809bd281eed4566e20bb9c7ac0c", "score": "0.51154554", "text": "def __copy__(self):\n copied = type(self)()\n copied.__dict__.update(self.__dict__)\n return copied", "title": "" }, { "docid": "82f2f809bd281eed4566e20bb9c7ac0c", "score": "0.51154554", "text": "def __copy__(self):\n copied = type(self)()\n copied.__dict__.update(self.__dict__)\n return copied", "title": "" }, { "docid": "0e3e975418c8486d678b6ee6541b7848", "score": "0.51137406", "text": "def get_reverse_dict(self):\n return self._reverse_dictionary.copy()", "title": "" }, { "docid": "63ce935d145416fdf6805e4e5ae9c000", "score": "0.5113158", "text": "def copy(self):\n return self.__class__(self.maps[0].copy(), *self.maps[1:])", "title": "" }, { "docid": "700d7bb4dfd6eada6487b11d566cbaf4", "score": "0.51038593", "text": "def to_dict(self):\n return {\n \"id\": self.id,\n \"supplier\": self.supplier,\n \"street_address\": self.street_address,\n \"tel_1\": self.tel_1,\n \"tel_2\": self.tel_2,\n \"email_address\": self.email_address,\n \"website\": self.website\n }", "title": "" }, { "docid": "e3d7598562c14081e2f00b57d661de47", "score": "0.51022846", "text": "def copy(self):\n return type(self)(**{k: v.copy() if isinstance(v, AttrDict) else v for k, v in self.items()})", "title": "" }, { "docid": "5806f177b169d0f7206c505d91b7334d", "score": "0.5099606", "text": "def __deepcopy__(self, memo):\n return self.__class__(\n {k: copy.deepcopy(v, memo) for k, v in self.items()},\n db_client=self.db_client,\n dev_name=self.dev_name)", "title": "" }, { "docid": "58cdae25952fafd38839d75242d7f21d", "score": "0.50978756", "text": "def to_xmlrpc_dict(self):\n return {\n 'paste_id': self.paste_id,\n 'code': self.code,\n 'parsed_code': self.parsed_code,\n 'pub_date': int(time.mktime(self.pub_date.timetuple())),\n 'language': self.language,\n 'parent_id': self.parent_id,\n 'url': self.url\n }", "title": "" }, { "docid": "9e9fd5a86c516600141604e55d6f49cf", "score": "0.5097118", "text": "def copy(self):\r\n self_copy = struct()\r\n for field in self.keys():\r\n if isinstance(self[field], struct):\r\n self_copy[field] = self[field].copy()\r\n else:\r\n self_copy[field] = _copy.copy(self[field])\r\n \r\n return self_copy", "title": "" }, { "docid": "3e431d85656454e9a7da356065e49633", "score": "0.50959253", "text": "def 
transfer_address_to_parameter(storage_dict, corresponding):\n return_dict = dict()\n for key in storage_dict:\n return_dict[corresponding[int(key, 16)]] = int(storage_dict[key], 16)\n\n return return_dict", "title": "" }, { "docid": "f8d5590350fb6215060699e50b1e6bd2", "score": "0.5090913", "text": "def as_dict(self):\n return copy.copy(self.__dict__)", "title": "" }, { "docid": "829e66df958c45aa5d11013e61c4d507", "score": "0.5088112", "text": "def _resolve_host(key):\n\n try:\n # Resolve the IPv4 address and hostname from the key\n new_ipv4 = socket.gethostbyname(key)\n # print(\"new_ipv4: {0}\".format(new_ipv4))\n new_host = socket.gethostbyaddr(new_ipv4)[0]\n # print(\"new_host: {0}\".format(new_host))\n d = {\"key\": key, \"hostname\": new_host, \"ipv4addr\": new_ipv4}\n except (socket.gaierror, socket.herror, TypeError):\n # need to add 'as ex' when uncommenting line below\n # print(\"Error for key '{0}': rc={1}\".format(key, ex.returncode))\n d = {\"key\": key, \"hostname\": False, \"ipv4addr\": False}\n return d", "title": "" }, { "docid": "6ed82c7fbcd2e75cae485e0ea401c51b", "score": "0.50851643", "text": "def extend_subnet_dict(self, session, base_model, result):\n self._call_on_dict_driver(\"extend_subnet_dict\", session, base_model,\n result)", "title": "" }, { "docid": "d9cbca4a961d334015230ea55899fd9e", "score": "0.50809145", "text": "def lives(self) -> \"PersonAddressBuilder\":\n return PersonAddressBuilder(self.person)", "title": "" }, { "docid": "07737cb8089ad44a08aafb6c0f6bc779", "score": "0.5078865", "text": "def serialise(self) -> dict:", "title": "" }, { "docid": "680a9b8eb9f029479cc0a46c77b89102", "score": "0.5066401", "text": "def __getstate__(self):\n new_dict = super().__getstate__()\n del new_dict['_networks']\n new_dict['_default_parameters'] = self.parameters\n return new_dict", "title": "" }, { "docid": "b334d6390cabd43b0905ba7a6b9e79d0", "score": "0.5061488", "text": "def rekey(self):\n self.objdict = dict()\n self.objinddict = dict()\n for r in self.results():\n if r.obj is not None:\n self.objdict[r.obj.objname] = r\n self.objinddict[r.obj.objind] = r\n self.objdict[r.label] = r", "title": "" }, { "docid": "6c1f13509498a04fcce46847b648869e", "score": "0.5059301", "text": "def __dict__(self) -> dict:\n return self.value", "title": "" }, { "docid": "6d83678b7dcfb77dbf8dcde9d3e2ffc6", "score": "0.50527114", "text": "def to_dict(self) -> Dict[str, Union[str, int]]:\n return {\"address\": self.address, \"port\": self.port}", "title": "" }, { "docid": "2979c5b6bba0f8e1d238c823d86b6450", "score": "0.50506896", "text": "def create_address(self):\n return self.create(\"AddressToValidate\")", "title": "" }, { "docid": "fa5afeabd4e380ede4b40955d52eb0d8", "score": "0.50505114", "text": "def clone(self) -> \"KS001\":\n result = KS001(identifier=self.identifier)\n for k in self.key_aliases:\n result.key_aliases.set_alias(k, self.key_aliases.get_alias(k))\n for k in self.value_aliases:\n result.value_aliases.set_alias(k, self.value_aliases.get_alias(k))\n for d in self.dicts:\n created = result._add_dict(name=d[\"name\"])\n for k in d[\"dict\"]:\n created[k] = copy.copy(d[\"dict\"][k])\n return result", "title": "" }, { "docid": "e2de4db23173edc0f8143668bf16c7d6", "score": "0.5048811", "text": "def __getstate__(self):\n self_dict = self.__dict__.copy()\n del self_dict[\"_pool\"]\n del self_dict[\"_list\"]\n return self_dict", "title": "" }, { "docid": "b0cb6ac8c6bff3a55a77ef03f71af9f9", "score": "0.50457895", "text": "def pack_result(cls, obj: np.number, key: str) -> dict:\n 
return super().pack_result(obj=obj.item(), key=key)", "title": "" }, { "docid": "0155ac734a6eb80052b29df106731e26", "score": "0.5037666", "text": "def init(self):\n carry = self.network.init()\n return {self.cname: carry}", "title": "" }, { "docid": "ac68f5412828b9ffd107da74a7d87b8f", "score": "0.5036206", "text": "def _BuildAddressBook(self, zone, address):\n if zone not in self.addressbook:\n self.addressbook[zone] = collections.defaultdict(list)\n name = address.parent_token\n for ip in self.addressbook[zone][name]:\n if ip.supernet_of(address):\n return\n if address.supernet_of(ip):\n for index, ip_addr in enumerate(self.addressbook[zone][name]):\n if ip_addr == ip:\n self.addressbook[zone][name][index] = address\n return\n self.addressbook[zone][name].append(address)", "title": "" }, { "docid": "b77c8c84b198267f3e6f167192b5d136", "score": "0.503591", "text": "def copy_deep2(self) -> \"SuperDict\":\n return json.loads(json.dumps(self))", "title": "" }, { "docid": "18b464b75b9c281954825ab50f74a8ae", "score": "0.50355166", "text": "def GAddresses(self):\n _rc(_gclibo.GAddresses(self._buf, _buf_size))\n addr_dict = {}\n for line in str(self._buf.value.decode(_enc)).splitlines():\n fields = line.split(',')\n if len(fields) >= 2:\n addr_dict[fields[0]] = fields[1]\n else:\n addr_dict[fields[0]] = ''\n \n return addr_dict", "title": "" }, { "docid": "1242d430f7945327c8c2ca62ed971fc8", "score": "0.50354403", "text": "def copy(self):\r\n rv = object.__new__(self.__class__)\r\n rv.__dict__.update(self.__dict__)\r\n rv.identifiers = object.__new__(self.identifiers.__class__)\r\n rv.identifiers.__dict__.update(self.identifiers.__dict__)\r\n return rv", "title": "" }, { "docid": "2fa9e868495db0979bedc4531cd14720", "score": "0.50354046", "text": "def _get_address_by_name(self, name, networktype):\n addresses = self.context.setdefault('_address_names', {})\n address_name = utils.format_address_name(name, networktype)\n address = addresses.get(address_name)\n if address is None:\n address = self.dbapi.address_get_by_name(address_name)\n addresses[address_name] = address\n\n return address", "title": "" }, { "docid": "92e5c985604e1e137c0eca294c77c790", "score": "0.5030948", "text": "def extract(self) -> dict:\n self._preprocessing()\n self._search()\n self._postprocessing()\n return self._collect()", "title": "" }, { "docid": "6ebd74b54519a9aaade3117ea234d490", "score": "0.50263685", "text": "def to_json(self) -> Dict[str, Dict[str, str]]:\n return {\n str(self.url): {\n \"ETag\": self.etag,\n \"Last-Modified\": self.last_modified,\n \"hash\": self.hash,\n }\n }", "title": "" }, { "docid": "cb4020cb29e958e833d465a909f0916f", "score": "0.5015225", "text": "def copy(self: _R) -> _R:\n return self.__class__(\n self.name,\n self.service_name,\n self.module_name,\n use_alias=self.use_alias,\n stringify=self.stringify,\n )", "title": "" }, { "docid": "59be413da8fde9c14e394588eda407e8", "score": "0.50136536", "text": "def __getstate__(self):\n return dict(self)", "title": "" }, { "docid": "900fc45ea9331fbdf17b736657bb511a", "score": "0.5008678", "text": "def get_addresses(self, context, container):", "title": "" }, { "docid": "f60b1fb017b6c320fa67bf18b8535d57", "score": "0.50061494", "text": "def serialize(self):\n return {\n 'Customer_id': self.Customer_id,\n 'Name': self.Name,\n 'Address': self.Address\n }", "title": "" }, { "docid": "fe6adc8b46f1b72c360c3e4b953ab28c", "score": "0.49994132", "text": "def Clone(self):\n callResult = self._Call(\"Clone\", )\n\n if callResult is None:\n return None\n\n\n 
objId = callResult\n classInstance = ServiceMethodReturnMappingItem\n return classInstance(self._xmlRpc, objId)", "title": "" }, { "docid": "714c0c2228d177751c7291dc2e6c5c0f", "score": "0.49963933", "text": "def __createAddress(self) -> str:\n text = str(self.__version) + self.__publicKey\n # textSHA256Hash = SHA256.new(text.encode('utf8')).hexdigest()\n # return RIPEMD.new(textSHA256Hash).hexdigest()\n return TLCUtilities.getSHA256RIPEMDHash(text)", "title": "" }, { "docid": "c2ddf179314817975e6f2c4abc4eed3b", "score": "0.49863717", "text": "def asDict(self):\n new = {}\n new.update(self)\n return new", "title": "" }, { "docid": "a4a93daa60b84edce1b77e749ec05b48", "score": "0.4983785", "text": "def __deepcopy__(self, memo):\n\n o = self.__class__(self._model)\n for k, v in self.__dict__.items():\n if k == '_query_results_cache':\n o.__dict__[k] = None\n else:\n o.__dict__[k] = copy.deepcopy(v, memo)\n\n return o", "title": "" }, { "docid": "1f579ea606cda167240fbd4d4583eadf", "score": "0.49787676", "text": "def to_dict(self):\n tmp_dict = self.__dict__\n copy_dict = tmp_dict.copy()\n if \"_sa_instance_state\" in copy_dict:\n del copy_dict['_sa_instance_state']\n return copy_dict", "title": "" }, { "docid": "8e96ee61607b7f4469b79ec22ced112d", "score": "0.4972121", "text": "def generate_new_address(self, index):\n address = btc.pubkey_to_address(btc.bip32_descend(self.public_key, [0, index]))\n return address", "title": "" }, { "docid": "ee63e42dae0b2951ca9c252c0967e5de", "score": "0.49628037", "text": "def _create_internal_dict(*args, **kwargs):\n return Config(*args, **kwargs)", "title": "" }, { "docid": "1c6348a86dbf00680f882b640f58f6c2", "score": "0.49594033", "text": "def to_dict(self: Sign) -> Dict[str, Any]:\n return_dict = super().to_dict()\n del return_dict[\"transaction\"]\n return_dict[\"tx_json\"] = self.transaction.to_dict()\n return return_dict", "title": "" }, { "docid": "786f4c9b537f9b544012b0fa6c1c52ae", "score": "0.49547675", "text": "def __copy__(self):\n return self.__class__(**self.mapping(validate=False))", "title": "" } ]
e48b8f096488845f34830d6f36652abc
method that the agent calls when it gets offline
[ { "docid": "0c592c6a75e8b02731b38ce364aa5dd9", "score": "0.673978", "text": "def agent_offline(self, request, timeout, metadata=None, with_call=False, protocol_options=None):\n raise NotImplementedError()", "title": "" } ]
[ { "docid": "c0fa22975fe5d791e5801d73e226d912", "score": "0.80011284", "text": "def offline(self):\n raise Fail(\"offline\")", "title": "" }, { "docid": "9e8365a06a73f5c571e04e37e5d1c765", "score": "0.73997015", "text": "def agent_offline(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "title": "" }, { "docid": "a520a5566227549a16d6fe3117b74545", "score": "0.7319257", "text": "def agent_offline(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "title": "" }, { "docid": "0fae1b4bb4e947e3da2b54f87a3e6236", "score": "0.69461876", "text": "def _got_offline(self, msg: Any) -> None:\n\n # remove from list\n jid = msg[\"from\"].full\n if jid in self._online_clients:\n self._online_clients.remove(jid)\n\n # clear interface cache\n if jid in self._interface_cache:\n del self._interface_cache[jid]\n\n # send event\n self._send_event_to_module(ModuleClosedEvent(), msg[\"from\"].username)", "title": "" }, { "docid": "97a20743902d11391228c29543d8a809", "score": "0.6494916", "text": "def agent_online(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "title": "" }, { "docid": "045399316fb0cab22f43fc887fed1829", "score": "0.6425709", "text": "def tempOffline(self):\n raise Fail(\"temp. offline\")", "title": "" }, { "docid": "12fcb571ac83ec81d0519ed0b99a021e", "score": "0.64119047", "text": "def test_online_offline(jenkins):\n # Master node name should be case insensitive\n # mn0 = jenkins.get_node('MaStEr')\n mn = jenkins.get_node(\"Built-In Node\")\n # self.assertEqual(mn, mn0)\n\n mn.set_online() # It should already be online, hence no-op\n assert mn.is_online() is True\n\n mn.set_offline() # We switch that suckah off\n mn.set_offline() # This should be a no-op\n assert mn.is_online() is False\n\n mn.set_online() # Switch it back on\n assert mn.is_online() is True", "title": "" }, { "docid": "3c8ca71186e519b8f06c00bef37161e3", "score": "0.6374582", "text": "def agent_online(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "title": "" }, { "docid": "ab13e8a22b31c4622b61eeb870b8c9f5", "score": "0.63468623", "text": "def is_online():\n raise NotImplementedError(\"Override me!\")", "title": "" }, { "docid": "fe422a82984508bfe0f0a873c57fecc9", "score": "0.63455105", "text": "def offline_requested(self):\n return self._offline_requested", "title": "" }, { "docid": "96a30ff0c35e3ff44a1134b3c0e1a82f", "score": "0.62560856", "text": "def OnContactGoneOffline(\n self,\n contact\n ):\n pass", "title": "" }, { "docid": "c3ed279447af4c4a2aad1654768b681e", "score": "0.6128427", "text": "def is_online(self):\n raise Exception(\"Not implemented!\")", "title": "" }, { "docid": "60e2325c3a8cb9882b26f36aa51fc757", "score": "0.5979231", "text": "def setOffline():\n params = {\n }\n result = call('account.setOffline', **params)\n return parse_response(result)", "title": "" }, { "docid": "6756a9caea60035101d1f5e991d79b36", "score": "0.59684336", "text": "def _sk_on_contact_gone_offline(self, parms):\n l_cleanparms = module_id2classes[2](parms.get(1), self.transport)\n self.OnContactGoneOffline(l_cleanparms)", "title": "" }, { "docid": "25eacf6e1ab0a9a4da9ad7e37b4dc5d3", "score": "0.5966012", "text": "def online(self):\n return True", "title": "" }, { "docid": "71ab4c310527fa4ff6b198b40fd37250", 
"score": "0.5964164", "text": "def admin_offline( self, offline = None, options = {} ):\n\n assert isinstance( offline, (bool)), \"admin_offline(): Argument 'offline' must be (one) of type(s) '(bool)'; given %s\" % type( offline ).__name__\n assert isinstance( options, (dict)), \"admin_offline(): Argument 'options' must be (one) of type(s) '(dict)'; given %s\" % type( options ).__name__\n\n (REQ_SCHEMA, REP_SCHEMA) = self.get_schemas( \"admin_offline\" )\n\n obj = collections.OrderedDict()\n obj['offline'] = offline\n obj['options'] = options\n\n return self.post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/admin/offline' )", "title": "" }, { "docid": "296d50ef1375b9360619dabcff08e585", "score": "0.59436214", "text": "def offline(self, request: Request) -> Response:\n event = request.data\n\n worker = Worker.get_or_create(event)\n worker.finished = timezone.now()\n worker.save()\n\n return Response()", "title": "" }, { "docid": "8ea1604362cef391fa7d2523ccff64ef", "score": "0.5851629", "text": "async def setOfflineMode(self, value: bool) -> None:\n if self._offline == value:\n return\n self._offline = value\n await self._client.send('Network.emulateNetworkConditions', {\n 'offline': self._offline,\n 'latency': 0,\n 'downloadThroughput': -1,\n 'uploadThroughput': -1,\n })", "title": "" }, { "docid": "31db3014c9eebe45ed059e189efa499c", "score": "0.5824263", "text": "def isonline():\n try:\n conn = http.client.HTTPConnection(\"openstack.cebitec.uni-bielefeld.de\")\n conn.request(\"HEAD\", \"/\")\n r1 = conn.getresponse()\n if int(r1.status) == 302:\n print(r1.status)\n return \"online\"\n else:\n return \"offline\"\n except IOError:\n return \"offline\"", "title": "" }, { "docid": "1224784c182301b7c34ef941a01b8191", "score": "0.5823255", "text": "def recently_offline(self) -> bool:\n return datetime.now().timestamp() <= self.seen.timestamp() + 300", "title": "" }, { "docid": "17d313f507bc28d9d6db86cf2dd13f45", "score": "0.57718945", "text": "def offline_requested(self, offline_requested):\n\n self._offline_requested = offline_requested", "title": "" }, { "docid": "741cdf5d37cf0e96dae8ea084cbba82d", "score": "0.5750108", "text": "def _sk_get_offline_callforward(self):\n return self._sk_property(\"ZGM]\\005\", 77, True)", "title": "" }, { "docid": "6f936968e4ac5c635340bf807fbc03ca", "score": "0.57396936", "text": "def onServiceUnavailable(self):\n pass", "title": "" }, { "docid": "6790822d15c09fa0c6c6873dedfd963e", "score": "0.5725469", "text": "def wait_for_offline_download(self, encoding_name, play_type):\n\n if play_type.strip().lower() == \"local\":\n return\n\n if encoding_name.strip().lower() == \"automatic\":\n strict_check = False\n elif \"widevineoffline\" in encoding_name.strip().lower():\n strict_check = True\n else:\n strict_check = None\n\n # For other cases we don't need to check this\n if strict_check is not None:\n for i in xrange(100):\n try:\n if self.is_item_visible(\"alert-msg\") and \\\n self.get_value(\"alert-msg\") == \\\n WIDEVINE_OFFLINE_DOWNLOAD_MSG:\n info(\"%s is visible\" % WIDEVINE_OFFLINE_DOWNLOAD_MSG)\n if strict_check:\n strict_check = False\n self.wait_for(MEDIUM_WAIT)\n else:\n info(\"%s not visible\" % WIDEVINE_OFFLINE_DOWNLOAD_MSG)\n # currently commented out this raise\n #if strict_check:\n # raise Exception(exception_mod.widevine_error)\n break\n except Exception as excp:\n info(str(excp))\n break\n else:\n raise Exception(\"%s ::: msg still visible\" %\n WIDEVINE_OFFLINE_DOWNLOAD_MSG)", "title": "" }, { "docid": "b5e99bc38d7305c5059c98296c359c6d", "score": 
"0.57241374", "text": "def checkOnOfflineStatus(\n self,\n tgUsername,\n init,\n ):\n init_flag = init\n poolinfo = PoolInfo(tgUsername)\n poolinfo.load()\n msg = ''\n plural = ''\n if poolinfo.pools:\n for p in poolinfo.pools:\n apiEndPoint = config['LUXOR']['ApiEndPoint']\n apiKey = p['apikey']\n pooluser = p['uname']\n poolname = p['pool']\n self.setPoolInfo(p)\n #poolmonitor = PoolMonitor(p)\n try:\n #logger.info(\"Accessing %s API for user %s ...\", poolname, pooluser)\n offlineWorkers = int(self.getNumberOfOfflineWorkers())\n #logger.info(\"offlineWorkers: %s %s\", offlineWorkers, type(offlineWorkers))\n if init_flag == 1:\n prev = 0\n else:\n prev = int(self.loadNumberOfOfflineWorkers())\n #logger.info(\"pool: %s uname: %s\", poolmonitor.pool.poolinfo['pool'], poolmonitor.pool.poolinfo['uname'])\n #logger.info(\"prev: %s %s\", prev, type(prev))\n logger.info(\"Offline Workers: prev: %s / now: %s\", prev, offlineWorkers)\n if prev != -1:\n if offlineWorkers > 0 and offlineWorkers != prev:\n if offlineWorkers > 1:\n plural = 'S'\n msg += str(offlineWorkers) + \" WORKER\" + plural + \" OFFLINE\\n\"\n #msg += \"Pool: \" + poolname + \" Username: \" + pooluser + \"\\n\"\n if offlineWorkers == 0 and offlineWorkers != prev:\n msg += \"All workers are back online.\"\n #logger.info(\"Successfully retrieved %s API data for user %s.\", poolname, pooluser)\n self.saveNumberOfOfflineWorkers(offlineWorkers)\n except:\n logger.info(\"ERROR: Error in checkOnOfflineStatus() for user %s\", tgUsername)\n return msg", "title": "" }, { "docid": "4dbafc7bcea3187cffab380bb5cdcc56", "score": "0.5648031", "text": "def Stay_Online(self):\n \n while True:\n self.Change_Status_And_Name(1, None)\n gevent.sleep(85)", "title": "" }, { "docid": "fff80b7a38c93e0cc17464cd5bcf3986", "score": "0.5646145", "text": "def online_multiplayer(self):\n pass", "title": "" }, { "docid": "a2afa7265319a69de9cf62fcbdd02e27", "score": "0.56174237", "text": "def alert_online(self):\n return self._set_state('online')", "title": "" }, { "docid": "9108c03ea01eae28f5b2ec322b1f5dfa", "score": "0.56141704", "text": "def execute_maintenance(self):\n pass", "title": "" }, { "docid": "adaf1582f64ed357db99250adfaf9ae6", "score": "0.5613624", "text": "def on_heartbeat(self):\n pass", "title": "" }, { "docid": "35c5d5f7d19e6fe7d7bd50faf1c12883", "score": "0.5596801", "text": "def on_disconnected(self):\n pass", "title": "" }, { "docid": "3b413f3335c716d87fb4aee8dde0a658", "score": "0.55543417", "text": "def disconnected(self):\n\n self.general.server_online = False", "title": "" }, { "docid": "0b62c0d3156432829f934193c9390fe1", "score": "0.5547683", "text": "def _callback_monitor(self, bot, job):\n warning_text = None\n try:\n response = requests.get(self.monitored_url)\n except ConnectionError:\n warning_text = self.CONNECTION_ERROR_MESSAGE\n else:\n if response.text != self.substring:\n warning_text = self.WRONG_RESPONSE_MESSAGE\n if warning_text:\n time_elapsed = time.time() - self.last_notified_time\n if time_elapsed > self.MIN_NOTIFICATION_PERIOD:\n self._send_warning(bot, warning_text)", "title": "" }, { "docid": "a01deb1fea0802bdbf67998fc5701c7f", "score": "0.5538607", "text": "def skip_if_offline(test_method):\n\[email protected](test_method)\n\tdef decorated(self, *args, **kwargs):\n\t\tif os.environ.get('KING_PHISHER_TEST_OFFLINE'):\n\t\t\tself.skipTest('due to running in offline mode')\n\t\treturn test_method(self, *args, **kwargs)\n\treturn decorated", "title": "" }, { "docid": "64b69eefe5679ddcc5280459033850fb", "score": 
"0.55339634", "text": "def connectionLost(reason):", "title": "" }, { "docid": "7667d9dccd3f6e06deaa08fb2b82b0e8", "score": "0.55076087", "text": "def offline_ivr(self):\n return self._offline_ivr", "title": "" }, { "docid": "7046b6f18e5184ffbb6dc18759c4f07b", "score": "0.54953194", "text": "def notifyWebServerUp(self):\n\t\treturn ()", "title": "" }, { "docid": "d5e1682dba7598a30ff448b836e51589", "score": "0.5469633", "text": "def __disconnected__(self, servername):\n server = self.servers.get(servername)\n if server.connected:\n logger.info(\"Server %s :: Server disconnected now.\" % servername)\n server.clearCalls()\n if server.taskCheckStatus.running:\n server.taskCheckStatus.stop()\n server.connected = False\n server.ami = None", "title": "" }, { "docid": "2906b0647bc57299a55da704260268fa", "score": "0.54613477", "text": "def offline_ivr(self, offline_ivr):\n self._offline_ivr = offline_ivr", "title": "" }, { "docid": "0cbae937432b2171fcf67e0fc8651b22", "score": "0.5441612", "text": "def agent_online(self, request, timeout, metadata=None, with_call=False, protocol_options=None):\n raise NotImplementedError()", "title": "" }, { "docid": "7691a0e1fa59cda43e600653b13034e9", "score": "0.5438979", "text": "def setOfflineNick(self):\n self.setNick(self.offlineNick)", "title": "" }, { "docid": "4bf0e30bc38eaa59e3f35fc5f7290b1c", "score": "0.5424972", "text": "def _raise_if_offline_mode_is_enabled(msg: Optional[str] = None):\n if constants.HF_HUB_OFFLINE:\n raise OfflineModeIsEnabled(\n \"Offline mode is enabled.\" if msg is None else \"Offline mode is enabled. \" + str(msg)\n )", "title": "" }, { "docid": "abcaf7c551d26aa6a27a94d67b97d674", "score": "0.54175436", "text": "def _switchbot_device_unavailable_callback(_address: str) -> None:\n nonlocal switchbot_device_went_unavailable\n switchbot_device_went_unavailable = True", "title": "" }, { "docid": "6687dfd0dff06cbcd8e45a664a2c346e", "score": "0.54132634", "text": "async def goon(self, ctx: Context):\n\n for name, address in GOON_SERVERS.items():\n embed_title = f'{name} (byond://{address[0]}:{address[1]})'\n\n try:\n # Retrieve status information and admin list.\n reader, writer = await asyncio.wait_for(\n asyncio.open_connection(*address), 5.0)\n\n writer.write(goon_query('?status'))\n await asyncio.wait_for(writer.drain(), 3.0)\n status_response = await asyncio.wait_for(\n reader.read(4096), 3.0)\n\n writer.write(goon_query('?admins'))\n await asyncio.wait_for(writer.drain(), 3.0)\n admin_response = await asyncio.wait_for(\n reader.read(4096), 3.0)\n\n if writer.can_write_eof():\n writer.write_eof()\n writer.close()\n\n except (ConnectionRefusedError, asyncio.TimeoutError):\n # Conenction refused or unable to connect before timeout.\n await ctx.send(embed=discord.Embed(\n title=embed_title + ' (offline)',\n color=discord.Color.red()))\n continue\n\n time = datetime.now()\n\n for packet in (status_response, admin_response):\n if packet[0:2] != b'\\x00\\x83':\n # Packet didn't start with expected bytes.\n log.warning(\n f\"Malformed packet from server {name}, \"\n f\"{address[0]}:{address[1]}.\")\n log.debug(f\"Packet contents: {packet}\")\n await ctx.send(\n \"Unknown error retrieving status information for \"\n f\"server {name} at {address[0]}:{address[1]}\")\n return\n\n # Get embed paramters.\n status_len = struct.unpack('>H', status_response[2:4])[0]\n params = urllib.parse.parse_qs(\n status_response[5:status_len+3].decode('ascii')\n )\n\n # Get admin list.\n admin_len = struct.unpack('>H', admin_response[2:4])[0]\n admins = [\n 
value[0]\n for key, value\n in urllib.parse.parse_qs(\n admin_response[5:admin_len+3].decode('ascii')\n ).items()\n if key != 'admins'\n ]\n\n for i in range(len(admins)):\n if admins[i] in self.ckey_aliases:\n admins += self.ckey_aliases[admins[i]]\n else:\n log.info('unaliased admin: ' + admins[i])\n\n players = deque()\n players_cur = []\n total_chars = 0\n for player in sorted(\n params['player' + str(x)][0]\n for x\n in range(\n int(params['players'][0])\n )\n ):\n if total_chars != 0:\n chars = 2\n else:\n chars = 0\n\n entry = player\n chars += len(entry)\n\n if player in self.ckey_list:\n entry = '\\\\\\U0001f354' + entry\n chars += 4\n\n if player in admins:\n entry = '\\\\\\u2b50' + entry\n chars += 2\n\n if total_chars + chars > 1024:\n players.append(', '.join(players_cur))\n players_cur = []\n total_chars = 0\n\n total_chars += chars\n players_cur.append(entry)\n\n if players_cur:\n players.append(', '.join(players_cur))\n\n try:\n shuttle = int(params['shuttle_time'][0])\n shuttle = str(timedelta(\n seconds=abs(shuttle)\n )) + (\n ' (station)'\n if shuttle < 0\n else (\n ' (in transit)'\n if shuttle not in (0, 60*6)\n else ''\n )\n )\n except ValueError:\n shuttle = params['elapsed'][0]\n\n try:\n elapsed = str(timedelta(seconds=int(params['elapsed'][0])))\n except ValueError:\n elapsed = params['elapsed'][0]\n\n await ctx.send(embed=discord.Embed(\n title=embed_title,\n type='rich',\n timestamp=time,\n color=discord.Color.green()\n ).add_field(\n name='Version',\n value=params['version'][0]\n ).add_field(\n name='Mode',\n value=params['mode'][0] + (\n ', respawn enabled'\n if params['respawn'][0] == '1'\n else ''\n )\n ).add_field(\n name='Map Name',\n value=params['map_name'][0]\n ).add_field(\n name='Round Length',\n value=elapsed\n ).add_field(\n name='Shuttle Time',\n value=shuttle\n ).add_field(\n name='Station Name',\n value=(params['station_name'][0]\n if params.get('station_name')\n else 'N/A')\n ).add_field(\n name='Players ({})'.format(params['players'][0]),\n value=players.popleft() if players else 'N/A',\n inline=False\n ))\n\n if players:\n for plist in players:\n await ctx.send(embed=discord.Embed(\n title=embed_title,\n type='rich',\n timestamp=time,\n color=discord.Color.green(),\n ).add_field(\n name='Players (cont.)',\n value=plist,\n inline=False,\n ))", "title": "" }, { "docid": "79b7fec175a08edb93112321b3d605b2", "score": "0.5402837", "text": "def _snmp_power_off(self):", "title": "" }, { "docid": "eb2d72a66cf2668926583afbaa810bb4", "score": "0.53850514", "text": "def friend_offline(event):\n\n data = {'user_id': event[1] * (-1), 'method': None, 'time': None}\n\n value = event[2]\n\n if value:\n data['method'] = 'timeout'\n else:\n data['method'] = 'log out'\n\n return data", "title": "" }, { "docid": "f2aa3fae047b406abe20a1e304c776ba", "score": "0.5374786", "text": "async def offline(self, oid, options):\n pool = await self._get_instance(oid)\n\n verrors = ValidationErrors()\n found = self.__find_disk_from_topology(options['label'], pool)\n if not found:\n verrors.add('options.label', f'Label {options[\"label\"]} not found on this pool.')\n if verrors:\n raise verrors\n\n disk = await self.middleware.call(\n 'disk.label_to_disk', found[1]['path'].replace('/dev/', '')\n )\n await self.middleware.call('disk.swaps_remove_disks', [disk])\n\n await self.middleware.call('zfs.pool.offline', pool['name'], found[1]['guid'])\n\n if found[1]['path'].endswith('.eli'):\n devname = found[1]['path'].replace('/dev/', '')[:-4]\n await 
self.middleware.call('disk.geli_detach_single', devname)\n await self.middleware.call(\n 'datastore.delete',\n 'storage.encrypteddisk',\n [('encrypted_volume', '=', oid), ('encrypted_provider', '=', devname)],\n )\n return True", "title": "" }, { "docid": "48b0cc10d91debb0c4ec43c55e58d29c", "score": "0.53675985", "text": "def stale():\n if message.nonce in self.awaiting_ack:\n del self.awaiting_ack[message.nonce]\n peer = self.routing_table.by_address(addr)\n if peer:\n peer.local.misses += 1", "title": "" }, { "docid": "5725f33bf59e6030bb7c91353e2aebb7", "score": "0.53620726", "text": "def handle_disconnected(self):\n pass", "title": "" }, { "docid": "f9c859827f24c2652fb755ac361578ee", "score": "0.53340054", "text": "def online(self):\n\n\t\treturn self.__online", "title": "" }, { "docid": "9bdec9b0e8b36ff23f7e7e7389d51f47", "score": "0.5324774", "text": "def _async_set_unavailable(self, now):\n self._remove_unavailability_tracker = None\n self._is_available = False\n self.async_schedule_update_ha_state()", "title": "" }, { "docid": "ad434e58b2c6c72c28288b165c263d93", "score": "0.5324137", "text": "def on_disconnect(self):\n for room in session.rooms:\n # self.__leave_room_and_notify(room)\n self.__leave_room_and_notify(room)\n\n auth = db.Authenticatable.get(session.auth_id)\n auth.status = 'offline'\n auth.save()\n\n # It appears to be necessary to use the root socketio instance\n # otherwise events cannot be sent outside the current namespace.\n # In this case, only events to '/tasks' can be emitted otherwise.\n self.log.warning('emitting to /admin')\n self.socketio.emit('node-status-changed', namespace='/admin')\n\n self.log.info(f'{session.name} disconnected')", "title": "" }, { "docid": "beac8597438823a29080ea0094dc38e1", "score": "0.5316104", "text": "def test_52north_offline():\n WPSClient(\n url_52n,\n caps_xml=open(resource_file('wps_52n_caps.xml'), 'rb').read(),\n desc_xml=open(resource_file('wps_52n_desc.xml'), 'rb').read(),\n )", "title": "" }, { "docid": "9bca8dac8400cf39ba209feefc9d1768", "score": "0.5313064", "text": "def download_cache(self):", "title": "" }, { "docid": "bd537bcc02576c413d45c5f634a8ee12", "score": "0.5312484", "text": "async def _async_ensure_is_off(self):", "title": "" }, { "docid": "5db6ce7d9110b012514a0ffc93d2cec0", "score": "0.52976596", "text": "def _snmp_power_on(self):", "title": "" }, { "docid": "c05c71fb68de5bc421de2124f69a2cd6", "score": "0.52969825", "text": "def refresh_status(self):\n pass", "title": "" }, { "docid": "c0c8574f70f027a77f511c897ef4f994", "score": "0.529204", "text": "async def updateMembers():\n threading.Timer(15.0, updateMembers).start()\n getMembers = sum(1 for x in client.get_all_members() if x.status.value != 'offline' and x.status.value != 'invisible')\n await client.change_presence(game=discord.Game(name='!help | ' + str(getMembers) + ' Online!'))", "title": "" }, { "docid": "10c917b5845e58c319a3a9cb1cb2ed0f", "score": "0.52554756", "text": "def notify_fountain(self):\n pass", "title": "" }, { "docid": "a479571153d402c09b5b2ba21a08c2cf", "score": "0.52538526", "text": "def testRegisteredDevicePoweredOffShowsOffline(self):\n test_id = 'ba6b2c0c-10da-4910-bb6f-63c826087054'\n test_name = 'testRegisteredDevicePoweredOffShowsOffline'\n print 'Power off device.'\n raw_input('Select enter once the printer is completely off.')\n print'Waiting up to 10 minutes for printer status update.'\n for _ in xrange(20):\n device.GetDeviceDetails()\n try:\n self.assertIn('offline', device.status)\n except AssertionError:\n time.sleep(30)\n 
else:\n break\n try:\n self.assertIsNotNone(device.status)\n except AssertionError:\n notes = 'Device has no status.'\n self.LogTest(test_id, test_name, 'Failed', notes)\n raise\n try:\n self.assertIn('offline', device.status)\n except AssertionError:\n notes = 'Device is not offline. Status: %s' % device.status\n self.LogTest(test_id, test_name, 'Failed', notes)\n raise\n else:\n notes = 'Status: %s' % device.status\n self.LogTest(test_id, test_name, 'Passed', notes)\n finally:\n print 'Power on the devie.'\n raw_input('Select enter once the printer is completely initialized.')", "title": "" }, { "docid": "f82468196b8fd649db15ce4aead22e70", "score": "0.52532786", "text": "def on_off_workernode(**options):\n\n if options.get('Collector'):\n collector = options.get('Collector')\n if options.get('WorkerNode'):\n wns = options.get('WorkerNode').split(',')\n if options.get('Status'):\n status = options.get('Status')\n\n startds = collector.query(htcondor.AdTypes.Startd, \"PartitionableSlot =?=True\")\n workernodes = {}\n for wn in startds:\n if 'Machine' in wn:\n k = wn['Machine']\n v = \"offline\"\n if wn['StartJobs']: v = \"online\"\n workernodes.update({k:v})\n\n if wns[0] == 'ALL':\n question = \"Are you sure that you want to put ALL workernodes \" + status + \"?\"\n yesno = query_yes_no(question, default=\"no\")\n if not yesno:\n sys.exit(0)\n else:\n question = \"Are you REALLY sure that you want to put ALL workernodes \" + status + \"?\"\n yesno = query_yes_no(question, default=\"no\")\n if not yesno:\n sys.exit(0)\n\n wns = list(workernodes.keys())\n\n # remove wn not found on the cluster\n new_wns = []\n for w in wns:\n workernode = w.split('.')[0]\n workernode = socket.getfqdn(str(workernode))\n\n if workernode not in workernodes:\n print workernode, \"not found in this cluster\"\n else:\n # check worker nodes status \n if workernodes[workernode] == status:\n print workernode, \"is already\", status, \", ignoring\"\n else:\n new_wns.append(w)\n\n # change the status of the workernode \n for w in new_wns:\n workernode = w.split('.')[0]\n workernode = socket.getfqdn(str(workernode))\n\n startjobs = \"\"\n if status == 'online':\n print \"onlining worker node....\", workernode\n startjobs = '\"StartJobs = True\"'\n elif status == 'offline':\n print \"offlining worker node....\", workernode\n startjobs = '\"StartJobs = False\"'\n\n if workernode in workernodes and workernodes[workernode] != status:\n\n cmd = '/usr/bin/condor_config_val -name ' + workernode + ' -startd -set ' + startjobs\n proc = subprocess.Popen(cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE, shell=True)\n (out, err) = proc.communicate()\n if err:\n if out: print out\n print \"ERROR:\", err.rstrip()\n else:\n time.sleep(5)\n cmd = '/usr/sbin/condor_reconfig -name ' + workernode\n proc = subprocess.Popen(cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE, shell=True)\n (out, err) = proc.communicate()\n if err:\n if out: print out\n print \"ERROR:\", err.rstrip()\n return", "title": "" }, { "docid": "d914f2a6e0bc651d011857de25155d53", "score": "0.5247041", "text": "async def _got_online(self, msg: Any) -> None:\n\n # get jid, ignore event if it's myself\n jid = msg[\"from\"].full\n if jid == self._jid:\n return\n\n # clear interface cache, just in case there is something there\n if jid in self._interface_cache:\n del self._interface_cache[jid]\n\n # create future for interfaces\n self._interface_cache[jid] = asyncio.get_running_loop().create_future()\n\n # request interfaces\n interface_names = await 
self._get_interfaces(jid)\n\n # if no interfaces are implemented (not even IModule), quit here\n if len(interface_names) == 0:\n module = jid[: jid.index[\"@\"]]\n log.error(f\"Module {module} does not seem to implement IModule, ignoring.\")\n return\n\n # store interfaces\n self._interface_cache[jid].set_result(self._interface_names_to_classes(interface_names))\n\n # append to list\n if jid not in self._online_clients:\n self._online_clients.append(jid)\n\n # send event\n self._send_event_to_module(ModuleOpenedEvent(), msg[\"from\"].username)", "title": "" }, { "docid": "c9128954b4964359f879472bfb1b5097", "score": "0.5243482", "text": "def on_disconnect_home():\n pass", "title": "" }, { "docid": "b2d18cd3533be861e331f3d076d22f6c", "score": "0.5241515", "text": "def on_retry(self):", "title": "" }, { "docid": "c3bff9a0c30b9c1cdaf153de1fb06fe7", "score": "0.5238285", "text": "def network_update(self):\n return", "title": "" }, { "docid": "846089b26dda38abc611e7192cdea72f", "score": "0.52220947", "text": "def check_api_status(self):\n self.request('/heartbeat')\n return \"Online\"", "title": "" }, { "docid": "9114ea36ffe582cf3d5ab68e2dd5cc1f", "score": "0.52161086", "text": "def muc_online(self, presence):\n log.debug(\"Got presence for %s\", presence['muc']['nick'])", "title": "" }, { "docid": "319b83108bf01836e4557a20e4a873d7", "score": "0.52109075", "text": "def test_watch_namespaced_offline_virtual_machine(self):\n pass", "title": "" }, { "docid": "9cdc7ed0c2fdcb600466d8382bb8cdd7", "score": "0.5203833", "text": "def online(self):\n\n\t\traise foundations.exceptions.ProgrammingError(\n\t\t\"{0} | '{1}' attribute is not deletable!\".format(self.__class__.__name__, \"online\"))", "title": "" }, { "docid": "a7078daa2d651e9b337c53c19bc4abda", "score": "0.5203374", "text": "def test_check_ecc_server_online(self):\n self.set_mock_effect(True)\n\n self.call_task()\n self.get_callable().assert_called_once_with()\n\n self.ecc.refresh_from_db()\n self.assertTrue(self.ecc.is_online)", "title": "" }, { "docid": "a26e2948b05d43ec17e3dbb3559a99aa", "score": "0.5199415", "text": "def unavailableReceived(self, presence):\n self.update_available_status_in_db(presence)", "title": "" }, { "docid": "dcc65df98e6faf2c43eb4e9b63e09b70", "score": "0.5194411", "text": "def fetch(self, irc, msg, args):\n self._refreshCache()\n irc.replySuccess()", "title": "" }, { "docid": "be1447cb8740c6a838049743650a2e91", "score": "0.51792485", "text": "def on_disconnect(self, server):", "title": "" }, { "docid": "8f7624d53ba877882cfbe31a467cacac", "score": "0.5178329", "text": "def test_is_online(self, mock):\n data = load_fixture(\"systeminfo.txt\")\n mock.register_uri(\"get\", function_url(\"systeminfo\"), text=data)\n\n bwd = BewardGeneric(MOCK_HOST, MOCK_USER, MOCK_PASS)\n\n self.assertTrue(bwd.is_online)\n self.assertTrue(bwd.available)\n\n mock.register_uri(\"get\", function_url(\"systeminfo\"))\n\n self.assertTrue(bwd.is_online)\n self.assertTrue(bwd.available)\n\n mock.register_uri(\n \"get\", function_url(\"systeminfo\"), exc=requests.exceptions.ConnectTimeout\n )\n\n self.assertFalse(bwd.is_online())\n self.assertFalse(bwd.available)", "title": "" }, { "docid": "fca4054d88c09e39f05558c682ea21cb", "score": "0.5171115", "text": "def on_disconnect(): \n \n username = session.get('username')\n if username:\n # we have the username in the session, we can mark the user as offline\n user = User.query.filter_by(username=username).first()\n if user:\n user.online = False\n db.session.commit()", "title": "" }, { "docid": 
"d4556d22ec650295fa84caff57965db1", "score": "0.5167664", "text": "def update(self, *args, **kwargs):\n if not self._home.connected:\n _LOGGER.error(\n \"HMIP access point has lost connection with the cloud\")\n self._previous_connection_state = False\n self.set_all_to_unavailable()\n else:\n if not self._previous_connection_state:\n # only update the state when connection state has gone from\n # false to true\n job = self._hass.async_add_job(self.get_state())\n job.add_done_callback(self.get_state_finished)", "title": "" }, { "docid": "724cafa5f2362d627c10718f720fd7c0", "score": "0.5161866", "text": "def test_read_namespaced_offline_virtual_machine(self):\n pass", "title": "" }, { "docid": "19e6d85cb2afc93ebf24eb07987bb36e", "score": "0.51617026", "text": "def server_onestep(self):\n # This function is meant to be overriden in subclasses\n pass", "title": "" }, { "docid": "8fc1e31e59adad9076412e677e299639", "score": "0.5161306", "text": "def _on_link_down(self):\n self._client.stop()\n self._publisher.publish_event(\n event_type='PublicPlatformTelemetryEvent',\n origin=self._xs_name,\n status=TelemetryStatusType.UNAVAILABLE)", "title": "" }, { "docid": "ff6c9df40c5d0f21ad38d2d04c7072df", "score": "0.5152888", "text": "def offline(config, datadir, file_prefix):\n\n generate_report(\"offline\", config.out_directory, config.syncy, analytic_directory=datadir, engine_name=file_prefix)", "title": "" }, { "docid": "8cd845ddd4d67ef72f3ba05451990afb", "score": "0.51527506", "text": "def run(self):\n log.info(\"Initialize Offline Debugger Server for dbg_dir: %s\", self._context.dbg_dir)\n self._offline_server_manager.initialize()\n log.info(\"Start Offline Debugger Server for dbg_dir: %s\", self._context.dbg_dir)\n self._running.set()\n try_count = 0\n while self._running.is_set() and try_count < self._MAX_TRY_EXCEPT_COUNT:\n try:\n self._offline_server_manager.wait_for_termination()\n if not self._offline_server_manager.is_runnable():\n break\n except MindInsightException as err:\n log.exception(err)\n log.warning(\"Error happens during listening on user commands. 
Restart listening again.\")\n finally:\n try_count += 1\n # protect server from too much failure commands.\n if try_count == self._MAX_TRY_EXCEPT_COUNT:\n self._cache_store.clean()\n metadata = self._cache_store.get_stream_handler(Streams.METADATA).get()\n self._cache_store.put_data(metadata)\n log.warning(\"Exception exceed %d times, stop server.\", try_count)", "title": "" }, { "docid": "0c45babdca5dd7b2d4ac5b2e1b45fe98", "score": "0.5138398", "text": "def connectionLost(self, reason):\n self.connected = False", "title": "" }, { "docid": "6d4d6e8e091ce48a38b24669bf795a54", "score": "0.51339686", "text": "def acquire( self ) :", "title": "" }, { "docid": "596ff42fe0d451c75c6cdfa9e15d791b", "score": "0.5133395", "text": "def _fetch_status(self):\n _LOGGER.debug(\"_fetch_status\")\n # TODO maybe we should do this for other players in network...\n # self.hass.services.call(\n # 'ais_ai_service',\n # 'publish_command_to_frame', {\n # \"key\": 'getAudioStatus',\n # \"val\": True,\n # \"ip\": self._device_ip\n # }\n # )", "title": "" }, { "docid": "9ed1591514b16751458d6b4046c5282b", "score": "0.51273066", "text": "def activity(self, server_name):\n pass", "title": "" }, { "docid": "a122fdaff4e3563a6a207b9796e103ab", "score": "0.5126921", "text": "def __friendOffline(self, doId):\n friend = base.cr.identifyFriend(doId)\n if friend != None:\n self.setSystemMessage(0, OTPLocalizer.WhisperFriendLoggedOut % (friend.getName()))", "title": "" }, { "docid": "3c0ace0acbd5b05a220b36818a19ab97", "score": "0.5103667", "text": "def getOfflineServers(self):\n\n endpoint = self.ORGANIZATION_ID + \"/servers/filter?expand=applications,server_license,\" \\\n \"skip_links&includeArchived=false&offset=0&quickFilter=OFFLINE\" \\\n \"&sort=-lastActivity\"\n url = self.TEAMSERVER_URL + endpoint\n\n # Get response\n response = requests.get(url=url, headers=self.header, stream=True)\n jsonreader = json.loads(response.text)\n\n # Setup file to output results to\n filename = self.outputpath + \"/OfflineServers.csv\"\n filewriter = open(filename, 'w+')\n\n # Loop through each server and determine if it is offline\n if jsonreader[\"success\"] is True:\n todaydate = datetime.today()\n servernum = 1\n print(\"The following servers are offline as of %s:\" % todaydate)\n filewriter.write(\"The following servers are offline as of %s:\\n\" % todaydate)\n for server in jsonreader[\"servers\"]:\n if server['status'] == \"OFFLINE\": # If the status is offline, add it to our list\n linetowrite = (\"\\t%d,%s\" % (servernum, server['name']))\n filewriter.write(linetowrite + \"\\n\")\n print(linetowrite)\n servernum += 1\n filewriter.close()", "title": "" }, { "docid": "9d20369958e7ec977c947487d50474fe", "score": "0.5090169", "text": "def lostConnection(conection, reason):\n pass", "title": "" }, { "docid": "6fb00716bef18135f20b337f5c75a716", "score": "0.50853676", "text": "def on_heartbeat_timeout(self):\n pass", "title": "" }, { "docid": "adfbb22cd890452ba17eace395365a63", "score": "0.507698", "text": "def status(self):\n ...", "title": "" }, { "docid": "55d9d9181765837907e861e1a21fbbb1", "score": "0.50759214", "text": "def blocking(self, url):\r\n # TODO stub\r\n pass", "title": "" }, { "docid": "99e620c103ee563b80f33c2e7fdbd33e", "score": "0.50546855", "text": "def j_probe(bot, update):\n\n del update\n global gobetween_state\n\n state = probe()\n\n if state != gobetween_state:\n if probe():\n msg = 'Backend is online.'\n else:\n msg = 'Backend is offline!'\n\n gobetween_state = state\n\n for admin in admin_list:\n 
bot.sendMessage(chat_id=admin, text=msg)", "title": "" }, { "docid": "66e37ddec80ca0b79d58a76d154a41bc", "score": "0.5054352", "text": "def checkIfAvailable():", "title": "" }, { "docid": "3082c3a275e394f1866b8f6d6b9c4476", "score": "0.5053334", "text": "def update(self):\n unavailable_method()", "title": "" }, { "docid": "45cf4fd614b0902a5a70260037e2b089", "score": "0.50493926", "text": "def before_first_request():\n def find_offline_users(app):\n with app.app_context():\n while True:\n users = User.find_offline_users()\n for user in users:\n push_model(user)\n db.session.remove()\n time.sleep(5)\n\n if not current_app.config['TESTING']:\n thread = threading.Thread(target=find_offline_users,\n args=(current_app._get_current_object(),))\n thread.start()", "title": "" }, { "docid": "5bca5279cb85b65cd5f4fa628ce6f0f8", "score": "0.50478125", "text": "def step_offline(self):\n\n if self.first_step:\n action = None\n else:\n action = self.rng.choice(self.num_products)\n\n observation, reward, done, info = self.step(action)\n\n return action, observation, reward, done, info", "title": "" }, { "docid": "1e2c4c8985e072b89d239fe22acdaf6b", "score": "0.5034859", "text": "async def online(self, ctx):\r\n server = ctx.message.guild\r\n members = membersOnline = bots = botsOnline = 0\r\n for member in server.members:\r\n if member.bot:\r\n bots += 1\r\n if not member.status == discord.Status.offline:\r\n botsOnline += 1\r\n else:\r\n members += 1\r\n if not member.status == discord.Status.offline:\r\n membersOnline += 1\r\n await Message.Embed(\r\n title=\"Member Stats\",\r\n description=\"Current member information for {}\".format(server.name),\r\n fields=[\r\n { \"name\" : \"Members\", \"value\" : \"└─ {:,}/{:,} online ({:,g}%)\".format(membersOnline, members, round((membersOnline/members)*100, 2)), \"inline\" : False},\r\n { \"name\" : \"Bots\", \"value\" : \"└─ {:,}/{:,} online ({:,g}%)\".format(botsOnline, bots, round((botsOnline/bots)*100, 2)), \"inline\" : False},\r\n { \"name\" : \"Total\", \"value\" : \"└─ {:,}/{:,} online ({:,g}%)\".format(membersOnline + botsOnline, len(server.members), round(((membersOnline + botsOnline)/len(server.members))*100, 2)), \"inline\" : False}\r\n ],\r\n color=ctx.message.author).send(ctx)\r\n #msg = 'There are *{:,}* out of *{:,}* (*{:.2f}%*) users online.'.format(membersOnline, members, (membersOnline/members)*100)\r\n #await ctx.channel.send(msg)\r", "title": "" }, { "docid": "5e8c0850b01613500ecc7e974522dde3", "score": "0.5021265", "text": "def status(self):", "title": "" }, { "docid": "0e721b423e3e74f7af91865ac36024c2", "score": "0.5016283", "text": "def goodmorning(self,host):", "title": "" }, { "docid": "e17aed93ee7eb8b736545bc24f8daea7", "score": "0.50119495", "text": "def online(self):\n return self.last_interaction >= timezone.now() - timezone.timedelta(minutes=1)", "title": "" } ]
bd6f4ba7a07c5cb4218907446d63b252
Tone for incrementing the payout
[ { "docid": "832ec2d6bd1db7aec4c32e23a6680f76", "score": "0.7596992", "text": "def increment_tone(self):\n\n self.tone(100, 0.05)", "title": "" } ]
[ { "docid": "a90b7c476331ae9f007c10f6366ccbc0", "score": "0.6023244", "text": "def button_tone(self):\n\n self.tone(783, 0.05)\n self.tone(987, 0.05)\n self.tone(523, 0.10)\n self.tone(1760, 0.05)", "title": "" }, { "docid": "4dd951c490ef945ddd8f925e38adefd8", "score": "0.59211475", "text": "def get_payoff(self):\n self.pi_t = self.p * self.harvested - self.w * self.e_t", "title": "" }, { "docid": "9bd6e597a029798160d27e02b4633d2f", "score": "0.59120333", "text": "def payToPot(self, amt):\n self.pots[self.index] += amt", "title": "" }, { "docid": "41d1012095b993baeb4e3810e97e72cf", "score": "0.5806614", "text": "def postTime(self, amt):\n amtOfTime = (amt + 1) * 10\n Publisher().sendMessage(\"update\", amtOfTime)", "title": "" }, { "docid": "d8d325465e5fe82ed0a776df433d99a1", "score": "0.5711432", "text": "def finalPay(quote):\n return quote*12*(15)", "title": "" }, { "docid": "48fa446e0269ec7baee3c4074bc58cb1", "score": "0.5688507", "text": "def pay_gold(self, something):\n print(\"GOLD PAID\")", "title": "" }, { "docid": "b6049006522068581bf88a4f16a7cd96", "score": "0.56519324", "text": "def stomataout(self,what,amount): \n if what ==\"O2\":\n self.O2_out+=amount\n #TODO: dark phase ", "title": "" }, { "docid": "0cb690f3c3534049792376fb2e75d938", "score": "0.5538723", "text": "def _tally(self, chip, gpio, level, tick):\n if self._reset:\n self._reset = False\n self.count = 0\n self.count += 1", "title": "" }, { "docid": "ae1e21d3c56d476a99507189adb2c0ca", "score": "0.55334586", "text": "def increment(self):\n tx_hash = self.contract.functions.increment().transact()\n tx_receipt = Counter.web3.eth.waitForTransactionReceipt(tx_hash)", "title": "" }, { "docid": "57980d509870ad67165a6cf8f027d82d", "score": "0.550609", "text": "def tone(self, frequency, duration):\n\n if not self.sound_enabled:\n return\n\n if(frequency == 0):\n time.sleep(duration)\n return\n\n self.pwm.ChangeDutyCycle(0.50)\n self.pwm.ChangeFrequency(frequency)\n time.sleep(duration)\n self.pwm.ChangeDutyCycle(0.00)", "title": "" }, { "docid": "b02e97e604a21198c2c468cf1c95fee7", "score": "0.5501596", "text": "def update_timing(self, t):\n return t + 1", "title": "" }, { "docid": "952d3571d6cb7cf64f62e547559dbfdb", "score": "0.54447615", "text": "def BURP_Beep():\n\tdev = 1\n\tSP.playTone(210, 0.025, True, dev)", "title": "" }, { "docid": "c36496d78e8fd2d7897c25581231c5cb", "score": "0.54341507", "text": "def increment(self, increment):\r\n pass", "title": "" }, { "docid": "a26fa7f96fde1220a07748bb7856d664", "score": "0.5415235", "text": "def tick():\n progress.update(progress.value + 1)", "title": "" }, { "docid": "08075069ed9a71e76b634a3114285aa3", "score": "0.5413647", "text": "def consume(self):\n self.happiness += 7\n self.money -= 8", "title": "" }, { "docid": "1b9ef7894fd78558e761e8b37b67dd71", "score": "0.5392812", "text": "def response(self, action):\n return action + (self.current_temp - action) * math.exp(-1.0 / self.tau)", "title": "" }, { "docid": "01b7ffa45eb2b1b5907f8ad34ea9aaf6", "score": "0.536269", "text": "def lose_tone(self):\n\n self.tone(261, 0.10)\n self.tone(138, 0.20)", "title": "" }, { "docid": "89495430788dcbbfe919f054746e1a97", "score": "0.535943", "text": "def test_signal_changing():\n T = 5\n Fs = 44000\n N = T * Fs\n t = np.linspace(0, T, N)\n k = 0.2\n w = 5\n ke = -0.25\n signal = 1 * np.exp(ke*t) * np.sin(2 * np.pi * 220 * t)\n signal += 2 * (abs(np.sin(w*t))*k*t) * np.sin(2 * np.pi * 400 * t)\n signal += 0.5 * np.sin(2 * np.pi * 900 * t)\n return signal", "title": "" }, { "docid": 
"709cc36de64f24c5150ddeabb386bb3f", "score": "0.53461117", "text": "def ramp_up(self):\n self.interface.write(\"INCR\")", "title": "" }, { "docid": "d393b79a08569b878308091d6ff15bff", "score": "0.5345971", "text": "def k_sin_one(t):\n return 1", "title": "" }, { "docid": "cd03e3499df62bac847f86bcd023c467", "score": "0.53317374", "text": "def cal_now(fpga,katadc_n):\n #Addr 0h, bit 15.\n if not katadc_n in [0,1]: raise RuntimeError(\"katadc_n must be 0 or 1. Please select your ZDok port.\")\n spi_write_register(fpga,katadc_n,0x0,0xffff)\n time.sleep(1)\n spi_write_register(fpga,katadc_n,0x0,0x7fff)", "title": "" }, { "docid": "894a969c8fcdf76685cf8ec7b12a427a", "score": "0.5325618", "text": "def increment_number_served(self,number):\r\n self.number_served+=number", "title": "" }, { "docid": "d3a657dacb1a257378325b64dbb9c320", "score": "0.53178227", "text": "def increment(self, increment):\r\n self.send_increment(increment)\r\n self.recv_increment()", "title": "" }, { "docid": "8bd003e27320dba4709e4a762645ac34", "score": "0.530352", "text": "def strike(cls, state):\n state.strikes += 1\n if state.strikes > 2:\n state.strikes = 0\n state.outs += 1", "title": "" }, { "docid": "010d3ffcb83c03ca2418bcc803767d46", "score": "0.53007084", "text": "def buy():", "title": "" }, { "docid": "7126e43837e5ff39c731042eb9f915b9", "score": "0.5274802", "text": "def increment_round(self):\n self.round += 1", "title": "" }, { "docid": "29bc860f1cc17be8df89261faeefd727", "score": "0.5264163", "text": "def payoff_continue(self, p, J):\n return self.c + self.EJ(p, J)", "title": "" }, { "docid": "6a93c2fa3dbd7e880aa884b3116bfc90", "score": "0.5244958", "text": "def next_step(self):\n return self.now() + self.rate_millis", "title": "" }, { "docid": "03809d84d15e12d488452567106a9fe2", "score": "0.52410686", "text": "def store(self, amount): \n self.power = self.power - amount", "title": "" }, { "docid": "3cf38e8fbd8414fdd5ba7666eef8212c", "score": "0.5229262", "text": "def foo(self, t):\n return self.amplitude * np.sin(2 * np.pi * t / self.period)", "title": "" }, { "docid": "f7494ea9a367c3576ad576cc2a61255a", "score": "0.5220477", "text": "def increment_timestep(self):\n\t\tpass", "title": "" }, { "docid": "0784dfe39f08aa2ae264e02af5465e53", "score": "0.52095175", "text": "def payout(self):\n if not self.is_valid():\n return\n return self.value()*(36//len(self.winning_numbers())-1)", "title": "" }, { "docid": "f06ef178cbd0ecc506624bb3ef78eb28", "score": "0.51846486", "text": "def stomatain_do(self,what,amount):\n if what ==\"CO2\":\n self.CO2_out-=amount\n #TODO: dark phase ", "title": "" }, { "docid": "8eed65107c5e2c81ce56a4dbc3b4abec", "score": "0.51824963", "text": "def coin_event(channel):\n config.LASTIMPULSE = time.time()\n config.PULSES = config.PULSES + 1", "title": "" }, { "docid": "87ab64a6012e1ee9977fbd26d17298f9", "score": "0.5170431", "text": "def incr_timer(self) -> None:\n self.timer += 1", "title": "" }, { "docid": "ba27a3e2f7cfbfc5cfd6d7fb6fc4270a", "score": "0.51680434", "text": "def trigger_sweep(self):\n self.write(\"TS\")", "title": "" }, { "docid": "1205870183dfca59ccb626d7aa84d1d9", "score": "0.51639766", "text": "def emit_power(self, power, sender):\n pass", "title": "" }, { "docid": "8a340a5010715c4b24a77f8987a82508", "score": "0.51491934", "text": "def button1_short(self):\n rospy.logwarn('Pause button (2) pressed short')\n speak(\"2 short\", self.speak_publisher, speaking_active=self.speaking_active)\n if self.manual_penality_mode:\n # switch penalty state by calling service on motion\n\n 
try:\n response = self.manual_penalize_method(1) # penalize\n except rospy.ServiceException as exc:\n speak(\"Pause failed\", self.speak_publisher, speaking_active=self.speaking_active)\n print(\"Penalize service did not process request: \" + str(exc))", "title": "" }, { "docid": "fd74a5c5794f392e5cbf74ea8a4accf6", "score": "0.51308584", "text": "def add_and_increase():\n ...", "title": "" }, { "docid": "83d5e2ddaa64e7dd5b3467970d6b2ddc", "score": "0.5130457", "text": "def buyPrice():", "title": "" }, { "docid": "9a292b9339d385c35e33ed32b9f0ca06", "score": "0.5106998", "text": "def increment(self, frequency):\n self.count += frequency", "title": "" }, { "docid": "7128976d31e804036f54407c9afba4e1", "score": "0.5105806", "text": "def _propagate(self, sqty: SpectralQty) -> SpectralQty:\n return sqty * self._transmittance", "title": "" }, { "docid": "4f3d895f94815fdfb284e2db12664360", "score": "0.5105453", "text": "def inc_speed(self):\n self.speed += 1", "title": "" }, { "docid": "c341733fbd73b7e6470661d3214c82aa", "score": "0.5089075", "text": "def oscillate( self, up, down = False, count = -1 ):\n if not down: \n down = up\n \n cmd = \"run[ oscillate, {}, {}, {} ]\".format( up, down, count )\n return self.__execute( cmd )", "title": "" }, { "docid": "860018f6123b9c71dd20d0f94f6a7c2d", "score": "0.5077765", "text": "def discount(t, r):\n return (1+r)**(-t)", "title": "" }, { "docid": "05514df15ed408e5f4d1954c51cd8c29", "score": "0.5071732", "text": "def atm():", "title": "" }, { "docid": "fd51d5e3bec58c91806f32f3cc8fce4d", "score": "0.5069372", "text": "def update_amount(self):\n self.pantry['current_amount']-=self.pantry['rate']", "title": "" }, { "docid": "5f5de9b68c86f252ff532d04b7283b68", "score": "0.50641686", "text": "def increment(self, value=1, index=-1):\n\n self.t_index[index] += value\n self.check_for_indexerror()", "title": "" }, { "docid": "f86c4372053e83b0e39334d8a23a7ded", "score": "0.5061834", "text": "def payout(self, slot, amt):\n slot.payToPot(self.bank.withdraw(floor(amt)))", "title": "" }, { "docid": "f25d3b590d2248dfa3c2fc48f200e855", "score": "0.5045877", "text": "def advance(self, ts):", "title": "" }, { "docid": "09e8f030bd8c8dfe4cb8d8fbb417d809", "score": "0.5044305", "text": "def BURP_Bebeep2():\n\tdev = 1\n\tSP.playTone(210, 0.1, True, dev)\n\tsleep(0.05)\n\tSP.playTone(420, 0.025, True, dev)", "title": "" }, { "docid": "3b976ddd6d9ad1809a64c9a996727e80", "score": "0.5042", "text": "def increment_number_served(self):\n self.number_served += 10", "title": "" }, { "docid": "8cb5f5bc50eac0afc1c2c61380f31e51", "score": "0.503981", "text": "def appreciate(self, amount):\n self.price += amount\n return self.price", "title": "" }, { "docid": "56d453e2525c27eeef1e6ffb43e0ade3", "score": "0.50356305", "text": "def increase_withdrawal_number(self):\r\n self._monthly_withdrawal_number += 1", "title": "" }, { "docid": "cff266e21dba2216d067bda843d37872", "score": "0.5027392", "text": "def apply_raise(self):\n self.payment = self.payment * self.increment", "title": "" }, { "docid": "5f87c1f4a0d10b5d3657940b029340cc", "score": "0.50252026", "text": "def add_to_wallet(self, amount): \n self.wallet += amount", "title": "" }, { "docid": "7c1c4ec4dcb582cc83a9b75d0bb7385a", "score": "0.50251824", "text": "def set_payoff(self):\n self.payoff = 0", "title": "" }, { "docid": "7c1c4ec4dcb582cc83a9b75d0bb7385a", "score": "0.50251824", "text": "def set_payoff(self):\n self.payoff = 0", "title": "" }, { "docid": "39225a005b5bb91fbc5a01af68bbf7db", "score": "0.50230485", "text": "def 
increment(self, amount = 1):\n self._value += amount", "title": "" }, { "docid": "599da8aeff70ac1a6bdb5651190c5a9c", "score": "0.50156796", "text": "def out(cls, state):\n state.outs += 1\n state.strikes = 0", "title": "" }, { "docid": "f187736f41f58ba1a318c9236db9d174", "score": "0.5011106", "text": "def tx_wave(handle, gpio, pulses):\n if len(pulses):\n PULSES = bytearray()\n for p in pulses:\n PULSES.extend(struct.pack(\n \"QQQ\", p.group_bits, p.group_mask, p.pulse_delay))\n return _u2i(_lgpio._tx_wave(handle&0xffff, gpio, PULSES)\n)\n else:\n return 0", "title": "" }, { "docid": "2e9f3ac303aa17feaf00a021df7eef69", "score": "0.50006837", "text": "def advance(self):\n self.day += 1", "title": "" }, { "docid": "682c15213cc437ada6de72ec9f80b255", "score": "0.49910685", "text": "def get_next_signal(self) -> str:\n return \"5\"", "title": "" }, { "docid": "6f1bc6cdd513dabe9b76749b6d765972", "score": "0.49842268", "text": "def presweep(self, i):\n pass", "title": "" }, { "docid": "37f897b131265e08dd98855f84172f73", "score": "0.49636194", "text": "def increment(self, hours, mins):", "title": "" }, { "docid": "9966b02bead4dba894000c4e0922f494", "score": "0.49458638", "text": "def update_from_transaction(self, qty):\n\n if self.onhand + self.received > Decimal('0'):\n # to deal with Django bug, fixed in 1.1\n onhand = Decimal(self.onhand)\n onhand += qty\n self.onhand = max([Decimal(\"0\"), onhand])\n self.save()\n else:\n # to deal with Django bug, fixed in 1.1\n remaining = Decimal(self.remaining)\n #print self, \"remaining:\", remaining, \"qty:\", qty\n remaining += qty\n self.remaining = max([Decimal(\"0\"), remaining])\n self.save()", "title": "" }, { "docid": "0fcaa802f5e3c157bf3a5a64fc135be1", "score": "0.49404418", "text": "def BURP_Bebeep():\n\tdev = 1\n\tSP.playTone(420, 0.025, True, dev)\n\tsleep(0.05)\n\tSP.playTone(210, 0.1, True, dev)", "title": "" }, { "docid": "557deef064d4dd90007255b4f0809fde", "score": "0.49362698", "text": "def Isyn(self,postsyn,t_diff):\n\n t[np.nonzero(t < 0)] = 0\n #in order for a good modicum of current to even be applied, t must be negative!\n return t*np.exp(-t/self.tau_psc)", "title": "" }, { "docid": "4c5d0a82aa8e8274252ecd92d4ec774b", "score": "0.49353662", "text": "def increment(w, l):\n\tglobal options\n\toptions[3] += w\n\toptions[4] += l\n\topen('wl', 'w').write(str(options[3]) + ' ' + str(options[4]))", "title": "" }, { "docid": "7caa201dfc36b977d7e597e8fd38f4ae", "score": "0.4927036", "text": "def Step(self, time):\n\n if not self.disable:\n \n self._pulsegen.SingleStep(time)", "title": "" }, { "docid": "e67157d3e7225264876274bdc39e54af", "score": "0.49262708", "text": "def amplitudeResponse(N,A0,k,m):\n sinc = 0\n v = (k - m)\n x = pi * v\n if v == 0:\n sinc = 1.0\n else:\n sinc = ( sin(x) / (x))\n \n return ((A0 * N)/2) * sinc", "title": "" }, { "docid": "4f372b68a73b419be33dc45b2152ce66", "score": "0.49177822", "text": "def playTone(self, freq, stdout=True):\n self.ser.write(str.encode(str(freq) + self.delimiter))\n if stdout:\n print('Tone played at {}Hz'.format(freq))", "title": "" }, { "docid": "a9297a46296652af97a12d04d3c2b848", "score": "0.49173188", "text": "def _oscillating(self, t, h, T0, f):\n if t <= T0:\n return 1.0 * h * ( (T0 - t) / (T0 - 1) ) * math.sin(math.pi * t / f)\n else:\n return 0", "title": "" }, { "docid": "0bcb664731cbe150fe46d6b1cc706893", "score": "0.49161512", "text": "def incrementPC(self):\r\n self._pc += 1\r\n self._pc &= 0o7777", "title": "" }, { "docid": "2ea3eba1af8da3723763f4f32a53a8a7", "score": 
"0.49137908", "text": "def calculateOngReceived(_tokenToSell):", "title": "" }, { "docid": "0f77b6e92d60992074dbf2bc8ce46a0b", "score": "0.48952472", "text": "def coinapult_payment( order ):\n RoutingTable['mtgox']['sockt'].send_json( order )", "title": "" }, { "docid": "067ac1bbad2ec4c2a0ed74061e6e3bbf", "score": "0.48942292", "text": "def incrementPhase(self):\n\t\tself.phase += self.phaseIncrement\n\t\twhile (self.phase >= math.pi * 2.0):\n\t\t\tself.numCycles += 1\n\t\t\tself.phase -= math.pi * 2.0", "title": "" }, { "docid": "21a29d9ea5096fb07bb3235c70a25ae1", "score": "0.48914427", "text": "def pitch(self) -> int:\r\n ...", "title": "" }, { "docid": "d26491e6e63ab993b28b3bb0f0575278", "score": "0.48886764", "text": "def incrementing_swarms_ended(self):\n pass", "title": "" }, { "docid": "d13087aa74358b4cf8aa101d754ba446", "score": "0.48853114", "text": "def increment_counter(self, time):\n\t\tself.turn_count += time", "title": "" }, { "docid": "b12ffb176b5904c333915f216d211698", "score": "0.48811153", "text": "def powercycle(self, alias: str) -> str:\n self.turn_off(alias)\n time.sleep(5)\n self.turn_on(alias)\n return 'Done'", "title": "" }, { "docid": "2050a3d8e94730a910ef39b294ba8e4c", "score": "0.48796648", "text": "def play_rate(self) -> float:\r\n ...", "title": "" }, { "docid": "4be4f4d542f59a4ae8148db903e5c4d8", "score": "0.48770738", "text": "def increase(self):\n return self.change(self.INC)", "title": "" }, { "docid": "38051c68df9d70b07929041d3ebe9dd5", "score": "0.4869558", "text": "def duty_cycle(self, value):\n pass", "title": "" }, { "docid": "599bc61b601938b8c91b7f8a99e55aea", "score": "0.48672646", "text": "def increment(self):\n self.counter += 1", "title": "" }, { "docid": "63dc40c2c99f3f376978df996ccd14f1", "score": "0.48588616", "text": "def rate( self, s, x ):", "title": "" }, { "docid": "0fa1c7ed4ae21d168d9807222a493ff6", "score": "0.48587584", "text": "def advance_animation(self, dt):\n # decay process - generating the TAU's after 1.5 sec \n \n if self.n==1 :\n self.t+=dt\n if self.t>=1.5:\n T1=self.particles[0]\n T2=self.particles[0]\n T1.r+=T1.radius\n T2.r-=T2.radius\n T1.radius=2/3*T1.radius\n T2.radius=2/3*T2.radius\n T1.v=np.array([0.01,0.01])\n T2.v=np.array([-0.01,-0.01])\n H0=self.particles[0]\n radii = np.array([H0.radius,T1.radius])\n P0=np.array([H0.r[0],H0.r[1],H0.v[0],H0.v[1]])\n P1=np.array([T1.r[0],T1.r[1],0.05,0.05])\n P2=np.array([T2.r[0],T2.r[1],-0.05,-0.05])\n #P1=np.array([0.02,0.02,0.1,0.1])\n #P2=np.array([0.02,0.98,0.1,-0.1])\n self.particles=[]\n p0 = Particle(x=P0[0], y=P0[1], vx=0.2*P0[2], vy=0*P0[3], radius=0*radii[0],styles = {'edgecolor': 'tab:orange', 'fill':True,'color':'tab:orange'})\n p1 = Particle(x=P1[0], y=P1[1], vx=P1[2], vy=P1[3], \n radius=radii[1],styles = {'edgecolor': 'y', 'fill':True,'color':'y'},Name='T')\n p2 = Particle(x=P2[0], y=P2[1], vx=P2[2], vy=P2[3], \n radius=radii[1],styles = {'edgecolor': 'y', 'fill':True,'color':'y'},Name='T-')\n self.particles.append(p0)\n self.particles.append(p1)\n self.particles.append(p2)\n self.n=len(self.particles) \n self.t=0\n for i, p in enumerate(self.particles):\n p.advance(dt)\n self.circles[i].center = p.r\n if self.n==2: \n self.handle_collisions() \n if self.n>=3 :\n self.t+=dt\n \n\n return self.circles", "title": "" }, { "docid": "1a9607946e0a24710258b81a2b035e6f", "score": "0.48572963", "text": "def tick(self):\n if self.backoff == 0:\n self.backoff = random.randrange(self.cw_size + 1)\n else:\n self.backoff -= 1\n\n if self.backoff == 0:\n self.medium.send(self)", 
"title": "" }, { "docid": "e3bee206d09f9b2eb2b0edd1ac62ac0c", "score": "0.48544738", "text": "def update_total(amount):\n global total\n sleep(0.05)\n total += amount\n print(total)", "title": "" }, { "docid": "6d29976e3e0dcb44966352ca931ba4c9", "score": "0.48484367", "text": "def increment_number_served(self,number_served):\n self.number_served += number_served", "title": "" }, { "docid": "85130a0a7dc68bca9f26fa26a61a0539", "score": "0.48411494", "text": "def ssgenTxOut1():\n # fmt: off\n return msgtx.TxOut(\n value=0x00000000, # 0\n version=0x0000,\n pkScript=ByteArray(\n [0x6A, 0x02, 0x94, 0x8C] # OP_RETURN # 2 bytes to be pushed # Vote bits\n ),\n )\n # fmt: on", "title": "" }, { "docid": "2f59c863edb1f63df69eec8557ee55da", "score": "0.4837749", "text": "def calculate(self, value: int) -> int:\n return value + 1", "title": "" }, { "docid": "47e4d0cbcb6e027f154a33d964fd0780", "score": "0.4837347", "text": "def tick(self):", "title": "" }, { "docid": "c387f58580a8eff33f258c1e48a1da20", "score": "0.48343065", "text": "def next_wave(self):\n if self._wave == self._level.get_max_wave():\n return\n\n self._wave += 1\n\n # Task 1.3 (Status Bar): Update the current wave display here\n # ...\n self._statusbar.set_wave(self._wave)\n\n # Task 1.5 (Play Controls): Disable the add wave button here (if this is the last wave)\n if self._wave == 20:\n self.btn_next_wave.config(state=\"disabled\")\n\n # Generate wave and enqueue\n wave = self._level.get_wave(self._wave)\n for step, enemy in wave:\n enemy.set_cell_size(self._game.grid.cell_size)\n\n self._game.queue_wave(wave)\n self.wave_sound()", "title": "" }, { "docid": "97c5f73cb3406a77e0fe7f19637e7ae5", "score": "0.4825315", "text": "def step(self, n):\r\n\r\n self.current += n", "title": "" }, { "docid": "aa511f504436ae3eee3cdcf1461834ba", "score": "0.48226738", "text": "def withdraw(self, money_withdraw):\n self.money_withdraw = money_withdraw\n self.money_pocket += int(money_withdraw)\n self.money_bank -= int(money_withdraw)\n print(f\"\"\"You have withdrawn {self.money_withdraw} dollars. You have {self.money_bank} dollars in your bank account. You are carrying {self.money_pocket} dollars.\n--------------------------------------------------------------------------------------------------\n \"\"\")", "title": "" }, { "docid": "c5b53250e89bd0584600708ac2ef1e0f", "score": "0.48212337", "text": "def button1_long(self):\n rospy.logwarn('Unpause button (1) pressed long')\n speak(\"1 long\", self.speak_publisher, speaking_active=self.speaking_active)\n try:\n response = self.foot_zero_method()\n except rospy.ServiceException as exc:\n speak(\"Foot zeroing failed\", self.speak_publisher, speaking_active=self.speaking_active)\n print(\"foot zeroing service did not process request: \" + str(exc))", "title": "" }, { "docid": "e44894574e3ebe10f1dd771d97dbc5e5", "score": "0.4818574", "text": "async def donate(self, ctx:utils.Context):\n\n await ctx.send(f\"<{self.bot.config['command_data']['patreon']}>\")", "title": "" }, { "docid": "086383efa4c45f9e92d67b290a4739ae", "score": "0.48174766", "text": "def power(self, tsr, power, out):\n raise NotImplementedError()", "title": "" }, { "docid": "5e5f72aa154e67bc19dbeb74b2126f80", "score": "0.4815394", "text": "def feed(duration):\n GPIO.setup(37, GPIO.OUT)\n time.sleep(float(duration))\n GPIO.setup(37, GPIO.IN)", "title": "" } ]
855a029d5c8a787f021ccb154df032d1
Add a change message for a change to the user that is in the form of a UserChangeMessage
[ { "docid": "95552ef26efe004ccd40b0c03dad34ac", "score": "0.64958537", "text": "def add_info(self, change_message):\n self._update_change_messages(change_message)\n self._save = True", "title": "" } ]
[ { "docid": "51a119222f2668a85ef16088ec556e39", "score": "0.75375736", "text": "def add_change_message(self, message):\n if self.is_new_user:\n return\n self._update_change_messages(message)\n self._save = True", "title": "" }, { "docid": "ccc720e96f448591a52dccfce2d54e33", "score": "0.7008829", "text": "def add_changeling(self, user: User) -> None:\n self.connection_manager.push_list(self, 'changelings', user.id_)", "title": "" }, { "docid": "948ba82e9abbab2d0e03567f73d35de2", "score": "0.64509076", "text": "def add_changes(self, changes):\n if self.is_new_user:\n return\n for name, new_value in changes.items():\n if self.original_user_doc[name] != new_value:\n self.fields_changed[name] = new_value\n self._save = True", "title": "" }, { "docid": "7f1eaa86be5157ae9ff88e70ce073bde", "score": "0.63568366", "text": "def message_user(self, request, message):\r\n messages.info(request, message)", "title": "" }, { "docid": "a3501ba2de21d6944db4cddb3545b071", "score": "0.63538843", "text": "def modify_message(service, user_id, message_id, modifications):\n return service.users().messages().modify(userId=user_id, id=message_id, body=modifications).execute()", "title": "" }, { "docid": "68d751aa9db8f95fd84c7f9e927b0e73", "score": "0.62835723", "text": "def modify_user():\r\n pass", "title": "" }, { "docid": "23eb83d5233771e083b4b97741f020ca", "score": "0.62590384", "text": "def on_user_update(self):\n msg = self.userLogger.log()\n if msg == \"USERS UPDATES\\n\":\n self.logger.log(\"NO NEW USERS DETECTED\")\n return\n # Shows the warning msg on the console\n self.logger.log(msg, logtype=\"warning\")\n\n # Send message notification to available platforms\n self.send_notif(msg)\n return", "title": "" }, { "docid": "9e4e8932500edc2fdbf07b89e7a999fb", "score": "0.6251951", "text": "def add_usr_message(self, msg):\n\t\t_cfdp.cfdp_add_usr_msg(self._param_addr, msg)", "title": "" }, { "docid": "d1c3a6556122860b7a21e7f5cfd3c932", "score": "0.6127798", "text": "def update_user(self, userkey, warn_msg, end_msg):\n self.warning_area['text'] = ''\n usermail = self.email.get().strip(' \\t\\n')\n usersign = self.user_signature.get('1.0', 'end-1c').strip()\n\n # check for empty fields\n if not (userkey and usermail and usersign):\n self.warning_area['text'] = labels['empty_fields']\n if not userkey:\n self._prev = None\n return\n\n # if consecutive click and name is not changed again\n if userkey == self._prev:\n users.force_update(User, userkey, usermail, usersign)\n self._prev = None\n # if any other click\n else:\n try:\n users.update_item(User, userkey, usermail, usersign)\n self._prev = None\n except AssertionError:\n # show warning\n self.warning_area['text'] = warn_msg\n # store the suggested name\n self._prev = userkey\n return\n\n # inform that evth is done\n self.warning_area['text'] = end_msg", "title": "" }, { "docid": "eb2acb9adf0ca59a0884945480355e4d", "score": "0.6074857", "text": "def update(self, new_message):\n\t\tpass", "title": "" }, { "docid": "fa93834b300b771944546d91a96e7af8", "score": "0.6065194", "text": "def _GetMembershipChange(self, event_data, event, users):\n if event['type'] == 'JOIN':\n user_name = str(users.get(\n event['participant_id'][0]['gaia_id']\n ))\n event_data.user_added = user_name \\\n + str(users.get(\n event['participant_id'][0]['gaia_id']\n ))\n elif event['type'] == 'LEAVE':\n event_data.user_removed = \\\n str(users.get(\n event['participant_id'][0]['gaia_id']\n ))", "title": "" }, { "docid": "e47186717facc1f2cf8c87e29b6d2db4", "score": "0.60113996", "text": "def 
update_chat(self, username, message):\n chat_message = str(username) + \": \" + str(message)\n self.chat_messages += chat_message + \"\\n\"", "title": "" }, { "docid": "870697e09a6053951ac7bf6c972c836b", "score": "0.59587795", "text": "def add_user_to_msg(bot, trigger):\n if trigger.group(2):\n msg_list = get_msg_list(bot, trigger.sender)\n msg_list.append(trigger.group(2))\n bot.db.set_channel_value(trigger.sender, 'msg_list', msg_list)\n if len(msg_list) > 0:\n bot.say(trigger.group(2) + ' has been added to the .msg list.')\n else:\n bot.say('The .msg list has been initialized, and ' + trigger.group(2) + ' has been added.')\n else:\n bot.say('Don\\'t forget to give me the nick to add to the .msg list.')", "title": "" }, { "docid": "728ad29e2df19507dfa47310ee388c20", "score": "0.594991", "text": "def notify_user(user_name, user_email):\n subject = 'Planit password changed!'\n body = 'Hi %s!' % user_name\n body += '\\n\\n'\n body += 'Your account password was recently changed. '\n body += 'Just wanted to let you know!'\n msg = Message(subject=subject,\n body=body,\n recipients=[user_email])\n mail.send(msg)\n return msg.as_string()", "title": "" }, { "docid": "037d4782a8e7a3cec2d9a35458a487ef", "score": "0.59418434", "text": "async def update_me(message):\n # get information about the user\n uid = message['from']['id']\n alias = message['from']['username']\n name = message['from']['first_name']\n surname = message['from']['last_name']\n\n user = await queries.get_user_by_uid(uid)\n if not user:\n return 'Send the request first.'\n\n # update alias, name, surname of user\n await queries.update_alias(alias, uid)\n await queries.update_name(name, uid)\n await queries.update_surname(surname, uid)\n message_change_name = '@' + alias + ', your account was successfully updated.\\n\\n' + (\n await auxiliary.user_data_to_string(uid))\n return message_change_name", "title": "" }, { "docid": "f817792b4c4e1051f15ddfea26db5de4", "score": "0.59107435", "text": "def onChannelMessage(self, chanid, user, message):", "title": "" }, { "docid": "d23b9b47038f681ccf5f7a83f62f0be9", "score": "0.5904716", "text": "def log_change(self, request, object, message):\r\n if not self.log:\r\n return\r\n from django.contrib.admin.models import LogEntry, CHANGE\r\n LogEntry.objects.log_action(\r\n user_id = request.user.pk,\r\n content_type_id = None,\r\n object_id = object.pk,\r\n object_repr = force_unicode(object),\r\n action_flag = CHANGE,\r\n change_message = message\r\n )", "title": "" }, { "docid": "e1e129fcf23301639cfc6c5f418cfea0", "score": "0.58475876", "text": "def log_change(self, request, object, message):\n if not is_django_user_model(request.user):\n return\n\n super(DocumentAdmin, self).log_change(\n request=request, object=object, message=message)", "title": "" }, { "docid": "2a56ed51aa085da85f35fab7c97919db", "score": "0.5839527", "text": "def on_update_message(self, message):\n self.status['message'] = message", "title": "" }, { "docid": "152d2b14738a9cae71e8484cd4cccbb6", "score": "0.5783226", "text": "def notify_user(self, message: str) -> None:\r\n print(message)", "title": "" }, { "docid": "01b0b1efacdfbb18484fdb34ee996d33", "score": "0.5777053", "text": "def on_user_left(self, user, channel, message):\n pass", "title": "" }, { "docid": "817b5f34d73c1f52b9ed63cfd285031a", "score": "0.5774594", "text": "def modify_user(self, name, description, password):", "title": "" }, { "docid": "33f918c00cb0493471637107a39fd96b", "score": "0.5759197", "text": "def email_change(request):\n if request.method == 
'POST':\n form = EmailChangeForm(request.POST, instance=request.user)\n if form.is_valid():\n form.save()\n messages.success(request, message=\"Twój adres e-mail został zmieniony.\")\n return redirect('my-profile')\n else:\n form = EmailChangeForm(instance=request.user)\n return render(request, 'users/form.html', {'form': form})", "title": "" }, { "docid": "64e547254ec67dec44e7df1e785b344a", "score": "0.5735031", "text": "def WriteUserNotification(self, notification):", "title": "" }, { "docid": "28c84d5249681660236b540cc9ad75c0", "score": "0.5734089", "text": "def response_change(self, request, obj):\r\n opts = obj._admin_opts\r\n\r\n verbose_name = opts.verbose_name\r\n # Handle proxy models automatically created by .only() or .defer()\r\n #if obj._deferred:\r\n # opts_ = opts.proxy_for_model._meta\r\n # verbose_name = opts_.verbose_name\r\n\r\n pk_value = obj.pk.__str__()\r\n\r\n msg = _('The %(name)s \"%(obj)s\" was changed successfully.') % {'name': force_unicode(verbose_name), 'obj': force_unicode(obj)}\r\n if \"_continue\" in request.POST:\r\n self.message_user(request, msg + ' ' + _(\"You may edit it again below.\"))\r\n if \"_popup\" in request.REQUEST:\r\n return HttpResponseRedirect(request.path + \"?_popup=1\")\r\n else:\r\n return HttpResponseRedirect(request.path)\r\n elif \"_saveasnew\" in request.POST:\r\n msg = _('The %(name)s \"%(obj)s\" was added successfully. You may edit it again below.') % {'name': force_unicode(verbose_name), 'obj': obj}\r\n self.message_user(request, msg)\r\n return HttpResponseRedirect(\"../%s/\" % pk_value)\r\n elif \"_addanother\" in request.POST:\r\n self.message_user(request, msg + ' ' + (_(\"You may add another %s below.\") % force_unicode(verbose_name)))\r\n return HttpResponseRedirect(\"../add/\")\r\n else:\r\n self.message_user(request, msg)\r\n # Figure out where to redirect. If the user has change permission,\r\n # redirect to the change-list page for this object. 
Otherwise,\r\n # redirect to the admin index.\r\n if self.has_change_permission(request, None):\r\n return HttpResponseRedirect('../')\r\n else:\r\n return HttpResponseRedirect('../../../')", "title": "" }, { "docid": "f4cf886ab540306cce41f4e676b87c89", "score": "0.57250476", "text": "def change_message(self, last_version, version):\n return (u':gae: App Engine default version changed: `%s` \\u2192 `%s`'\n % (last_version, version))", "title": "" }, { "docid": "4586b4cfac7ccad47a88a001d4ebc271", "score": "0.5708268", "text": "def send_identity_changed(self, user_id):\n if user_id is None:\n identity = AnonymousIdentity()\n else:\n identity = Identity(user_id)\n\n identity_changed.send(\n current_app._get_current_object(), identity=identity)", "title": "" }, { "docid": "937fcab2596f82efe23a1bae8806a487", "score": "0.57040036", "text": "def add_msg(self, ctime, username, msg):\n self.end_time = ctime\n if username not in self.users:\n return\n\n logging.info(\"%s: %s - %s/%s: %s\" % \\\n (self.name, ctime, username, self.users[username].uid, msg))\n self.messages += 1\n self.users[username].add_msg(ctime, msg)", "title": "" }, { "docid": "0fbe1aaee7f77c9c1af0c2aebbcf232c", "score": "0.5673915", "text": "def action(self, user, channel, msg):\n user = user.split('!', 1)[0]\n \n self.AtrusData.log({\n \"kind\": \"chan_msg\", \n \"who\": user, \n \"msg\": \"/me %s\" % msg\n })", "title": "" }, { "docid": "930744973aa6a579cc3ae234f42e814d", "score": "0.5665794", "text": "def on_nick(self, old, new, uid):\n if uid == self._client_id:\n self.nickname = new\n self.console_write(pinylib.COLOR['bright_cyan'],\n 'Received confirmation of setting client nickname. Set to: %s' % self.nickname)\n old_info = self.users.search(old)\n old_info.nick = new\n if not self.users.change(old, new, old_info):\n log.error('failed to change nick for user: %s' % new)\n if self.check_nick(old, old_info):\n if pinylib.CONFIG.B_FORGIVE_AUTO_BANS:\n self.send_forgive_msg(uid)\n elif uid != self._client_id:\n # TODO: Send as undercover message as well.\n if self.is_client_mod and pinylib.CONFIG.B_GREET:\n if old_info.account:\n if len(pinylib.CONFIG.B_GREET_MESSAGE) is 0:\n if not pinylib.CONFIG.B_GREET_UNDERCOVER:\n self.send_bot_msg(unicode_catalog.NOTIFICATION + '*Welcome* %s:%s:%s' %\n (new, uid, old_info.account))\n else:\n self.send_undercover_msg(new, '*Welcome %s:%s:%s' % (new, uid, old_info.account))\n else:\n # TODO: Add in replace content method here to allow the custom message\n # {user} and {roomname} information to be placed into the string.\n if not pinylib.CONFIG.B_GREET_UNDERCOVER:\n self.send_bot_msg(unicode_catalog.NOTIFICATION + pinylib.CONFIG.B_GREET_MESSAGE)\n else:\n self.send_undercover_msg(new, pinylib.CONFIG.B_GREET_MESSAGE)\n else:\n if not pinylib.CONFIG.B_GREET_UNDERCOVER:\n self.send_bot_msg('*Welcome* %s:%s' % (new, uid))\n else:\n self.send_undercover_msg(new, '*Welcome* %s:%s' % (new, uid))\n\n if self.media.has_active_track():\n if not self.media.is_mod_playing:\n self.send_media_broadcast_start(self.media.track().type,\n self.media.track().id,\n time_point=self.media.elapsed_track_time(),\n private_nick=new)\n\n # TODO: Additional functions.\n # TODO: Add the AUTO PM function.\n if pinylib.CONFIG.B_GREET_PRIVATE:\n self.send_auto_pm(new)\n if self._logo_publishing:\n self._logo_to_user = True\n\n self.console_write(pinylib.COLOR['bright_cyan'], '%s:%s changed nick to: %s' % (old, uid, new))", "title": "" }, { "docid": "ecb23d78561058960f1c55f2037c68b1", "score": "0.5638406", "text": "def 
show_user_message(self, message):\n # In the constructor, we setup this throwaway widget\n # so we could listen for changes in it's text field\n # and display those via an alert. It's a workaround\n # so that here we can send messages to the user from\n # the bokeh server-side python.\n if self._message_holder.text == message:\n # need to trigger a change...\n self._message_holder.text = f\"{message} \"\n else:\n self._message_holder.text = message", "title": "" }, { "docid": "288acb77919ac6d45fd12fefe695f45f", "score": "0.5635368", "text": "def message_user(user, message, level=constants.INFO):\n if user.id is None:\n raise ValueError('Anonymous users cannot send messages')\n\n user_key = _user_key(user)\n messages = cache.get(user_key) or []\n messages.append((message, level))\n cache.set(user_key, messages)", "title": "" }, { "docid": "e3138639956f4dee48629555333e570c", "score": "0.5631499", "text": "def edit_info( self, trans, cntrller, **kwd ):\n params = util.Params( kwd )\n is_admin = cntrller == 'admin' and trans.user_is_admin()\n message = util.restore_text( params.get( 'message', '' ) )\n status = params.get( 'status', 'done' )\n user_id = params.get( 'user_id', None )\n if user_id and is_admin:\n user = trans.sa_session.query( trans.app.model.User ).get( trans.security.decode_id( user_id ) )\n elif user_id and ( not trans.user or trans.user.id != trans.security.decode_id( user_id ) ):\n message = 'Invalid user id'\n status = 'error'\n user = None\n else:\n user = trans.user\n if user and params.get( 'login_info_button', False ):\n # Editing email and username\n email = util.restore_text( params.get( 'email', '' ) )\n username = util.restore_text( params.get( 'username', '' ) ).lower()\n\n # Validate the new values for email and username\n message = validate_email( trans, email, user )\n if not message and username:\n message = validate_publicname( trans, username, user )\n if message:\n status = 'error'\n else:\n if ( user.email != email ):\n # The user's private role name must match the user's login ( email )\n private_role = trans.app.security_agent.get_private_user_role( user )\n private_role.name = email\n private_role.description = 'Private role for ' + email\n # Change the email itself\n user.email = email\n trans.sa_session.add_all( ( user, private_role ) )\n trans.sa_session.flush()\n if trans.webapp.name == 'galaxy' and trans.app.config.user_activation_on:\n user.active = False\n trans.sa_session.add( user )\n trans.sa_session.flush()\n is_activation_sent = self.send_verification_email( trans, user.email, user.username )\n if is_activation_sent:\n message = 'The login information has been updated with the changes.<br>Verification email has been sent to your new email address. 
Please verify it by clicking the activation link in the email.<br>Please check your spam/trash folder in case you cannot find the message.'\n else:\n message = 'Unable to send activation email, please contact your local Galaxy administrator.'\n if trans.app.config.error_email_to is not None:\n message += ' Contact: %s' % trans.app.config.error_email_to\n if ( user.username != username ):\n user.username = username\n trans.sa_session.add( user )\n trans.sa_session.flush()\n message = 'The login information has been updated with the changes.'\n elif user and params.get( 'edit_user_info_button', False ):\n # Edit user information - webapp MUST BE 'galaxy'\n user_type_fd_id = params.get( 'user_type_fd_id', 'none' )\n if user_type_fd_id not in [ 'none' ]:\n user_type_form_definition = trans.sa_session.query( trans.app.model.FormDefinition ).get( trans.security.decode_id( user_type_fd_id ) )\n elif user.values:\n user_type_form_definition = user.values.form_definition\n else:\n # User was created before any of the user_info forms were created\n user_type_form_definition = None\n if user_type_form_definition:\n values = self.get_form_values( trans, user, user_type_form_definition, **kwd )\n else:\n values = {}\n flush_needed = False\n if user.values:\n # Editing the user info of an existing user with existing user info\n user.values.content = values\n trans.sa_session.add( user.values )\n flush_needed = True\n elif values:\n form_values = trans.model.FormValues( user_type_form_definition, values )\n trans.sa_session.add( form_values )\n user.values = form_values\n flush_needed = True\n if flush_needed:\n trans.sa_session.add( user )\n trans.sa_session.flush()\n message = \"The user information has been updated with the changes.\"\n if user and trans.webapp.name == 'galaxy' and is_admin:\n kwd[ 'user_id' ] = trans.security.encode_id( user.id )\n kwd[ 'id' ] = user_id\n if message:\n kwd[ 'message' ] = util.sanitize_text( message )\n if status:\n kwd[ 'status' ] = status\n return trans.response.send_redirect( web.url_for( controller='user',\n action='manage_user_info',\n cntrller=cntrller,\n **kwd ) )", "title": "" }, { "docid": "bb6108fddf9def5ca5792c517bddf7e8", "score": "0.55942935", "text": "def _user_changed_status(self, presence):\n logger.info('User \"%s\" changed status to %s', presence['from'], presence.get_type())", "title": "" }, { "docid": "7f8706f0d65377b621c868e4033fa978", "score": "0.5572628", "text": "def handle(cls, user, user_msg):", "title": "" }, { "docid": "155371a440cdf90e1e13ad8c62ae2143", "score": "0.5572577", "text": "def modify_current_user(self, args: dict) -> typing.Any:", "title": "" }, { "docid": "9a205071d6f3404ac15289956c1f7f7b", "score": "0.55725044", "text": "def user_nick(self, message, NICK):\n nickorig = message[1].split('!')[0].strip().lower()\n nicknew = message[2].strip().lower()\n if nickorig == NICK.botnick(): NICK.update(nicknew)\n for item in list(self.channels.keys()):\n if nickorig in list(self.channels[item].keys()):\n self.channels[item][nicknew] = self.channels[item][nickorig]\n del self.channels[item][nickorig]\n self.admins.changeNick(nickorig, nicknew)", "title": "" }, { "docid": "534350b9d3ae7b0b50db21dbcdcd74f5", "score": "0.5561521", "text": "async def note(self, ctx: Context, user: UserTypes, *, reason: str = None):\n\n response_object = await post_infraction(\n ctx, user, type=\"warning\", reason=reason, hidden=True\n )\n if response_object is None:\n return\n\n if reason is None:\n await ctx.send(f\":ok_hand: note added for 
{user.mention}.\")\n else:\n await ctx.send(f\":ok_hand: note added for {user.mention} ({reason}).\")\n\n await self.mod_log.send_log_message(\n icon_url=Icons.user_warn,\n colour=Colour(Colours.soft_red),\n title=\"Member note added\",\n thumbnail=user.avatar_url_as(static_format=\"png\"),\n text=textwrap.dedent(f\"\"\"\n Member: {user.mention} (`{user.id}`)\n Actor: {ctx.message.author}\n Reason: {reason}\n \"\"\"),\n footer=f\"ID {response_object['infraction']['id']}\"\n )", "title": "" }, { "docid": "96b44be4238a652f97e21b0698d5e589", "score": "0.5543575", "text": "def related_user(self, obj):\n link = reverse('admin:auth_user_change', args=(obj.user.id,))\n # TODO: Needs l10n? Maybe not a priority for an admin page.\n return ('<a href=\"%(link)s\"><strong>User %(id)s</strong></a>' % dict(\n link=link, id=obj.user.id, username=obj.user.username))", "title": "" }, { "docid": "d1a343fa43fbc9a5fc8edcaa54149f3a", "score": "0.55251306", "text": "def on_notice(self, user, to, message):\n pass", "title": "" }, { "docid": "3c1aec712a0febcc8b12345d36baa04e", "score": "0.5517856", "text": "def _callback(self, bot, update, user):\n message = update.message\n name_message = \":guardsman: {name}\".format(name=user.name)\n keyboard = ManuKeyboard(admin=user.is_admin,\n manager=user.is_manager).markup\n\n message.reply_text(emojize(name_message, use_aliases=True),\n reply_markup=keyboard)", "title": "" }, { "docid": "e6504f44c34e1b37eb2f7dda41dea424", "score": "0.5517627", "text": "def sync_error(changes, error_message, ebay_user_id=None, customer_name=None,\n customer=None, address=None, ebay_order=None):\n changes.append({\"ebay_change\": error_message,\n \"ebay_user_id\": ebay_user_id,\n \"customer_name\": customer_name,\n \"customer\": customer,\n \"address\": address,\n \"ebay_order\": ebay_order})", "title": "" }, { "docid": "529df08d9b1331e2d8e6f035a819920b", "score": "0.55109066", "text": "def user_data(update):\n user_name = update.message.from_user.username\n user_id = update.message.from_user.id\n\n return f\"{user_name} ({user_id})\"", "title": "" }, { "docid": "614fb92366354cb679d939de1726782f", "score": "0.54761565", "text": "def UpdateUserNotifications(self, username, timestamps, state=None):", "title": "" }, { "docid": "14c697872d081934544e430efa5d03e1", "score": "0.54589283", "text": "def messageUsers():\n\n global lastUpdate\n runTime = time.localtime()\n # Updating admin on last time messages were sent\n lastUpdate = f\"A Custom Message was last sent on {runTime.tm_mday}/{runTime.tm_mon}/{runTime.tm_year}.\"\n\n # Send Custom Message\n bot.send_chat_action(admin, action='typing')\n bot.send_message(admin, \"Your custom message is being sent to users in the database.\")\n\n\n \n [bot.send_message(f\"{data}\", customText) for data in database if data not in blacklist]\n\n \n\n\n # Custom Message Sent Successful\n bot.send_message(admin, \"Successfully sent to all Users in the Database.\")\n\n\n\n return lastUpdate", "title": "" }, { "docid": "a39917a78c27fb4cfe799c83a847ba9c", "score": "0.5458197", "text": "def update_user(self, updated_user):\n sender_id = updated_user['sender_id']\n self.users.find_one_and_replace({\"sender_id\": sender_id}, updated_user)", "title": "" }, { "docid": "423966f49ed876f9748795a6954114f0", "score": "0.54543585", "text": "def update_self(\n changes: schemas.UpdateUser,\n user: models.User = Depends(auth.get_current_user),\n db: Session = Depends(get_db)\n):\n user.user_first = changes.user_first or user.user_first\n user.user_last = changes.user_last or 
user.user_last\n if changes.new_password:\n if not changes.old_password:\n raise HTTPException(\n status_code=400, detail=\"Must also supply current password\")\n if not auth.verify_password(user.user_hashed_password, changes.old_password):\n raise HTTPException(\n status_code=400, detail=\"Current password did not match\")\n if len(changes.new_password) < auth.MINIMUM_PASSWORD_LENGTH:\n raise HTTPException(\n status_code=400,\n detail=f\"New password much be at least {auth.MINIMUM_PASSWORD_LENGTH} characters long\"\n )\n user.user_hashed_password = auth.hash_password(changes.new_password)\n user.user_email = changes.user_email or user.user_email\n user.user_skill = changes.user_skill or user.user_skill\n user.user_description = changes.user_description or user.user_description\n user.user_profile_picture = changes.user_profile_picture or user.user_profile_picture\n user.user_location = changes.user_location or user.user_location\n user.user_is_medical_professional = changes.user_is_medical_professional or user.user_is_medical_professional\n user.user_is_volunteer = changes.user_is_volunteer or user.user_is_volunteer\n db.add(user)\n db.commit()\n return Response(status_code=status.HTTP_200_OK)", "title": "" }, { "docid": "06f48f49015f6a42d8a6736f0bcf5de2", "score": "0.5450232", "text": "def notifyTournamentUpdate(self, msg):\n\t\tpass", "title": "" }, { "docid": "2dd1287ab3d5948e85e8fbc13ba9bf8a", "score": "0.5434995", "text": "def send_member_change(self, change):\n # The superclass implementation is sufficient.\n super(TextEditor, self).send_member_change(change)", "title": "" }, { "docid": "d956af16be892fadc656b9ad29569dbc", "score": "0.54224193", "text": "def alter_message(message):\n if not is_tagged(message) and get_tagnum_from_branch():\n info_branch_rename = (\n '\\033[93m' + 'Commit will be renamed as: \"' +\n str(get_tagnum_from_branch()) +\n ': ' + str(message) + '\"' + '\\033[0m'\n )\n print info_branch_rename\n write_tagnum(message, get_tagnum_from_branch())", "title": "" }, { "docid": "7448c9a93981964cb1fb1dcfbc8b898e", "score": "0.5420669", "text": "def user_message(self, to, content, id=None):\n\t\tto = self.get_user(to)\n\t\tto = to.get(\"userid\")\n\t\tparams = {\"type\": USER_MSG, \"content\": content, \"destuserid\": to}\n\t\tif id:\n\t\t\tparams[\"id\"] = id\n\t\tmsg = build_tt_message(\"message\", params)\n\t\tself.send(msg)", "title": "" }, { "docid": "e4cdc62d0fc68a4083a0cb098a021979", "score": "0.5397606", "text": "def do_user(self, line):\n if line == \"\":\n print self.user\n else:\n if self.client != None or self.session != None:\n print (\"You can't change user in an active connection, \"\n \"please disconnect it.\")\n return\n self.user = line\n self.prompt = self.user + self.prompt_prefix\n print (\"You have changed username to '%s', please reconnect\"\n \"('disconnect' and 'c') to apply it.\" % (self.user))", "title": "" }, { "docid": "9c0fb78ea79226f215ab8d4c427fa907", "score": "0.5384736", "text": "def update(self, label, message=None, cache=True):\n if label not in states:\n raise ValueError(\"Not a recognized state label: \"+label)\n if not message:\n message = user_message[label]\n self._data['user']['state'] = label\n self._data['user']['message'] = message\n if cache:\n self.cache()", "title": "" }, { "docid": "5596d09143cc6e1f2febbb1ed2394bd9", "score": "0.5381596", "text": "def response_change(self, request, obj):\n\n if IS_POPUP_VAR in request.POST:\n to_field = request.POST.get(TO_FIELD_VAR)\n attr = str(to_field) if to_field else 
obj._meta.pk.attname\n # Retrieve the `object_id` from the resolved pattern arguments.\n value = request.resolver_match.args[0]\n new_value = obj.serializable_value(attr)\n return SimpleTemplateResponse('admin/popup_response.html', {\n 'action': 'change',\n 'value': value,\n 'obj': obj,\n 'new_value': new_value,\n })\n\n opts = self.model._meta\n preserved_filters = self.get_preserved_filters(request)\n\n msg_dict = {'name': force_text(opts.verbose_name), 'obj': force_text(obj)}\n if \"_continue\" in request.POST:\n msg = _('The %(name)s \"%(obj)s\" was changed successfully. You may edit it again below.') % msg_dict\n self.message_user(request, msg, messages.SUCCESS)\n redirect_url = request.path\n redirect_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, redirect_url)\n return HttpResponseRedirect(redirect_url)\n\n elif \"_saveasnew\" in request.POST:\n msg = _('The %(name)s \"%(obj)s\" was added successfully. You may edit it again below.') % msg_dict\n self.message_user(request, msg, messages.SUCCESS)\n redirect_url = self.admin_change_url(obj)\n redirect_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, redirect_url)\n return HttpResponseRedirect(redirect_url)\n\n elif \"_addanother\" in request.POST:\n msg = _('The %(name)s \"%(obj)s\" was changed successfully. You may add another %(name)s below.') % msg_dict\n self.message_user(request, msg, messages.SUCCESS)\n redirect_url = self.admin_add_url()\n redirect_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, redirect_url)\n return HttpResponseRedirect(redirect_url)\n\n else:\n msg = _('The %(name)s \"%(obj)s\" was changed successfully.') % msg_dict\n self.message_user(request, msg, messages.SUCCESS)\n return self.response_post_save_change(request, obj)", "title": "" }, { "docid": "f118113fecf73e6a3f9180ba7c0b98b0", "score": "0.53756297", "text": "def doChangeUser(user_id, password, **kw):", "title": "" }, { "docid": "16ceb96df2f5ebd8b8a00f36dce21bc2", "score": "0.53668135", "text": "def prepare_password_changed_mail(\n user: User,\n) -> EmailMessage:\n server_url = get_server_url()\n\n context = {\n 'api_token_url': AuthenticationPage.get_absolute_url(),\n 'has_api_tokens': user.webapi_tokens.exists(),\n 'server_url': server_url,\n 'user': user,\n }\n\n user_email = build_email_address_for_user(user)\n text_body = render_to_string(\n template_name='notifications/password_changed.txt',\n context=context)\n html_body = render_to_string(\n template_name='notifications/password_changed.html',\n context=context)\n\n return EmailMessage(\n subject='Password changed for user \"%s\" on %s' % (user.username,\n server_url),\n text_body=text_body,\n html_body=html_body,\n from_email=settings.DEFAULT_FROM_EMAIL,\n sender=settings.DEFAULT_FROM_EMAIL,\n to=(user_email,))", "title": "" }, { "docid": "272b1d75c444e8fde00be664067f6d38", "score": "0.53648984", "text": "def record_change_entry(self, changedesc, old_value, new_value):\n pass", "title": "" }, { "docid": "e5513dacbe6b4fc874a1e091430366b0", "score": "0.53627133", "text": "def updateMessages(self, parameters):\r\n return", "title": "" }, { "docid": "e5513dacbe6b4fc874a1e091430366b0", "score": "0.53627133", "text": "def updateMessages(self, parameters):\r\n return", "title": "" }, { "docid": "e5513dacbe6b4fc874a1e091430366b0", "score": "0.53627133", "text": "def updateMessages(self, parameters):\r\n return", "title": "" }, { "docid": "e5513dacbe6b4fc874a1e091430366b0", "score": "0.53627133", 
"text": "def updateMessages(self, parameters):\r\n return", "title": "" }, { "docid": "e5513dacbe6b4fc874a1e091430366b0", "score": "0.53627133", "text": "def updateMessages(self, parameters):\r\n return", "title": "" }, { "docid": "e5513dacbe6b4fc874a1e091430366b0", "score": "0.53627133", "text": "def updateMessages(self, parameters):\r\n return", "title": "" }, { "docid": "e5513dacbe6b4fc874a1e091430366b0", "score": "0.53627133", "text": "def updateMessages(self, parameters):\r\n return", "title": "" }, { "docid": "e5513dacbe6b4fc874a1e091430366b0", "score": "0.53627133", "text": "def updateMessages(self, parameters):\r\n return", "title": "" }, { "docid": "e5513dacbe6b4fc874a1e091430366b0", "score": "0.53627133", "text": "def updateMessages(self, parameters):\r\n return", "title": "" }, { "docid": "f70fdfba69e11a1326c6ed0f50f13ac6", "score": "0.5355332", "text": "def construct_change_message(self, request, form, formsets):\r\n change_message = []\r\n if form.changed_data:\r\n change_message.append(_('Changed %s.') % get_text_list(form.changed_data, _('and')))\r\n\r\n if formsets:\r\n for formset in formsets:\r\n for added_object in formset.new_objects:\r\n change_message.append(_('Added %(name)s \"%(object)s\".')\r\n % {'name': force_unicode(added_object._meta.verbose_name),\r\n 'object': force_unicode(added_object)})\r\n for changed_object, changed_fields in formset.changed_objects:\r\n change_message.append(_('Changed %(list)s for %(name)s \"%(object)s\".')\r\n % {'list': get_text_list(changed_fields, _('and')),\r\n 'name': force_unicode(changed_object._meta.verbose_name),\r\n 'object': force_unicode(changed_object)})\r\n for deleted_object in formset.deleted_objects:\r\n change_message.append(_('Deleted %(name)s \"%(object)s\".')\r\n % {'name': force_unicode(deleted_object._meta.verbose_name),\r\n 'object': force_unicode(deleted_object)})\r\n change_message = ' '.join(change_message)\r\n return change_message or _('No fields changed.')", "title": "" }, { "docid": "9354d14ee96a5099fd189970033e615a", "score": "0.53547037", "text": "def change_note(self, new_message):\n self.message = Note(new_message)\n return f'{self.message.note}'", "title": "" }, { "docid": "d2aebac22692eee73b88ddf9ebbf2edf", "score": "0.53485113", "text": "def user_pre_save_callback(sender, **kwargs):\n user = kwargs['instance']\n user._changed_fields = get_changed_fields_dict(user, sender) # lint-amnesty, pylint: disable=protected-access", "title": "" }, { "docid": "099e4a3c73c0028f09bae43e8d27e036", "score": "0.53395176", "text": "def changeButtonClicked(self):\n # given password is correct?\n if not statics.currentLogedUser.isPasswordCorrect(self.txtOldPassword.text()):\n message=QtGui.QMessageBox(self)\n message.setText('Podane hasło nie jest prawidłowe')\n message.exec_()\n\n # given two new passwords are the same?\n elif self.txtNewPassword.text()!=self.txtNewPasswordRep.text():\n message=QtGui.QMessageBox(self)\n message.setText('Podane nowe hasła nie są takie same')\n message.exec_()\n\n # changing password in database\n else:\n session=db.Session()\n session.add(statics.currentLogedUser)\n statics.currentLogedUser.password=statics.currentLogedUser.encryptPassword(self.txtNewPassword.text())\n session.commit()\n session.close()\n self.done(1)", "title": "" }, { "docid": "3473aa9573ecec5c35f98d47e66a89af", "score": "0.53262633", "text": "def on_privmsg(self, user, to, message):\n pass", "title": "" }, { "docid": "f66306da09681aa8c693d3a3313ac399", "score": "0.5324012", "text": "def AddMessageIDToUser(UserID: 
str, BotMessage: aiogram.types.Message) -> None:\n Settings.UserMessage[UserID] = BotMessage.message_id", "title": "" }, { "docid": "51d33b578c57d93081f8751e5905a08a", "score": "0.5321136", "text": "def updateMessages(self):\n \n if (self.params[0].Altered == True):\n spatial_ref = self.GP.Describe(self.params[0].Value).SpatialReference\n if (spatial_ref.Type != \"Geographic\"):\n self.params[0].SetError(\"%s is not in a Geographic Coordinate System.\" % self.params[0].Value)\n \n return", "title": "" }, { "docid": "71216a765fab3d63545e79c9c8f5b867", "score": "0.531277", "text": "def log_change(self, *args):\n return", "title": "" }, { "docid": "96dd96a3226662be4c9034f0942c7800", "score": "0.52916473", "text": "def UpdateMessage(message, diff):\n if diff:\n return _UpdateMessageHelper(message, diff)\n return message", "title": "" }, { "docid": "81c45faa299678afe7881ebd45b0a13f", "score": "0.52892196", "text": "def on_emoji_change(self, **output):\n pass", "title": "" }, { "docid": "f706f1be451b80afae30f66b66c60559", "score": "0.52886844", "text": "def feedback_data(self, user: discord.Member, msg):\n return {\n \"author_name\": user.display_name,\n \"author_id\": user.id,\n \"time\": dt.utcnow().strftime('%Y-%m-%d %H:%M:%S'),\n \"message\": msg\n }", "title": "" }, { "docid": "2b39e4c5b4a4ca2c5089779a04a11e33", "score": "0.52668446", "text": "def update_user_data(update: telegram.Update, context: telegram.ext.CallbackContext):\n context.user_data['chat_id'] = update.effective_chat.id\n context.user_data['username'] = update.effective_user.username\n context.user_data['first_name'] = update.effective_user.first_name\n context.user_data['last_name'] = update.effective_user.last_name", "title": "" }, { "docid": "71871db1befb9b04e88b88af0f55c737", "score": "0.52653986", "text": "def add_user(self):\n userkey = self.username_edit.get().strip(' \\t\\n')\n self.update_user(\n userkey, labels['user_exists'].format(userkey),\n labels['user_added'].format(userkey))", "title": "" }, { "docid": "36abd41ec4169fe243de3e1ac7029181", "score": "0.5246128", "text": "def vpnuser_updated(self, context):\n self.fanout_cast(\n context, self.make_msg('vpnuser_updated',\n tenant_id=context.tenant_id),\n version=self.RPC_API_VERSION,\n topic='%s' % (topics.PPTP_AGENT_TOPIC))", "title": "" }, { "docid": "8b003a9c69ded1d212df354eead82ce7", "score": "0.52301466", "text": "def record_change_entry(self, changedesc, old_value, new_value):\n changedesc.fields_changed[self.field_id] = {\n 'old': old_value and old_value.pk,\n 'new': new_value.pk,\n }", "title": "" }, { "docid": "fefb8a02f371849765a5ef9a9b514a9f", "score": "0.5228127", "text": "def rename_user(self, old_user: str, new_user: str):\n if self._version > (6, 0, 0):\n url = self._get_latest_url(\"user\")\n payload = {\"name\": new_user}\n params = {\"username\": old_user}\n\n # raw displayName\n self.log.debug(f\"renaming {self.user(old_user).emailAddress}\")\n\n self._session.put(url, params=params, data=json.dumps(payload))\n else:\n raise NotImplementedError(\n \"Support for renaming users in Jira \" \"< 6.0.0 has been removed.\"\n )", "title": "" }, { "docid": "44b695e9c4167ea932b9031e668b9b5e", "score": "0.5222984", "text": "def commandeer_revision_as_user(self, revisionid, username):\n revision = self._data.get_revision(revisionid)\n self._data.assert_is_user(username)\n assert not revision.is_closed()\n assert revision.author != username\n revision.author = username\n self._data.set_changed()", "title": "" }, { "docid": "ac98f3c1c51055c0b6322adc6aee60ec", 
"score": "0.521609", "text": "def change_edu(self, change):\n if type(change) == int:\n self.educacion += change\n else:\n logging.debug('change de EDUCACION no es entero')", "title": "" }, { "docid": "09e4c5825ff97f428e4f859bfa114b83", "score": "0.5215908", "text": "async def update_user(cls, behaviours: Behaviour):\n pass", "title": "" }, { "docid": "44153b8f5e1b730ad00b35bb4dd55986", "score": "0.5207033", "text": "def mutate(self, info, message, userId=None): # pylint: disable=unused-argument\n assert self is None, \"Root `self` expected to be `None`!\"\n\n # Notify subscribers.\n OnChatMessageSent.notify(message=message, userId=userId)\n\n return SendChatMessage.Output(message=message, userId=userId)", "title": "" }, { "docid": "df2ee0b3f1d1f0d5f5d4e036aaa5c681", "score": "0.52044725", "text": "def _handle_updateuser(self, params):\n\t\tuser_index = self.get_user(params[\"userid\"], index=True)\n\t\tif user_index != None:\n\t\t\tself.users[user_index].update(params)", "title": "" }, { "docid": "3ecd08ccfb69751dd595f2807db91854", "score": "0.519991", "text": "def email_changed(self):\n User.get_or_create(self.params['user']).email=self.params['email']\n\n return {'status' : 'accepted'}", "title": "" }, { "docid": "4c1b189dbebf94474013ae7600c98304", "score": "0.5187235", "text": "def privmsg(self, user, channel, msg):\n\t\tself.factory.logger.log(\"{0}: {1}\".format(user, msg), channel)\n\t\tself.handleMessage(user, channel, msg, 'say')", "title": "" }, { "docid": "2b7031e45f2ecd9e08ddccd89a5cc436", "score": "0.518509", "text": "def do_op_user(self, user_name):\n if self.is_client_mod:\n if len(user_name) is not 0:\n _user = self.users.search(user_name)\n if _user is not None:\n _user.user_level = 4\n self.send_bot_msg(unicode_catalog.BLACK_STAR + ' *%s* is now a bot controller (L4)' % user_name)\n else:\n self.send_bot_msg(unicode_catalog.INDICATE + ' No user named: %s' % user_name)\n else:\n self.send_bot_msg(unicode_catalog.INDICATE + ' Missing username.')", "title": "" }, { "docid": "0480c6d9c6570bca27e5cedc40a1cd4a", "score": "0.51756173", "text": "def message(instance_id, state, changed=0):\n\n # Changed parameter determines what message to return\n if changed != 0:\n return_message = (\"Instance {} is in {} state\").format(instance_id, state)\n else:\n return_message = \"No change for Instance# {}. 
Currently in {} state\".format(\n instance_id, state)\n return return_message", "title": "" }, { "docid": "b8f7a7db8b7aa5b78dd222e60d35b967", "score": "0.5173666", "text": "def reply(update: tg.Update, context: CallbackContext):\n bot: tg.Bot = context.bot\n reply_user = user_from_tg_user(update.message.reply_to_message.from_user)\n replying_user = user_from_tg_user(update.message.from_user)\n chat_id = str(update.message.chat_id)\n chat = Telegram_Chat(chat_id, update.message.chat.title)\n original_message = Telegram_Message(\n update.message.reply_to_message.message_id,\n chat.chat_id,\n reply_user.id,\n update.message.reply_to_message.text)\n reply_message = Telegram_Message(\n update.message.message_id,\n chat.chat_id,\n replying_user.id,\n update.message.text)\n reply_text = reply_message.message_text\n\n if re.match(\"^([+pP][1-9][0-9]*|[Pp]{2}).*\", reply_text):\n # if user tried to +1 self themselves\n # chat id is user_id when the user is talking 1 on 1 with the bot\n if(replying_user.id == update.message.reply_to_message.from_user.id and chat_id != str(reply_user.id)):\n default_respose = \"USER_FIRST_NAME you cannot +1 yourself\"\n response = dbservice.get_random_witty_response()\n if response is None:\n response = default_respose\n\n message = response.replace(\"USER_FIRST_NAME\", replying_user.first_name)\n bot.send_message(chat_id=chat_id, text=message)\n else: # user +1 someone else\n dbservice.user_reply_to_message(\n replying_user,\n reply_user,\n chat,\n original_message,\n reply_message,\n 1)\n logging.debug(\"user replying other user\")\n logging.debug(replying_user)\n logging.debug(reply_user)\n # user -1 someone else\n elif re.match(\"^([\\-mM][1-9][0-9]*|[Dd]{2}).*\", reply_text):\n dbservice.user_reply_to_message(\n replying_user,\n reply_user,\n chat,\n original_message,\n reply_message,\n -1)\n logging.debug(\"user replying other user\")\n logging.debug(replying_user)\n logging.debug(reply_user)", "title": "" }, { "docid": "11043c1c7458ebe36cd14507adf40c80", "score": "0.51719856", "text": "def updatePreviousMessage(self, message):\n self.previousMessage = message", "title": "" }, { "docid": "474268c99fffdde6262c71f3f51d960c", "score": "0.51719546", "text": "def changeFlags(self, user, chflags):\n curflags = self.authlib.get_flags(user)\n curflags = set(curflags)\n args = re.split(r'([+-])', chflags)[1:]\n for i in range(0, len(args), 2):\n action, flags = args[i], args[i+1]\n flags = set(flags)\n if action == '-':\n for flag in flags:\n curflags.discard(flag)\n elif action == '+':\n for flag in flags:\n curflags.add(flag)\n curflags = ''.join(curflags)\n self.authlib.change_flags(user, curflags)\n if self.users.has_key(user):\n self.users[user].flags = curflags\n return 'flags for %s changed to %s' % (user, curflags)", "title": "" }, { "docid": "7b161763ee4898faa9b4ecce5ba21737", "score": "0.5171271", "text": "def whisper(self, user, message):\n if user[0] == '#':\n LOGGER.warning(f\"Whisper is for users only.\")\n else:\n super().message('#jtv', f\".w {user} {message}\")", "title": "" }, { "docid": "13691af1b73c05e602da9fe0358c0cde", "score": "0.517078", "text": "def post(self): # pylint: disable=g-bad-name\n super(Phase3RecallUserMessagesHandler, self).post()\n self._RecallUserMessages(\n message_criteria=self.request.get('message_criteria'),\n user_email=self.request.get('user_email'),\n user_key_id=int(self.request.get('user_key_id')))", "title": "" }, { "docid": "3537568e52284a09e5b71c7964d093ab", "score": "0.51677716", "text": "def send_msg(self, user_id, 
message, *args):\n pass", "title": "" }, { "docid": "3c3f54515251b024dbcf1ccec3a517fd", "score": "0.51573324", "text": "def record_change_entry(self, changedesc, old_value, new_value):\n changedesc.record_field_change(self.field_id, old_value, new_value,\n self.model_name_attr)", "title": "" }, { "docid": "3c3f54515251b024dbcf1ccec3a517fd", "score": "0.51573324", "text": "def record_change_entry(self, changedesc, old_value, new_value):\n changedesc.record_field_change(self.field_id, old_value, new_value,\n self.model_name_attr)", "title": "" }, { "docid": "eeeec400b3920e897f5043d55f145d75", "score": "0.5156367", "text": "async def _set_text_only_message(self, ctx, *, message):\n\n prev, new = await self.settings.set_text_only_message(\n channel=ctx.channel, new_message=message\n )\n\n await ctx.send(\n \"**Changed text only message from:**\\n```{prev}```**To:**\\n```{new}``` \".format(\n prev=prev.format(ctx.channel), new=new.format(ctx.channel)\n )\n )", "title": "" } ]
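
The positive passage and the top-scoring negatives for this query all share one pattern: append a human-readable change message to the user's change log and flag the record for saving. Below is a minimal, self-contained sketch of that pattern; the `UserChangeMessage` and `UserChangeLog` names, their fields, and the `is_new_user` guard are illustrative assumptions drawn from the passages above, not part of the dataset.

```python
from dataclasses import dataclass, field
from typing import List


@dataclass
class UserChangeMessage:
    """Hypothetical message object describing one change to a user record."""
    field_name: str
    new_value: str

    def __str__(self) -> str:
        return f"{self.field_name} set to {self.new_value!r}"


@dataclass
class UserChangeLog:
    """Accumulates change messages and marks the record dirty, mirroring
    the add_info / add_change_message passages above."""
    is_new_user: bool = False
    change_messages: List[str] = field(default_factory=list)
    needs_save: bool = False

    def add_change_message(self, message: UserChangeMessage) -> None:
        # A brand-new user has no prior state to diff against, so the
        # top negatives skip logging in that case; same guard here.
        if self.is_new_user:
            return
        self.change_messages.append(str(message))
        self.needs_save = True


# Usage: log one change and confirm a save is now pending.
log = UserChangeLog()
log.add_change_message(UserChangeMessage("email", "new@example.com"))
assert log.needs_save
assert log.change_messages == ["email set to 'new@example.com'"]
```
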
3f62537049bf2b6378f5ee941fc033c3
Test case for watch_extensions_v1beta1_namespaced_ingress_list
[ { "docid": "b32ac6cd95145f0d686371cca5c886cb", "score": "0.9473907", "text": "def test_watch_extensions_v1beta1_namespaced_ingress_list(self):\n pass", "title": "" } ]
[ { "docid": "a61919137b977e1ade8bd4e4a35a0b99", "score": "0.8538603", "text": "def test_list_extensions_v1beta1_namespaced_ingress(self):\n pass", "title": "" }, { "docid": "532a0c68be7f6306bfa24ae5f01f9637", "score": "0.8520756", "text": "def test_watch_extensions_v1beta1_ingress_list_for_all_namespaces(self):\n pass", "title": "" }, { "docid": "186d48eb6f6a6cce00682797417c6fa5", "score": "0.8255956", "text": "def test_watch_extensions_v1beta1_namespaced_ingress(self):\n pass", "title": "" }, { "docid": "9c278ee14cfad4661e1564f4667c2309", "score": "0.7719294", "text": "def test_list_extensions_v1beta1_ingress_for_all_namespaces(self):\n pass", "title": "" }, { "docid": "782d763ef04d42911ec2e82e5c11fd9f", "score": "0.76148415", "text": "def test_read_extensions_v1beta1_namespaced_ingress_status(self):\n pass", "title": "" }, { "docid": "2a41a7a972a22c4a164420266965a6ac", "score": "0.76107633", "text": "def test_read_extensions_v1beta1_namespaced_ingress(self):\n pass", "title": "" }, { "docid": "af58789be699e1914435a81b8ca6b4ff", "score": "0.73455685", "text": "def test_patch_extensions_v1beta1_namespaced_ingress_status(self):\n pass", "title": "" }, { "docid": "b9bab5aace0c96cf1b00c9bc7b950dff", "score": "0.72635585", "text": "def test_create_extensions_v1beta1_namespaced_ingress(self):\n pass", "title": "" }, { "docid": "2b85b6c3d6bf466f38efe829e31c773e", "score": "0.72617", "text": "def test_replace_extensions_v1beta1_namespaced_ingress_status(self):\n pass", "title": "" }, { "docid": "be2c1fb4d14c3d632f04be21f8ec49c6", "score": "0.7197285", "text": "def test_patch_extensions_v1beta1_namespaced_ingress(self):\n pass", "title": "" }, { "docid": "f7c798425d3394a9ad03b6d143ba7893", "score": "0.70378566", "text": "def test_watch_extensions_v1beta1_namespaced_deployment_list(self):\n pass", "title": "" }, { "docid": "dff4f82c7c13e61a8f0e89565f78826a", "score": "0.6864299", "text": "def watch_namespaced_ingress(self, namespace, name, **kwargs):\n\n all_params = ['namespace', 'name', 'field_selector', 'watch', 'resource_version', 'timeout_seconds', 'pretty', 'label_selector']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method watch_namespaced_ingress\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'namespace' is set\n if ('namespace' not in params) or (params['namespace'] is None):\n raise ValueError(\"Missing the required parameter `namespace` when calling `watch_namespaced_ingress`\")\n # verify the required parameter 'name' is set\n if ('name' not in params) or (params['name'] is None):\n raise ValueError(\"Missing the required parameter `name` when calling `watch_namespaced_ingress`\")\n\n resource_path = '/apis/extensions/v1beta1/watch/namespaces/{namespace}/ingresses/{name}'.replace('{format}', 'json')\n path_params = {}\n if 'namespace' in params:\n path_params['namespace'] = params['namespace']\n if 'name' in params:\n path_params['name'] = params['name']\n\n query_params = {}\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n if 'pretty' in params:\n query_params['pretty'] = 
params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf', 'application/vnd.kubernetes.protobuf;stream=watch'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='Model2AversionedEvent',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "title": "" }, { "docid": "e5ed73a865faaa346ee798ef29da26c9", "score": "0.68290484", "text": "def test_delete_extensions_v1beta1_collection_namespaced_ingress(self):\n pass", "title": "" }, { "docid": "42eedec608b8217899bb08964e92486f", "score": "0.6822978", "text": "def test_replace_extensions_v1beta1_namespaced_ingress(self):\n pass", "title": "" }, { "docid": "9c9cfe4f4e9e98c7406052a0e8df02bb", "score": "0.68204683", "text": "def test_delete_extensions_v1beta1_namespaced_ingress(self):\n pass", "title": "" }, { "docid": "3d5a1c8571b07a3b5b31943d62750b23", "score": "0.65488666", "text": "def test_watch_extensions_v1beta1_deployment_list_for_all_namespaces(self):\n pass", "title": "" }, { "docid": "1bf90df5fc5c33a4a6f509ab9d2b2d74", "score": "0.6452552", "text": "def test_watch_extensions_v1beta1_namespaced_network_policy_list(self):\n pass", "title": "" }, { "docid": "26b3de23f23481fc606e786b5c5492fb", "score": "0.6346367", "text": "def test_list_extensions_v1beta1_namespaced_deployment(self):\n pass", "title": "" }, { "docid": "762619cb040a302820c0c96bc48d32b2", "score": "0.62667155", "text": "def watch_namespaced_endpoints_list(self, **kwargs):\n\n all_params = ['pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method watch_namespaced_endpoints_list\" % key\n )\n params[key] = val\n del params['kwargs']\n\n resource_path = '/api/v1/watch/endpoints'.replace('{format}', 'json')\n method = 'GET'\n\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = {}\n files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf', 'application/vnd.kubernetes.protobuf;stream=watch'])\n if not 
header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, method,\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=files,\n response_type='*VersionedEvent',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "title": "" }, { "docid": "d210cf263ecb4e2bfefcf4e623264bae", "score": "0.60993636", "text": "def deletecollection_namespaced_ingress(self, namespace, **kwargs):\n\n all_params = ['namespace', 'pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method deletecollection_namespaced_ingress\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'namespace' is set\n if ('namespace' not in params) or (params['namespace'] is None):\n raise ValueError(\"Missing the required parameter `namespace` when calling `deletecollection_namespaced_ingress`\")\n\n resource_path = '/apis/extensions/v1beta1/namespaces/{namespace}/ingresses'.replace('{format}', 'json')\n path_params = {}\n if 'namespace' in params:\n path_params['namespace'] = params['namespace']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'DELETE',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='UnversionedStatus',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "title": "" }, { "docid": "c74aa085ff1a9eaa37bdad14830212d0", "score": "0.60490644", "text": "def watch_namespaced_endpoints_list_44(self, namespace, **kwargs):\n # verify the required parameter 'namespace' is set\n if namespace is None:\n raise ValueError(\"Missing the required parameter `namespace` when calling `watch_namespaced_endpoints_list_44`\")\n\n all_params = ['namespace', 'pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected 
keyword argument '%s'\"\n \" to method watch_namespaced_endpoints_list_44\" % key\n )\n params[key] = val\n del params['kwargs']\n\n resource_path = '/api/v1/watch/namespaces/{namespace}/endpoints'.replace('{format}', 'json')\n method = 'GET'\n\n path_params = {}\n if 'namespace' in params:\n path_params['namespace'] = params['namespace']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = {}\n files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf', 'application/vnd.kubernetes.protobuf;stream=watch'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, method,\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=files,\n response_type='*VersionedEvent',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "title": "" }, { "docid": "32b69a62730bba3dc84db6736fb15f5a", "score": "0.599314", "text": "def read_namespaced_ingress_status(self, namespace, name, **kwargs):\n\n all_params = ['namespace', 'name', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method read_namespaced_ingress_status\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'namespace' is set\n if ('namespace' not in params) or (params['namespace'] is None):\n raise ValueError(\"Missing the required parameter `namespace` when calling `read_namespaced_ingress_status`\")\n # verify the required parameter 'name' is set\n if ('name' not in params) or (params['name'] is None):\n raise ValueError(\"Missing the required parameter `name` when calling `read_namespaced_ingress_status`\")\n\n resource_path = '/apis/extensions/v1beta1/namespaces/{namespace}/ingresses/{name}/status'.replace('{format}', 'json')\n path_params = {}\n if 'namespace' in params:\n path_params['namespace'] = params['namespace']\n if 'name' in params:\n path_params['name'] = params['name']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings 
= []\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1beta1Ingress',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "title": "" }, { "docid": "c85e908eec641651e67104e4c975cfc6", "score": "0.5979876", "text": "def test_watch_extensions_v1beta1_network_policy_list_for_all_namespaces(self):\n pass", "title": "" }, { "docid": "0bc203154dc418601965a3f6067d88f2", "score": "0.5977575", "text": "def test_watch_extensions_v1beta1_namespaced_deployment(self):\n pass", "title": "" }, { "docid": "13249b2362f7c3b9f92217a9dc3ba53b", "score": "0.59373474", "text": "def watch_namespaced_event_list(self, **kwargs):\n\n all_params = ['pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method watch_namespaced_event_list\" % key\n )\n params[key] = val\n del params['kwargs']\n\n resource_path = '/api/v1/watch/events'.replace('{format}', 'json')\n method = 'GET'\n\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = {}\n files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf', 'application/vnd.kubernetes.protobuf;stream=watch'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, method,\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=files,\n response_type='*VersionedEvent',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "title": "" }, { "docid": "e57026cb91b1121a5fd817ae99b2eb08", "score": "0.593172", "text": "def watch_namespaced_service_account_list(self, namespace, **kwargs):\n # verify the required parameter 'namespace' is set\n if namespace is None:\n raise ValueError(\"Missing the required parameter `namespace` when calling `watch_namespaced_service_account_list`\")\n\n all_params = ['namespace', 'pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method watch_namespaced_service_account_list\" % key\n )\n params[key] = val\n del params['kwargs']\n\n resource_path = 
'/api/v1/watch/namespaces/{namespace}/serviceaccounts'.replace('{format}', 'json')\n method = 'GET'\n\n path_params = {}\n if 'namespace' in params:\n path_params['namespace'] = params['namespace']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = {}\n files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf', 'application/vnd.kubernetes.protobuf;stream=watch'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, method,\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=files,\n response_type='*VersionedEvent',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "title": "" }, { "docid": "aff582e9c92409ac6f06dcf4971562ee", "score": "0.5926127", "text": "def watch_namespace_list(self, **kwargs):\n\n all_params = ['pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method watch_namespace_list\" % key\n )\n params[key] = val\n del params['kwargs']\n\n resource_path = '/api/v1/watch/namespaces'.replace('{format}', 'json')\n method = 'GET'\n\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = {}\n files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf', 'application/vnd.kubernetes.protobuf;stream=watch'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, method,\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=files,\n response_type='*VersionedEvent',\n 
auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "title": "" }, { "docid": "89e77fadc65619636d87c4d505703a2b", "score": "0.5900496", "text": "def watch_namespaced_replication_controller_list(self, namespace, **kwargs):\n # verify the required parameter 'namespace' is set\n if namespace is None:\n raise ValueError(\"Missing the required parameter `namespace` when calling `watch_namespaced_replication_controller_list`\")\n\n all_params = ['namespace', 'pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method watch_namespaced_replication_controller_list\" % key\n )\n params[key] = val\n del params['kwargs']\n\n resource_path = '/api/v1/watch/namespaces/{namespace}/replicationcontrollers'.replace('{format}', 'json')\n method = 'GET'\n\n path_params = {}\n if 'namespace' in params:\n path_params['namespace'] = params['namespace']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = {}\n files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf', 'application/vnd.kubernetes.protobuf;stream=watch'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, method,\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=files,\n response_type='*VersionedEvent',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "title": "" }, { "docid": "8b4f8e9ce9ba0b5d07ef61285a7b4c17", "score": "0.5875137", "text": "def test_list_extensions_v1beta1_namespaced_network_policy(self):\n pass", "title": "" }, { "docid": "e3935bfe26e9e5b87a9d5690c02b1033", "score": "0.58702505", "text": "def patch_namespaced_ingress_status(self, namespace, name, body, **kwargs):\n\n all_params = ['namespace', 'name', 'body', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method patch_namespaced_ingress_status\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'namespace' is set\n if ('namespace' not in params) or (params['namespace'] is None):\n raise ValueError(\"Missing the required parameter `namespace` when calling `patch_namespaced_ingress_status`\")\n # verify the required parameter 'name' is set\n if ('name' not in params) or (params['name'] is None):\n raise 
ValueError(\"Missing the required parameter `name` when calling `patch_namespaced_ingress_status`\")\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `patch_namespaced_ingress_status`\")\n\n resource_path = '/apis/extensions/v1beta1/namespaces/{namespace}/ingresses/{name}/status'.replace('{format}', 'json')\n path_params = {}\n if 'namespace' in params:\n path_params['namespace'] = params['namespace']\n if 'name' in params:\n path_params['name'] = params['name']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'PATCH',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1beta1Ingress',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "title": "" }, { "docid": "d581920f1d3ac910763c72ceada48940", "score": "0.5862788", "text": "def watch_namespaced_service_list(self, namespace, **kwargs):\n # verify the required parameter 'namespace' is set\n if namespace is None:\n raise ValueError(\"Missing the required parameter `namespace` when calling `watch_namespaced_service_list`\")\n\n all_params = ['namespace', 'pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method watch_namespaced_service_list\" % key\n )\n params[key] = val\n del params['kwargs']\n\n resource_path = '/api/v1/watch/namespaces/{namespace}/services'.replace('{format}', 'json')\n method = 'GET'\n\n path_params = {}\n if 'namespace' in params:\n path_params['namespace'] = params['namespace']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = {}\n files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf', 'application/vnd.kubernetes.protobuf;stream=watch'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP 
header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, method,\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=files,\n response_type='*VersionedEvent',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "title": "" }, { "docid": "67bae4a9df8aec304d99bca46e984300", "score": "0.5857904", "text": "def watch_namespace_list(self, **kwargs):\n\n all_params = ['resource_version', 'timeout_seconds', 'pretty', 'label_selector', 'field_selector', 'watch']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method watch_namespace_list\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n resource_path = '/api/v1/watch/namespaces'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf', 'application/vnd.kubernetes.protobuf;stream=watch'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='Model2AversionedEvent',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "title": "" }, { "docid": "c31da133639f67b7d5066e1a38ae43c1", "score": "0.5853251", "text": "def watch_namespaced_secret_list(self, namespace, **kwargs):\n # verify the required parameter 'namespace' is set\n if namespace is None:\n raise ValueError(\"Missing the required parameter `namespace` when calling `watch_namespaced_secret_list`\")\n\n all_params = ['namespace', 'pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method watch_namespaced_secret_list\" % key\n )\n params[key] = val\n del params['kwargs']\n\n resource_path = '/api/v1/watch/namespaces/{namespace}/secrets'.replace('{format}', 'json')\n method = 'GET'\n\n path_params = {}\n if 'namespace' in params:\n path_params['namespace'] = params['namespace']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = 
params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = {}\n files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf', 'application/vnd.kubernetes.protobuf;stream=watch'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, method,\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=files,\n response_type='*VersionedEvent',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "title": "" }, { "docid": "b542a005196fe59ddf2c4d0cd0cc9c8e", "score": "0.58261377", "text": "def test_make_pod_ingress_resources(self) -> NoReturn:\n config = {\n \"site_url\": \"http://mongodb-exporter\",\n \"cluster_issuer\": \"\",\n \"ingress_whitelist_source_range\": \"\",\n }\n app_name = \"mongodb-exporter\"\n port = 9216\n\n expected_result = [\n {\n \"name\": f\"{app_name}-ingress\",\n \"annotations\": {\n \"nginx.ingress.kubernetes.io/ssl-redirect\": \"false\",\n },\n \"spec\": {\n \"rules\": [\n {\n \"host\": app_name,\n \"http\": {\n \"paths\": [\n {\n \"path\": \"/\",\n \"backend\": {\n \"serviceName\": app_name,\n \"servicePort\": port,\n },\n }\n ]\n },\n }\n ]\n },\n }\n ]\n\n pod_ingress_resources = pod_spec._make_pod_ingress_resources(\n config, app_name, port\n )\n\n self.assertListEqual(expected_result, pod_ingress_resources)", "title": "" }, { "docid": "9a681eebdd9642926e3d0fd4202b8a78", "score": "0.58175516", "text": "def watch_namespaced_pod_list(self, namespace, **kwargs):\n # verify the required parameter 'namespace' is set\n if namespace is None:\n raise ValueError(\"Missing the required parameter `namespace` when calling `watch_namespaced_pod_list`\")\n\n all_params = ['namespace', 'pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method watch_namespaced_pod_list\" % key\n )\n params[key] = val\n del params['kwargs']\n\n resource_path = '/api/v1/watch/namespaces/{namespace}/pods'.replace('{format}', 'json')\n method = 'GET'\n\n path_params = {}\n if 'namespace' in params:\n path_params['namespace'] = params['namespace']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = 
params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = {}\n files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf', 'application/vnd.kubernetes.protobuf;stream=watch'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, method,\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=files,\n response_type='*VersionedEvent',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "title": "" }, { "docid": "574d230d614fd681d8f90c3f0cebbee0", "score": "0.5796708", "text": "def test_list_authorization_openshift_io_v1_namespaced_policy_binding(self):\n pass", "title": "" }, { "docid": "04796afcd9483a585c05440a9bf419cd", "score": "0.57821107", "text": "def test_list_authorization_openshift_io_v1_namespaced_policy(self):\n pass", "title": "" }, { "docid": "feae89dd8783b27050180cd2ec130f2f", "score": "0.5777656", "text": "def watch_namespaced_endpoints_list_with_http_info(self, namespace, **kwargs):\n\n all_params = ['namespace', 'pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method watch_namespaced_endpoints_list\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'namespace' is set\n if ('namespace' not in params) or (params['namespace'] is None):\n raise ValueError(\"Missing the required parameter `namespace` when calling `watch_namespaced_endpoints_list`\")\n\n\n collection_formats = {}\n\n resource_path = '/api/v1/watch/namespaces/{namespace}/endpoints'.replace('{format}', 'json')\n path_params = {}\n if 'namespace' in params:\n path_params['namespace'] = params['namespace']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n 
auth_settings = []\n\n return self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1WatchEvent',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)", "title": "" }, { "docid": "a91a911d1d04635f6f601418cbecf6f8", "score": "0.57440865", "text": "def patch_namespaced_ingress(self, name, namespace, body, **kwargs):\n\n all_params = ['name', 'namespace', 'body', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method patch_namespaced_ingress\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'name' is set\n if ('name' not in params) or (params['name'] is None):\n raise ValueError(\"Missing the required parameter `name` when calling `patch_namespaced_ingress`\")\n # verify the required parameter 'namespace' is set\n if ('namespace' not in params) or (params['namespace'] is None):\n raise ValueError(\"Missing the required parameter `namespace` when calling `patch_namespaced_ingress`\")\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `patch_namespaced_ingress`\")\n\n resource_path = '/apis/extensions/v1beta1/namespaces/{namespace}/ingresses/{name}'.replace('{format}', 'json')\n path_params = {}\n if 'name' in params:\n path_params['name'] = params['name']\n if 'namespace' in params:\n path_params['namespace'] = params['namespace']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'PATCH',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1beta1Ingress',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "title": "" }, { "docid": "3585c6d289aae340beef00b22c220abf", "score": "0.57425517", "text": "def delete_namespaced_ingress(self, name, namespace, body, **kwargs):\n\n all_params = ['name', 'namespace', 'body', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method delete_namespaced_ingress\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'name' is set\n if 
('name' not in params) or (params['name'] is None):\n raise ValueError(\"Missing the required parameter `name` when calling `delete_namespaced_ingress`\")\n # verify the required parameter 'namespace' is set\n if ('namespace' not in params) or (params['namespace'] is None):\n raise ValueError(\"Missing the required parameter `namespace` when calling `delete_namespaced_ingress`\")\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `delete_namespaced_ingress`\")\n\n resource_path = '/apis/extensions/v1beta1/namespaces/{namespace}/ingresses/{name}'.replace('{format}', 'json')\n path_params = {}\n if 'name' in params:\n path_params['name'] = params['name']\n if 'namespace' in params:\n path_params['namespace'] = params['namespace']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'DELETE',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='UnversionedStatus',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "title": "" }, { "docid": "c2da421d0ef1f0cb16891ff350c44ba9", "score": "0.5722577", "text": "def watch_namespaced_endpoints_list(self, namespace, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.watch_namespaced_endpoints_list_with_http_info(namespace, **kwargs)\n else:\n (data) = self.watch_namespaced_endpoints_list_with_http_info(namespace, **kwargs)\n return data", "title": "" }, { "docid": "7328cd585b2c6b012391aeda06a04a32", "score": "0.57142055", "text": "def watch_namespaced_event_list_with_http_info(self, namespace, **kwargs):\n\n all_params = ['namespace', 'pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method watch_namespaced_event_list\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'namespace' is set\n if ('namespace' not in params) or (params['namespace'] is None):\n raise ValueError(\"Missing the required parameter `namespace` when calling `watch_namespaced_event_list`\")\n\n\n collection_formats = {}\n\n resource_path = '/api/v1/watch/namespaces/{namespace}/events'.replace('{format}', 'json')\n path_params = {}\n if 'namespace' in params:\n path_params['namespace'] = params['namespace']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n 
query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n return self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1WatchEvent',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)", "title": "" }, { "docid": "f79f1b5eaf6b0887f206e97511c337e8", "score": "0.57131815", "text": "def test_list_extensions_v1beta1_deployment_for_all_namespaces(self):\n pass", "title": "" }, { "docid": "e065cbb45d6808d538fc7416eaa58ff3", "score": "0.57029593", "text": "def list_namespaced_endpoints(self, **kwargs):\n\n all_params = ['pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method list_namespaced_endpoints\" % key\n )\n params[key] = val\n del params['kwargs']\n\n resource_path = '/api/v1/endpoints'.replace('{format}', 'json')\n method = 'GET'\n\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = {}\n files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, method,\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=files,\n response_type='V1EndpointsList',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n 
return response", "title": "" }, { "docid": "a0fc1473bbef5c83fba14c0d61a50f4e", "score": "0.569996", "text": "def watch_namespaced_event_list_45(self, namespace, **kwargs):\n # verify the required parameter 'namespace' is set\n if namespace is None:\n raise ValueError(\"Missing the required parameter `namespace` when calling `watch_namespaced_event_list_45`\")\n\n all_params = ['namespace', 'pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method watch_namespaced_event_list_45\" % key\n )\n params[key] = val\n del params['kwargs']\n\n resource_path = '/api/v1/watch/namespaces/{namespace}/events'.replace('{format}', 'json')\n method = 'GET'\n\n path_params = {}\n if 'namespace' in params:\n path_params['namespace'] = params['namespace']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = {}\n files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf', 'application/vnd.kubernetes.protobuf;stream=watch'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, method,\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=files,\n response_type='*VersionedEvent',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "title": "" }, { "docid": "4529121cb5b5396551c3a3c29df94563", "score": "0.5683393", "text": "def test_read_extensions_v1beta1_namespaced_deployment_status(self):\n pass", "title": "" }, { "docid": "8687ea585c25ec42fd1d1e3f332da10e", "score": "0.56722784", "text": "def list_namespaced_event(self, **kwargs):\n\n all_params = ['pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method list_namespaced_event\" % key\n )\n params[key] = val\n del params['kwargs']\n\n resource_path = '/api/v1/events'.replace('{format}', 'json')\n method = 'GET'\n\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' 
in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = {}\n files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, method,\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=files,\n response_type='V1EventList',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "title": "" }, { "docid": "37460b300c7be9cc3beda548e6b506e2", "score": "0.567045", "text": "def watch_namespaced_service_account_list_53(self, **kwargs):\n\n all_params = ['pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method watch_namespaced_service_account_list_53\" % key\n )\n params[key] = val\n del params['kwargs']\n\n resource_path = '/api/v1/watch/serviceaccounts'.replace('{format}', 'json')\n method = 'GET'\n\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = {}\n files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf', 'application/vnd.kubernetes.protobuf;stream=watch'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, method,\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=files,\n response_type='*VersionedEvent',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "title": "" }, { "docid": "86aede10d550b9fa7882c114a378a7a8", "score": "0.5662053", "text": "def test_watch_policy_v1beta1_pod_disruption_budget_list_for_all_namespaces(self):\n pass", "title": "" }, { "docid": "d3967a926b91a3e8786c3fdac68f7110", "score": "0.56557345", "text": "def read_namespaced_ingress(self, name, namespace, **kwargs):\n\n all_params = ['name', 'namespace', 'pretty', 'export', 'exact']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key 
not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method read_namespaced_ingress\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'name' is set\n if ('name' not in params) or (params['name'] is None):\n raise ValueError(\"Missing the required parameter `name` when calling `read_namespaced_ingress`\")\n # verify the required parameter 'namespace' is set\n if ('namespace' not in params) or (params['namespace'] is None):\n raise ValueError(\"Missing the required parameter `namespace` when calling `read_namespaced_ingress`\")\n\n resource_path = '/apis/extensions/v1beta1/namespaces/{namespace}/ingresses/{name}'.replace('{format}', 'json')\n path_params = {}\n if 'name' in params:\n path_params['name'] = params['name']\n if 'namespace' in params:\n path_params['namespace'] = params['namespace']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'export' in params:\n query_params['export'] = params['export']\n if 'exact' in params:\n query_params['exact'] = params['exact']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1beta1Ingress',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "title": "" }, { "docid": "a8030113f37991361e7dffa1a4e7f4ef", "score": "0.56515396", "text": "def create_namespaced_ingress(self, namespace, body, **kwargs):\n\n all_params = ['namespace', 'body', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method create_namespaced_ingress\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'namespace' is set\n if ('namespace' not in params) or (params['namespace'] is None):\n raise ValueError(\"Missing the required parameter `namespace` when calling `create_namespaced_ingress`\")\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `create_namespaced_ingress`\")\n\n resource_path = '/apis/extensions/v1beta1/namespaces/{namespace}/ingresses'.replace('{format}', 'json')\n path_params = {}\n if 'namespace' in params:\n path_params['namespace'] = params['namespace']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header 
`Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1beta1Ingress',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "title": "" }, { "docid": "2f7f89b7634f282f36f90d3cc84dfcb1", "score": "0.56456935", "text": "def replace_namespaced_ingress_status(self, namespace, name, body, **kwargs):\n\n all_params = ['namespace', 'name', 'body', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method replace_namespaced_ingress_status\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'namespace' is set\n if ('namespace' not in params) or (params['namespace'] is None):\n raise ValueError(\"Missing the required parameter `namespace` when calling `replace_namespaced_ingress_status`\")\n # verify the required parameter 'name' is set\n if ('name' not in params) or (params['name'] is None):\n raise ValueError(\"Missing the required parameter `name` when calling `replace_namespaced_ingress_status`\")\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `replace_namespaced_ingress_status`\")\n\n resource_path = '/apis/extensions/v1beta1/namespaces/{namespace}/ingresses/{name}/status'.replace('{format}', 'json')\n path_params = {}\n if 'namespace' in params:\n path_params['namespace'] = params['namespace']\n if 'name' in params:\n path_params['name'] = params['name']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'PUT',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1beta1Ingress',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "title": "" }, { "docid": "4818436f22fc71459a7df64c0cfcf362", "score": "0.56269455", "text": "def test_make_pod_ingress_resources_with_whitelist_source_range(self) -> NoReturn:\n config = {\n \"site_url\": \"http://mongodb-exporter\",\n \"cluster_issuer\": \"\",\n \"ingress_whitelist_source_range\": \"0.0.0.0/0\",\n }\n app_name = \"mongodb-exporter\"\n port = 9216\n\n expected_result = [\n {\n \"name\": f\"{app_name}-ingress\",\n \"annotations\": {\n \"nginx.ingress.kubernetes.io/ssl-redirect\": \"false\",\n \"nginx.ingress.kubernetes.io/whitelist-source-range\": config[\n \"ingress_whitelist_source_range\"\n ],\n },\n \"spec\": {\n \"rules\": [\n {\n 
\"host\": app_name,\n \"http\": {\n \"paths\": [\n {\n \"path\": \"/\",\n \"backend\": {\n \"serviceName\": app_name,\n \"servicePort\": port,\n },\n }\n ]\n },\n }\n ]\n },\n }\n ]\n\n pod_ingress_resources = pod_spec._make_pod_ingress_resources(\n config, app_name, port\n )\n\n self.assertListEqual(expected_result, pod_ingress_resources)", "title": "" }, { "docid": "8f9ec8d87dabd201b43fba7048ee38a2", "score": "0.5617942", "text": "def test_watch_extensions_v1beta1_namespaced_network_policy(self):\n pass", "title": "" }, { "docid": "bd2195e780e6cab002fb6bddfec73eab", "score": "0.5584989", "text": "def test_watch_extensions_v1beta1_namespaced_daemon_set_list(self):\n pass", "title": "" }, { "docid": "5a09de1d48ce533972c404d3301cb9d0", "score": "0.55525374", "text": "def watch_namespaced_resource_quota_list(self, namespace, **kwargs):\n # verify the required parameter 'namespace' is set\n if namespace is None:\n raise ValueError(\"Missing the required parameter `namespace` when calling `watch_namespaced_resource_quota_list`\")\n\n all_params = ['namespace', 'pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method watch_namespaced_resource_quota_list\" % key\n )\n params[key] = val\n del params['kwargs']\n\n resource_path = '/api/v1/watch/namespaces/{namespace}/resourcequotas'.replace('{format}', 'json')\n method = 'GET'\n\n path_params = {}\n if 'namespace' in params:\n path_params['namespace'] = params['namespace']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = {}\n files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf', 'application/vnd.kubernetes.protobuf;stream=watch'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, method,\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=files,\n response_type='*VersionedEvent',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "title": "" }, { "docid": "9293dbf1d63f88586fcaea0fcf4a621a", "score": "0.54966104", "text": "def replace_namespaced_ingress(self, name, namespace, body, **kwargs):\n\n all_params = ['name', 'namespace', 'body', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method replace_namespaced_ingress\" % key\n )\n 
params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'name' is set\n if ('name' not in params) or (params['name'] is None):\n raise ValueError(\"Missing the required parameter `name` when calling `replace_namespaced_ingress`\")\n # verify the required parameter 'namespace' is set\n if ('namespace' not in params) or (params['namespace'] is None):\n raise ValueError(\"Missing the required parameter `namespace` when calling `replace_namespaced_ingress`\")\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `replace_namespaced_ingress`\")\n\n resource_path = '/apis/extensions/v1beta1/namespaces/{namespace}/ingresses/{name}'.replace('{format}', 'json')\n path_params = {}\n if 'name' in params:\n path_params['name'] = params['name']\n if 'namespace' in params:\n path_params['namespace'] = params['namespace']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'PUT',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1beta1Ingress',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "title": "" }, { "docid": "e55911a3f67d4d31c80f3f7486360fe2", "score": "0.54893094", "text": "def watch_namespaced_secret_list_with_http_info(self, namespace, **kwargs):\n\n all_params = ['namespace', 'pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method watch_namespaced_secret_list\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'namespace' is set\n if ('namespace' not in params) or (params['namespace'] is None):\n raise ValueError(\"Missing the required parameter `namespace` when calling `watch_namespaced_secret_list`\")\n\n\n collection_formats = {}\n\n resource_path = '/api/v1/watch/namespaces/{namespace}/secrets'.replace('{format}', 'json')\n path_params = {}\n if 'namespace' in params:\n path_params['namespace'] = params['namespace']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in 
params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n return self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1WatchEvent',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)", "title": "" }, { "docid": "19ae893f99f176372ab29eb6a38ae796", "score": "0.5485171", "text": "def test_read_extensions_v1beta1_namespaced_deployment(self):\n pass", "title": "" }, { "docid": "c56ac4e5a418f4b36b6f086c3766d9bc", "score": "0.54709345", "text": "def watch_namespaced_pod_list_48(self, **kwargs):\n\n all_params = ['pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method watch_namespaced_pod_list_48\" % key\n )\n params[key] = val\n del params['kwargs']\n\n resource_path = '/api/v1/watch/pods'.replace('{format}', 'json')\n method = 'GET'\n\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = {}\n files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf', 'application/vnd.kubernetes.protobuf;stream=watch'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, method,\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=files,\n response_type='*VersionedEvent',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "title": "" }, { "docid": "4da085e549edeef175fecec193c8dc10", "score": "0.54408056", "text": "def watch_namespaced_service_account_list_with_http_info(self, namespace, **kwargs):\n\n all_params = ['namespace', 'pretty', 'label_selector', 'field_selector', 
'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method watch_namespaced_service_account_list\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'namespace' is set\n if ('namespace' not in params) or (params['namespace'] is None):\n raise ValueError(\"Missing the required parameter `namespace` when calling `watch_namespaced_service_account_list`\")\n\n\n collection_formats = {}\n\n resource_path = '/api/v1/watch/namespaces/{namespace}/serviceaccounts'.replace('{format}', 'json')\n path_params = {}\n if 'namespace' in params:\n path_params['namespace'] = params['namespace']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n return self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1WatchEvent',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)", "title": "" }, { "docid": "235568c3c668e35bd026695e61a1b450", "score": "0.54180986", "text": "def watch_namespaced_replication_controller_list_50(self, **kwargs):\n\n all_params = ['pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method watch_namespaced_replication_controller_list_50\" % key\n )\n params[key] = val\n del params['kwargs']\n\n resource_path = '/api/v1/watch/replicationcontrollers'.replace('{format}', 'json')\n method = 'GET'\n\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 
'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = {}\n files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf', 'application/vnd.kubernetes.protobuf;stream=watch'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, method,\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=files,\n response_type='*VersionedEvent',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "title": "" }, { "docid": "eb880a9b9bb6109fab0ad5a6e3294bca", "score": "0.5407876", "text": "def watch_namespaced_endpoints(self, namespace, name, **kwargs):\n # verify the required parameter 'namespace' is set\n if namespace is None:\n raise ValueError(\"Missing the required parameter `namespace` when calling `watch_namespaced_endpoints`\")\n # verify the required parameter 'name' is set\n if name is None:\n raise ValueError(\"Missing the required parameter `name` when calling `watch_namespaced_endpoints`\")\n\n all_params = ['namespace', 'name', 'pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method watch_namespaced_endpoints\" % key\n )\n params[key] = val\n del params['kwargs']\n\n resource_path = '/api/v1/watch/namespaces/{namespace}/endpoints/{name}'.replace('{format}', 'json')\n method = 'GET'\n\n path_params = {}\n if 'namespace' in params:\n path_params['namespace'] = params['namespace']\n if 'name' in params:\n path_params['name'] = params['name']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = {}\n files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf', 'application/vnd.kubernetes.protobuf;stream=watch'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, method,\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=files,\n 
response_type='*VersionedEvent',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "title": "" }, { "docid": "021f96237a6e7529d52706dd2c345411", "score": "0.53916454", "text": "def test_list_authorization_openshift_io_v1_namespaced_role(self):\n pass", "title": "" }, { "docid": "3b14eb411347fa582953f24bbdc7281d", "score": "0.5387538", "text": "def test_watch_extensions_v1beta1_daemon_set_list_for_all_namespaces(self):\n pass", "title": "" }, { "docid": "5cd36b646e7e6cf5132a7e78251c09fb", "score": "0.5381912", "text": "def test_watch_policy_v1beta1_namespaced_pod_disruption_budget_list(self):\n pass", "title": "" }, { "docid": "545d32d003d9acde811ed928e74862c1", "score": "0.53805643", "text": "def watch_namespace_list_with_http_info(self, **kwargs):\n\n all_params = ['pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method watch_namespace_list\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n collection_formats = {}\n\n resource_path = '/api/v1/watch/namespaces'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n return self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1WatchEvent',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)", "title": "" }, { "docid": "d01ae2fe1bb52fc610654d67ee01b65b", "score": "0.5366167", "text": "def test_watch_extensions_v1beta1_namespaced_replica_set_list(self):\n pass", "title": "" }, { "docid": "da64d64edc3562fe842a2eab9c18bd28", "score": "0.53422546", "text": "def test_list_authorization_openshift_io_v1_namespaced_role_binding(self):\n pass", "title": "" }, { "docid": "a366695483e5ff30f6d7ae98753fd5ee", "score": "0.53404754", "text": "def list_namespaced_endpoints_2(self, namespace, **kwargs):\n # verify the required parameter 'namespace' is set\n if 
namespace is None:\n raise ValueError(\"Missing the required parameter `namespace` when calling `list_namespaced_endpoints_2`\")\n\n all_params = ['namespace', 'pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method list_namespaced_endpoints_2\" % key\n )\n params[key] = val\n del params['kwargs']\n\n resource_path = '/api/v1/namespaces/{namespace}/endpoints'.replace('{format}', 'json')\n method = 'GET'\n\n path_params = {}\n if 'namespace' in params:\n path_params['namespace'] = params['namespace']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = {}\n files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, method,\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=files,\n response_type='V1EndpointsList',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "title": "" }, { "docid": "a313353eeb35944eda7f762578a7b30c", "score": "0.5316332", "text": "def watch_namespace_list(self, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.watch_namespace_list_with_http_info(**kwargs)\n else:\n (data) = self.watch_namespace_list_with_http_info(**kwargs)\n return data", "title": "" }, { "docid": "f8871d5176220081c37028b13442149f", "score": "0.53055197", "text": "def watch_namespaced_replication_controller_list(self, namespace, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.watch_namespaced_replication_controller_list_with_http_info(namespace, **kwargs)\n else:\n (data) = self.watch_namespaced_replication_controller_list_with_http_info(namespace, **kwargs)\n return data", "title": "" }, { "docid": "0f54aaf6db6f4f766ff910e9c27849e8", "score": "0.52969664", "text": "def apis_extensions_v1beta1_watch_namespaces_namespace_ingresses_get(self, namespace, **kwargs):\n\n all_params = ['namespace', 'field_selector', 'watch', 'resource_version', 'timeout_seconds', 'pretty', 'label_selector']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method apis_extensions_v1beta1_watch_namespaces_namespace_ingresses_get\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the 
required parameter 'namespace' is set\n if ('namespace' not in params) or (params['namespace'] is None):\n raise ValueError(\"Missing the required parameter `namespace` when calling `apis_extensions_v1beta1_watch_namespaces_namespace_ingresses_get`\")\n\n resource_path = '/apis/extensions/v1beta1/watch/namespaces/{namespace}/ingresses'.replace('{format}', 'json')\n path_params = {}\n if 'namespace' in params:\n path_params['namespace'] = params['namespace']\n\n query_params = {}\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf', 'application/vnd.kubernetes.protobuf;stream=watch'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='Model2AversionedEvent',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "title": "" }, { "docid": "4adecc35a0994f00fb62ab36aa2b970b", "score": "0.5295039", "text": "def watch_namespaced_replication_controller_list_with_http_info(self, namespace, **kwargs):\n\n all_params = ['namespace', 'pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method watch_namespaced_replication_controller_list\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'namespace' is set\n if ('namespace' not in params) or (params['namespace'] is None):\n raise ValueError(\"Missing the required parameter `namespace` when calling `watch_namespaced_replication_controller_list`\")\n\n\n collection_formats = {}\n\n resource_path = '/api/v1/watch/namespaces/{namespace}/replicationcontrollers'.replace('{format}', 'json')\n path_params = {}\n if 'namespace' in params:\n path_params['namespace'] = params['namespace']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 
'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n return self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1WatchEvent',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)", "title": "" }, { "docid": "233428f66581047f1f2a493969c152c8", "score": "0.5266974", "text": "def test_patch_extensions_v1beta1_namespaced_deployment_status(self):\n pass", "title": "" }, { "docid": "67ce4c638207dda53eb3180b4898a810", "score": "0.52659637", "text": "def watch_namespaced_secret_list_52(self, **kwargs):\n\n all_params = ['pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method watch_namespaced_secret_list_52\" % key\n )\n params[key] = val\n del params['kwargs']\n\n resource_path = '/api/v1/watch/secrets'.replace('{format}', 'json')\n method = 'GET'\n\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = {}\n files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf', 'application/vnd.kubernetes.protobuf;stream=watch'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, method,\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=files,\n response_type='*VersionedEvent',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "title": "" }, { "docid": "93ee5c7f6520a07dadced63f500ee5fc", "score": "0.525859", "text": "def test_replace_extensions_v1beta1_namespaced_deployment_status(self):\n pass", "title": "" }, { "docid": 
"025dc780ea5a2d5b9eec8cbdcd859def", "score": "0.52548563", "text": "def test_list_authorization_openshift_io_v1_policy_for_all_namespaces(self):\n pass", "title": "" }, { "docid": "369a0d2c638a71618a3543fd6f197f09", "score": "0.5250783", "text": "def watch_namespaced_service_list_54(self, **kwargs):\n\n all_params = ['pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method watch_namespaced_service_list_54\" % key\n )\n params[key] = val\n del params['kwargs']\n\n resource_path = '/api/v1/watch/services'.replace('{format}', 'json')\n method = 'GET'\n\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = {}\n files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf', 'application/vnd.kubernetes.protobuf;stream=watch'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, method,\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=files,\n response_type='*VersionedEvent',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "title": "" }, { "docid": "0b694dd0a352bba3fe379fcc8fe1f06e", "score": "0.52346957", "text": "def watch_namespaced_pod_list_with_http_info(self, namespace, **kwargs):\n\n all_params = ['namespace', 'pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method watch_namespaced_pod_list\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'namespace' is set\n if ('namespace' not in params) or (params['namespace'] is None):\n raise ValueError(\"Missing the required parameter `namespace` when calling `watch_namespaced_pod_list`\")\n\n\n collection_formats = {}\n\n resource_path = '/api/v1/watch/namespaces/{namespace}/pods'.replace('{format}', 'json')\n path_params = {}\n if 'namespace' in params:\n path_params['namespace'] = params['namespace']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 
'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n return self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1WatchEvent',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)", "title": "" }, { "docid": "55b830146ce03de064305f57c7fb18f3", "score": "0.52343476", "text": "def watch_namespaced_resource_quota_list(self, namespace, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.watch_namespaced_resource_quota_list_with_http_info(namespace, **kwargs)\n else:\n (data) = self.watch_namespaced_resource_quota_list_with_http_info(namespace, **kwargs)\n return data", "title": "" }, { "docid": "30f0d17def920003e22770377b485ff7", "score": "0.52245593", "text": "def watch_namespaced_pod_template_list(self, namespace, **kwargs):\n # verify the required parameter 'namespace' is set\n if namespace is None:\n raise ValueError(\"Missing the required parameter `namespace` when calling `watch_namespaced_pod_template_list`\")\n\n all_params = ['namespace', 'pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method watch_namespaced_pod_template_list\" % key\n )\n params[key] = val\n del params['kwargs']\n\n resource_path = '/api/v1/watch/namespaces/{namespace}/podtemplates'.replace('{format}', 'json')\n method = 'GET'\n\n path_params = {}\n if 'namespace' in params:\n path_params['namespace'] = params['namespace']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = {}\n files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/json;stream=watch', 
'application/vnd.kubernetes.protobuf', 'application/vnd.kubernetes.protobuf;stream=watch'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, method,\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=files,\n response_type='*VersionedEvent',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "title": "" }, { "docid": "ca513db891a2398b7dee8121f50bbd41", "score": "0.5222923", "text": "def test_list_authorization_openshift_io_v1_policy_binding_for_all_namespaces(self):\n pass", "title": "" }, { "docid": "9da1493c8307896cd00d32a9bcfb84d6", "score": "0.521373", "text": "def watch_secret_list_for_all_namespaces_with_http_info(self, **kwargs):\n\n all_params = ['pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method watch_secret_list_for_all_namespaces\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n collection_formats = {}\n\n resource_path = '/api/v1/watch/secrets'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n return self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1WatchEvent',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)", "title": "" }, { "docid": "3bcfd8815fab77e917d7c651543a4aae", "score": "0.52102184", "text": "def ingress(\n self,\n value: typing.Union[typing.List[\"NetworkPolicyIngressRule\"], typing.List[dict]],\n ):\n cleaned: typing.List[NetworkPolicyIngressRule] = []\n for item in value:\n if isinstance(item, dict):\n item = typing.cast(\n NetworkPolicyIngressRule,\n 
NetworkPolicyIngressRule().from_dict(item),\n )\n cleaned.append(typing.cast(NetworkPolicyIngressRule, item))\n self._properties[\"ingress\"] = cleaned", "title": "" }, { "docid": "fe7a2797e8153695ea6f21536e0b6ff3", "score": "0.5199394", "text": "def watch_namespaced_secret_list(self, namespace, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.watch_namespaced_secret_list_with_http_info(namespace, **kwargs)\n else:\n (data) = self.watch_namespaced_secret_list_with_http_info(namespace, **kwargs)\n return data", "title": "" }, { "docid": "5b23b817f802bde0141f143f45167480", "score": "0.5193513", "text": "def watch_namespaced_service_list_with_http_info(self, namespace, **kwargs):\n\n all_params = ['namespace', 'pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method watch_namespaced_service_list\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'namespace' is set\n if ('namespace' not in params) or (params['namespace'] is None):\n raise ValueError(\"Missing the required parameter `namespace` when calling `watch_namespaced_service_list`\")\n\n\n collection_formats = {}\n\n resource_path = '/api/v1/watch/namespaces/{namespace}/services'.replace('{format}', 'json')\n path_params = {}\n if 'namespace' in params:\n path_params['namespace'] = params['namespace']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n return self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1WatchEvent',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)", "title": "" }, { "docid": "37001efa5376eabf97591ba349df2cfa", "score": "0.519055", "text": "def test_make_pod_ingress_resources_with_https_tls_secret_name(self) -> NoReturn:\n config = {\n \"site_url\": \"https://mongodb-exporter\",\n \"cluster_issuer\": \"\",\n 
\"ingress_whitelist_source_range\": \"\",\n \"tls_secret_name\": \"secret_name\",\n }\n app_name = \"mongodb-exporter\"\n port = 9216\n\n expected_result = [\n {\n \"name\": f\"{app_name}-ingress\",\n \"annotations\": {},\n \"spec\": {\n \"rules\": [\n {\n \"host\": app_name,\n \"http\": {\n \"paths\": [\n {\n \"path\": \"/\",\n \"backend\": {\n \"serviceName\": app_name,\n \"servicePort\": port,\n },\n }\n ]\n },\n }\n ],\n \"tls\": [\n {\"hosts\": [app_name], \"secretName\": config[\"tls_secret_name\"]}\n ],\n },\n }\n ]\n\n pod_ingress_resources = pod_spec._make_pod_ingress_resources(\n config, app_name, port\n )\n\n self.assertListEqual(expected_result, pod_ingress_resources)", "title": "" }, { "docid": "46e1abb17cbdd98b7d339abb40f8400d", "score": "0.51852906", "text": "def ingress(self) -> typing.List[\"NetworkPolicyIngressRule\"]:\n return typing.cast(\n typing.List[\"NetworkPolicyIngressRule\"],\n self._properties.get(\"ingress\"),\n )", "title": "" }, { "docid": "972adea0168d72376685522f69ca98a6", "score": "0.51825815", "text": "def watch_namespaced_event_list(self, namespace, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.watch_namespaced_event_list_with_http_info(namespace, **kwargs)\n else:\n (data) = self.watch_namespaced_event_list_with_http_info(namespace, **kwargs)\n return data", "title": "" }, { "docid": "fb457613463ecaeccb0a61da5c2871eb", "score": "0.5163904", "text": "def watch_namespaced_pod_list(self, namespace, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.watch_namespaced_pod_list_with_http_info(namespace, **kwargs)\n else:\n (data) = self.watch_namespaced_pod_list_with_http_info(namespace, **kwargs)\n return data", "title": "" }, { "docid": "0840d1bb31c5d70f82256ab575c3e022", "score": "0.5161763", "text": "def list_namespace(self, **kwargs):\n\n all_params = ['pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method list_namespace\" % key\n )\n params[key] = val\n del params['kwargs']\n\n resource_path = '/api/v1/namespaces'.replace('{format}', 'json')\n method = 'GET'\n\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = {}\n files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, method,\n path_params,\n query_params,\n header_params,\n 
body=body_params,\n post_params=form_params,\n files=files,\n response_type='V1NamespaceList',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "title": "" }, { "docid": "de581d5d6279535de82d9aa9f967aea4", "score": "0.5149442", "text": "def list_namespaced_service(self, namespace, **kwargs):\n # verify the required parameter 'namespace' is set\n if namespace is None:\n raise ValueError(\"Missing the required parameter `namespace` when calling `list_namespaced_service`\")\n\n all_params = ['namespace', 'pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method list_namespaced_service\" % key\n )\n params[key] = val\n del params['kwargs']\n\n resource_path = '/api/v1/namespaces/{namespace}/services'.replace('{format}', 'json')\n method = 'GET'\n\n path_params = {}\n if 'namespace' in params:\n path_params['namespace'] = params['namespace']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = {}\n files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, method,\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=files,\n response_type='V1ServiceList',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "title": "" }, { "docid": "20cce0ddd1332c84bc54aa491d5016c0", "score": "0.513326", "text": "def list_namespace(self, **kwargs):\n\n all_params = ['pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method list_namespace\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n resource_path = '/api/v1/namespaces'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n 
query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1NamespaceList',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "title": "" }, { "docid": "e261b47422a66d1d8b1ce3d10a657229", "score": "0.5123237", "text": "def watch_namespaced_endpoints(self, namespace, name, **kwargs):\n\n all_params = ['namespace', 'name', 'pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method watch_namespaced_endpoints\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'namespace' is set\n if ('namespace' not in params) or (params['namespace'] is None):\n raise ValueError(\"Missing the required parameter `namespace` when calling `watch_namespaced_endpoints`\")\n # verify the required parameter 'name' is set\n if ('name' not in params) or (params['name'] is None):\n raise ValueError(\"Missing the required parameter `name` when calling `watch_namespaced_endpoints`\")\n\n resource_path = '/api/v1/watch/namespaces/{namespace}/endpoints/{name}'.replace('{format}', 'json')\n path_params = {}\n if 'namespace' in params:\n path_params['namespace'] = params['namespace']\n if 'name' in params:\n path_params['name'] = params['name']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf', 'application/vnd.kubernetes.protobuf;stream=watch'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='Model2AversionedEvent',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return 
response", "title": "" }, { "docid": "e9dd0a5dd5dbfebd7ebc2814a5cd9687", "score": "0.51156384", "text": "def test_list_extensions_v1beta1_network_policy_for_all_namespaces(self):\n pass", "title": "" }, { "docid": "532d4ad188432038db93b7efd3fc06b8", "score": "0.5113122", "text": "def list_namespaced_event_3(self, namespace, **kwargs):\n # verify the required parameter 'namespace' is set\n if namespace is None:\n raise ValueError(\"Missing the required parameter `namespace` when calling `list_namespaced_event_3`\")\n\n all_params = ['namespace', 'pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method list_namespaced_event_3\" % key\n )\n params[key] = val\n del params['kwargs']\n\n resource_path = '/api/v1/namespaces/{namespace}/events'.replace('{format}', 'json')\n method = 'GET'\n\n path_params = {}\n if 'namespace' in params:\n path_params['namespace'] = params['namespace']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = {}\n files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, method,\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=files,\n response_type='V1EventList',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "title": "" }, { "docid": "4e9c9f78e8c198986fa59d283fe4756f", "score": "0.51077914", "text": "def watch_namespaced_service_list(self, namespace, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.watch_namespaced_service_list_with_http_info(namespace, **kwargs)\n else:\n (data) = self.watch_namespaced_service_list_with_http_info(namespace, **kwargs)\n return data", "title": "" }, { "docid": "0a33abe8307cf19e8012a1740c18e885", "score": "0.5093219", "text": "def list_namespaced_pod(self, namespace, **kwargs):\n # verify the required parameter 'namespace' is set\n if namespace is None:\n raise ValueError(\"Missing the required parameter `namespace` when calling `list_namespaced_pod`\")\n\n all_params = ['namespace', 'pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method list_namespaced_pod\" % 
key\n )\n params[key] = val\n del params['kwargs']\n\n resource_path = '/api/v1/namespaces/{namespace}/pods'.replace('{format}', 'json')\n method = 'GET'\n\n path_params = {}\n if 'namespace' in params:\n path_params['namespace'] = params['namespace']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = {}\n files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, method,\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=files,\n response_type='V1PodList',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "title": "" }, { "docid": "ff99312d1a5854a8434daa100b679b99", "score": "0.5072192", "text": "def test_watch_extensions_v1beta1_replica_set_list_for_all_namespaces(self):\n pass", "title": "" } ]
cb911eb2f6a94538cd22adcc20af829d
Decide if a token in a document is a valid English word. For this project, we define valid English words to be ASCII strings that contain only letters (both upper and lower case), single quotes ('), double quotes ("), and hyphens (-). Double quotes may only appear at the beginning or end of a token, unless the beginning/end of the token is a non-letter character. Double quotes cannot appear in the middle of letter characters (for example, "work" is valid, but home"work and bi"cycle are not). Tokens cannot be empty.
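A minimal sketch of the rule above, assuming the interpretation that a double quote is illegal only when flanked by letters on both sides; the function name and helper logic are hypothetical illustrations, not part of this dataset:

def is_valid_english_word(token: str) -> bool:
    # Non-empty ASCII strings only.
    if not token or not token.isascii():
        return False
    # Only letters, single quotes, double quotes, and hyphens are allowed.
    if not all(ch.isalpha() or ch in {"'", '"', '-'} for ch in token):
        return False
    # Reject a double quote sandwiched between two letter characters,
    # so home"work and bi"cycle fail while "work" passes.
    for i in range(1, len(token) - 1):
        if token[i] == '"' and token[i - 1].isalpha() and token[i + 1].isalpha():
            return False
    return True

assert is_valid_english_word('"work"')
assert not is_valid_english_word('home"work')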
[ { "docid": "92b23a3d6bc9c240d5283674c2d064b9", "score": "0.7346011", "text": "def is_english_word(word):\n return all([char in [\"\\\"\", \"\\'\", \"-\", \"*\", \"~\", \"*\", \".\"] or char.isalpha() for char in word])", "title": "" } ]
[ { "docid": "87d892bc625e0032a7064acad6b543c7", "score": "0.70654637", "text": "def valid_english_word(word):\n return word.isalpha()", "title": "" }, { "docid": "cd0ddf0ce14e543b39777531f88283f5", "score": "0.70102686", "text": "def _is_tokenized(word):\n return not ((word.endswith(\".\") or word.endswith(\",\")) and\n word[:-1].isalpha())", "title": "" }, { "docid": "41d7d6def459d79fca29d53d01e24c0b", "score": "0.69928014", "text": "def _valid_word(self, word):\n if word[0].isalpha() and len(word) > 1:\n return True\n return False", "title": "" }, { "docid": "d514d86ef5f5ec033cdaca2fb09a374e", "score": "0.698032", "text": "def _is_special_token(word: str) -> bool:\n return (word == PAD_TOKEN\n or word == START_TOKEN\n or word == END_TOKEN\n or word == UNK_TOKEN)", "title": "" }, { "docid": "55c8c5def1150870944e7bdeaa0f066e", "score": "0.69312084", "text": "def check_valid_word(word):\n if not word:\n return False\n if word.isalpha():\n return True\n for char in word:\n if not(char.isalpha() or char == \"'\"):\n return False\n return True", "title": "" }, { "docid": "b58c0b945f78a5d09fcc8635ec4799b6", "score": "0.6873664", "text": "def _is_word(lemma) -> bool:\n return '_' not in lemma.name()", "title": "" }, { "docid": "415a22f96f3528692a2ba3676ce9ab34", "score": "0.6836172", "text": "def is_valid_token(token):\n # is_stop_word = token in stops\n # is_puctuation = token in string.punctuation\n # is_number_or_punc = reduce(lambda x, y: x and (y in string.digits or y in string.punctuation), token, True)\n # return not (is_stop_word or is_puctuation or is_number_or_punc)\n return True", "title": "" }, { "docid": "bf0fbac9c7dd409b76336e342f8d79a9", "score": "0.6834045", "text": "def is_token_allowed(token):\n if not token or not token.string.strip() or token.is_stop or token.is_punct:\n return False\n return True", "title": "" }, { "docid": "e14759eceb23bfa68632768ce2c9eeba", "score": "0.6800205", "text": "def is_word_english(word):\n return _is_word_english(word)", "title": "" }, { "docid": "ff4ea38dc0d759d7849eaa7a0d9a6b09", "score": "0.6794934", "text": "def is_word(self, token_id: int) -> bool:\n if token_id < 4:\n return False\n if token_id >= len(self):\n return True # OOV is assumed to be words\n token_str = self.index2word[token_id]\n if not word_detector.search(token_str) or token_str == \"<P>\":\n return False\n return True", "title": "" }, { "docid": "59c52650aabfc980fa96c4d8426140b1", "score": "0.67856336", "text": "def check_spell(token, dictionary):\n if len(token) == 0:\n return True\n if token.lower() not in dictionary:\n return False\n else: \n return True", "title": "" }, { "docid": "21ffc542d5e59f472ad1ca6142268ef2", "score": "0.67705095", "text": "def is_word(self, token_id: int) -> bool:\n if token_id < 4: return False\n if token_id >= len(self): return True # OOV is assumed to be words\n token_str = self.index2word[token_id]\n if not word_detector.search(token_str) or token_str == '<P>':\n return False\n return True", "title": "" }, { "docid": "1a0887beddf0b0ff7f6b5673ba93e3b2", "score": "0.6699155", "text": "def check_token(self, token):\n return self.__spell_checker__.check(token) or self.__spell_checker__.check(token.capitalize())", "title": "" }, { "docid": "cbc7a57211512b0d27f2b6b8471aee35", "score": "0.66884655", "text": "def is_valid_language_word(word, language):\n valid_chars = PlainText.get_language_charset(language)\n invalid_chars = PlainText.get_language_exclude_charset(language)\n\n for char in word.strip():\n if (valid_chars and not 
re.search(valid_chars, char)) or (invalid_chars and re.search(invalid_chars, char)):\n return False\n\n return True", "title": "" }, { "docid": "138a38b05d14187d566838c552b17ce8", "score": "0.6676555", "text": "def _is_normal_word(word: str) -> bool:\n if _word_is_chord(word):\n return False\n if len(word) > 2 and _contains_any_of(word[1:-1], '!#$%&()*+,-./:;<=>?@[\\\\]^_{|}~'):\n return False\n return True", "title": "" }, { "docid": "c04bf5b1d4e4bf8b8873938f9b6bf728", "score": "0.664389", "text": "def valid_token(token: str) -> bool:\n return token.is_alpha and \\\n not (token.is_space or token.is_punct or token.like_num)", "title": "" }, { "docid": "e834bbb423d1b4064a89c610c24a631c", "score": "0.66392624", "text": "def _is_english(self, word: str) -> bool:\n flag = True\n for c in word:\n if 'a' <= c <= 'z' or 'A' <= c <= 'Z' or c == '#':\n continue\n else:\n flag = False\n break\n return flag", "title": "" }, { "docid": "2ff75e0c845b990d61ce40b86280065c", "score": "0.6544322", "text": "def has_any_word(text):\n \n lower_text = text.lower()\n\n split_words = lower_text.split(' ')\n \n text_words = map(remove_ponctuation, split_words)\n \n lemm_words = map(lemmatize_word, text_words)\n \n return any(w in valid_words for w in lemm_words)", "title": "" }, { "docid": "3734784a5f7fd10b1cb0fe4a4aed4b5d", "score": "0.65316695", "text": "def is_English_Words(self, data):\n\n words = data.split()\n is_English_Words = True\n if len(words)==0:\n is_English_Words = False\n for word in words:\n if word not in self.english_Words:\n is_English_Words = False\n break\n return is_English_Words", "title": "" }, { "docid": "cee2afcf3924a74ba8994795fe201c9d", "score": "0.652493", "text": "def check_word(self, word):\n\t\treturn self.common_alph.check_word(word)", "title": "" }, { "docid": "c930528079195078d4cd1ba82e68dded", "score": "0.6487436", "text": "def get_tok_word(self):\r\n line = self.line\r\n if len(line) == 0:\r\n return False\r\n ib = 0\r\n iend = ib\r\n tok_str = \"\"\r\n ch = line[ib]\r\n if not re.match(r'[a-zA-Z_]', ch):\r\n return False\r\n \r\n tok_str = ch\r\n iend += 1 \r\n while iend < len(line):\r\n ch = line[iend]\r\n if not re.match(r'\\w', ch):\r\n break # end of word\r\n tok_str += ch\r\n iend += 1\r\n self.tok = SelectStreamToken(SelectStreamToken.WORD,\r\n str=tok_str)\r\n self.line = line[iend:]\r\n return True", "title": "" }, { "docid": "5099bf8a5bb537cbd2109e9e8c2fc25a", "score": "0.64777076", "text": "def validate_lang_name(word):\n if re.match(\"^[\\w]*$\", word):\n return True\n else:\n return False", "title": "" }, { "docid": "92972b6a726640b1d15dea0583dfec06", "score": "0.6461911", "text": "def is_multiword(self):\n return len(self.get_tokens()) > 1", "title": "" }, { "docid": "1e4815655ee0db36d26c325fea7690d2", "score": "0.6444443", "text": "def IS_WORD(word):\n isword = 1\n for w in word.split():\n if(wn.synsets(w)):\n isword *= 1\n else:\n isword *= 0\n return isword", "title": "" }, { "docid": "bb9b179ddd88e2322e85f206a36d8582", "score": "0.64420736", "text": "def check_vocab(input_word):\n # ... 
replace(\"char you allow\",\"char you don t allow\")\n invalid_chars = set(string.punctuation.replace(\"\", \"’ -'´`\"))\n if any(char in invalid_chars for char in input_word):\n return False\n elif len(input_word) < 5:\n return False\n else:\n return True", "title": "" }, { "docid": "ffd6cc70330d1d6075bbad82c238d21b", "score": "0.64248383", "text": "def test_returns_error_if_illegal_word_used(self):\n self.assertEqual(is_violation(\"one two\", \"three swift\"), ILLEGAL_WORD)", "title": "" }, { "docid": "7a3eec4f2f04b1e09da202e56b3cc05a", "score": "0.6401365", "text": "def valid_str(self, word:str) -> str:\n # check if the word only contain alphabetic characters\n if word.isalpha():\n # check if the word is in self.lines which is a list of words from the dictionary file\n if word in self.lines:\n return True\n else:\n raise ValueError(\"Sorry, this word is not in the dictonary file.\")\n else:\n raise ValueError(\"Invalid word. The word must only conatin alphabetic letters.\")", "title": "" }, { "docid": "1508e968e9e4dc90a43dfc5b3b32ff38", "score": "0.6381459", "text": "def validate_string(self, word):\n if type(word) != str:\n return False\n\n for x in word:\n if x not in self.machine[Af.ALPHABET]:\n return False\n return True", "title": "" }, { "docid": "8d0bbc86adee19e82be60f54b3c7e61c", "score": "0.63564324", "text": "def _has_apostrophe(token):\n \n if \"'\" in token:\n return True\n return False", "title": "" }, { "docid": "7d27a1e19e418ffc7dd66769cdc5eb8c", "score": "0.6352375", "text": "def is_content_word(word):\n return word.lower() not in STOPLIST and word[0].isalpha()", "title": "" }, { "docid": "08a54302d6d44018020aab1e8d802982", "score": "0.6311917", "text": "def is_word(input_string: str) -> bool:\n return WORD_RE.match(input_string) is not None", "title": "" }, { "docid": "acdb73617a96b026c5926554c6dfa1d4", "score": "0.63048327", "text": "def check_pretty_word(self, word):\n\t\tpunctuation = [\".\", \":\", \";\", \"!\", \"?\", \"*\", \"(\", \")\", \",\", \"'\", \"/\", \"*\", \"$\", \"#\", \"@\"]\n\t\tif len(word) < 2: return False\n\t\tfor char in word:\n\t\t\tif char in punctuation: return False\n\t\treturn True", "title": "" }, { "docid": "a722f8fece5c758f5bd3d2c9ef950a00", "score": "0.62836367", "text": "def check_valid_word(shorthand, word):\n if shorthand.startswith(\"vi_\"):\n return True if len(word.split(\" \")) > 1 and any(map(str.isalpha, word)) and not any(map(str.isdigit, word)) else False\n elif shorthand.startswith(\"th_\"):\n return True if len(word) > 1 and any(map(pattern_thai.match, word)) and not any(map(str.isdigit, word)) else False\n else:\n return True if len(word) > 1 and any(map(str.isalpha, word)) and not any(map(str.isdigit, word)) else False", "title": "" }, { "docid": "a2a48c08a8760dd5abf2b677040d1102", "score": "0.627865", "text": "def is_token(token):\n if len(token) == 0:\n return False\n if not token.isprintable():\n return False\n if \" \" in token:\n return False\n return True", "title": "" }, { "docid": "114b2174b600f7f57dce92c1c84d4b31", "score": "0.62563837", "text": "def is_relevant_concept(word):\n return len(word.split('_')) < data_settings.MAX_UNDERSCORES_ALLOWED + 2", "title": "" }, { "docid": "f19bdf96fbccbb8139a7dec6bc377aea", "score": "0.6251212", "text": "def words_only(token: Token) -> None:\n if not token.is_alpha:\n token._.ppt_output = token.whitespace_", "title": "" }, { "docid": "f381486c628584b394897d63a91158b5", "score": "0.62479776", "text": "def _is_word(self, word):\n return self.value == word", "title": "" }, { 
"docid": "d9667f3161d0508a9f580934db93e7fb", "score": "0.62412244", "text": "def __is_valid_word(self, word, hand):\n\n if WILDCARD in word:\n for vowel in VOWELS:\n temp_word = re.sub('[*]', vowel, word)\n if temp_word in self.__get_all_words():\n return True\n\n letters_word = self.__get_frequency_dict(word)\n\n if word in self.__get_all_words():\n for key, value in letters_word.items():\n if key not in hand or value > hand[key]:\n return False\n return True\n return False", "title": "" }, { "docid": "52a881097fdc93b98b902fa2ba49985c", "score": "0.62406754", "text": "def legal_word(word, words, start_word = None):\r\n if word not in words:\r\n return False\r\n elif start_word == None or len(start_word) == len(word):\r\n for i in word:\r\n if not i.isalpha():\r\n return False\r\n elif len(start_word) != len(word):\r\n return False\r\n return True", "title": "" }, { "docid": "fddcf96cacb12cacee1bff61723efd10", "score": "0.62234336", "text": "def is_valid_word(word, hand, word_list):\r\n\r\n words = []\r\n word = word.lower()\r\n lst_word = list(word)\r\n # checks if * is in word\r\n if '*' in lst_word:\r\n position = lst_word.index('*')\r\n for letter in VOWELS:\r\n lst_word[position] = letter\r\n words.append(\"\".join(lst_word))\r\n else:\r\n words.append(word)\r\n\r\n # checks if possible words in word_list\r\n found = False\r\n for i in words:\r\n if i in word_list:\r\n found = True\r\n\r\n if not found:\r\n return False\r\n\r\n # checks if dictionary isn't empty\r\n word_d = get_frequency_dict(word)\r\n for letter, freq in word_d.items():\r\n if freq > hand.get(letter, 0):\r\n return False\r\n\r\n return True", "title": "" }, { "docid": "afa47f9e1d960796e51f934d543291a6", "score": "0.6220782", "text": "def test_multiword_construction():\n token_line = '8-9\tdu\t_\t_\t_\t_\t_\t_\t_\t_'\n token = Token(token_line)\n\n assert_token_members(token, '8-9', 'du', None, None, None, {}, None, None,\n {}, {})\n assert token.is_multiword()", "title": "" }, { "docid": "d0299c9a464f1bed30d78288320794be", "score": "0.621582", "text": "def test_check_input_false_words_space():\n from trie import Trie\n trie = Trie()\n token = 'my computer'\n assert trie._check_token(token) is False", "title": "" }, { "docid": "6a4ed4639084e7bc52445f3653892245", "score": "0.62149763", "text": "def tokenize(self, is_word=True):", "title": "" }, { "docid": "e9607d29c5cdd6bbc4bb97bd6edb7c5d", "score": "0.62000275", "text": "def test_all_documents_contain_words(path_pp_ling_corpus: str,\n path_pp_token_corpus: str,\n path_pp_lemma_corpus: str\n ) -> None:\n for i, doc in enumerate(get_docs(path_pp_ling_corpus,\n sent_tokenized=False,\n word_tokenized=False)):\n if len(doc) == 0:\n raise Exception('Document {} is empty.'.format(i))\n for i, doc in enumerate(get_docs(path_pp_token_corpus,\n sent_tokenized=False,\n word_tokenized=False)):\n if len(doc) == 0:\n raise Exception('Document {} is empty.'.format(i))\n for i, doc in enumerate(get_docs(path_pp_lemma_corpus,\n sent_tokenized=False,\n word_tokenized=False)):\n if len(doc) == 0:\n raise Exception('Document {} is empty.'.format(i))", "title": "" }, { "docid": "8f0997a8ea10200ad2e4c9acdf43b65a", "score": "0.6192079", "text": "def is_word(letters):\n return \"\".join(letters) in WORD_LIST", "title": "" }, { "docid": "df96165aa5b9e12e8bc1c36626f5b583", "score": "0.6188987", "text": "def is_word(string):\n if wn.synsets(string):\n return True", "title": "" }, { "docid": "6bef91c86dd992921ea7a2fcc7a83186", "score": "0.61882037", "text": "def is_word_adjective(self, word):", 
"title": "" }, { "docid": "bf5ce4f44c27e37dfd864c11e0e0232e", "score": "0.6188192", "text": "def _validate_word(self):\n valid_word = word_verification.validate_word_syntax(self._word)\n if valid_word:\n return valid_word\n else:\n logger.error(f'The word {self._word} was not in a valid format.')\n logger.error(f'Please verify that the word {self._word} is spelled correctly.')", "title": "" }, { "docid": "a4f1c7c55506f09285cfb57c431876e7", "score": "0.61811125", "text": "def is_word(state):\n if state.letter_sequence in words:\n return True\n return False", "title": "" }, { "docid": "592f5cdbf3b28694da14a81d1c721244", "score": "0.6174786", "text": "def validWords(self):\r\n for item in self.words:\r\n if item.getWord().strip() == \"\" or item.getWord() is None \\\r\n or item.meaningsToText().strip() == \"\" or item.meaningsToText() is None:\r\n return False\r\n return True", "title": "" }, { "docid": "41ced01c464b2d03a1157d8fdbff4e64", "score": "0.6174454", "text": "def check_if_real_word(word):\n for char in word:\n if char not in ALPHABET:\n return False\n break\n return True", "title": "" }, { "docid": "8357a0859f3c5b2104fccaf1ec861272", "score": "0.6169185", "text": "def _is_start_of_word(text):\n first_char = text[0]\n return bool(_is_control(first_char) | _is_punctuation(first_char) | _is_whitespace(first_char))", "title": "" }, { "docid": "7d7b5b72faa5ea0ab792a1379ae4837c", "score": "0.6165215", "text": "def validate_word(word):\n word = word.replace('\\n', '')\n for letter in word:\n if letter not in rusalph:\n return False\n return True", "title": "" }, { "docid": "b0199e048785c4f2ec02b57e9e3ab397", "score": "0.6159575", "text": "def check_token(token):\n assert token.find(\" \") == -1, \"Token %s is not valid\" % token", "title": "" }, { "docid": "2c52c72af31ebf4309c83cc4b1958a6d", "score": "0.6147621", "text": "def check_token(token, stopwords_set, punctuation_set):\n punctuation = True\n for character in token:\n if character not in punctuation_set:\n punctuation = False\n break\n\n if punctuation:\n return False\n\n if token in stopwords_set:\n return False\n\n return True", "title": "" }, { "docid": "b10e8b265a6ac8a1938a5d1c9f6ef7d4", "score": "0.6134718", "text": "def is_wordtag(self, word):\n # some words\n if word in (u'ابن', u'بن', u'أبو', u'أبا', \\\n u'أبي', u'عبد', u'عبيد', u'بنو', u'بني', u'بنت'):\n return True\n if self.is_proper_noun(word):\n return True\n return False", "title": "" }, { "docid": "83b364447a166d7c6c9c657e15e2ea78", "score": "0.612661", "text": "def __at_least_one_token_known(doc):\n one_good = False\n for token in doc:\n if not token.is_oov:\n one_good = True\n return one_good", "title": "" }, { "docid": "e78cb9b4ad24caaba53ae1fe5523ad92", "score": "0.6123923", "text": "def isWord(wordList, word):\n\tword = word.lower()\n\tword = word.strip(\" !@#$%^&*()-_+={}[]|\\\\:;'<>?,./\\\"\")\n\treturn word in wordList", "title": "" }, { "docid": "79a41f0a4f25158a98731eea72eee7c6", "score": "0.6122794", "text": "def is_nsw(token):\n return token.lower() not in vn_words_dict", "title": "" }, { "docid": "d4d292b7be60a015aea5d17d0b6e56b8", "score": "0.6114984", "text": "def isWord(wordList, word):\n word = word.lower()\n word = word.strip(\" !@#$%^&*()-_+={}[]|\\\\:;'<>?,./\\\"\")\n return word in wordList", "title": "" }, { "docid": "f5912cc627437ff6e792d7dd19ce8abb", "score": "0.61086595", "text": "def is_word_noun(self, word):", "title": "" }, { "docid": "e0e6f7553bb5389826a6fdb2b2b8d96a", "score": "0.6103704", "text": "def is_word(wordlist, word):\n word = 
word.lower()\n word = word.strip(\" !@#$%^&*()-_+={}[]|\\:;'<>?,./\\\"\")\n return word in wordlist", "title": "" }, { "docid": "4e9e0e76afb66bb5825871471509a590", "score": "0.6087732", "text": "def _english_words_without_homophones(self):\n global _no_homophones_list\n match = bool(self._word in _no_homophones_list)\n if match:\n return f'no homophones for {self._word}'", "title": "" }, { "docid": "17c13758643b4a5d26fc7dbd679a091c", "score": "0.60860276", "text": "def is_bad_word(word):\n return word.lower() in BAD_WORDS", "title": "" }, { "docid": "52f0342b5ad6c6cdab0ef5b60a463c42", "score": "0.6075904", "text": "def is_wordtag(self, word):\n key = word\n # some words must have WAW prefix\n if key in plconst.PLACE_WORDS:\n return True\n return False", "title": "" }, { "docid": "778da1558fa85be03186d3ce0ccafbc2", "score": "0.6067894", "text": "def is_valid_word(self, word):\n\n is_valid = self.parse_word(word)\n if is_valid == '':\n return 'VALID'\n else:\n log.error('Method parse_word returned a remainder for word %s. '\n 'Valid words do not return remainders. Returned \"%s\"!'\n % (word, is_valid))\n return 'INVALID'", "title": "" }, { "docid": "88d36abb3fd34750629906979cbd73dd", "score": "0.60623544", "text": "def check_if_expressible(word, allowed_vertices=allowed_vertices, allowed_tokens=allowed_tokens):\n if not set(word).issubset(allowed_tokens):\n # input word contains tokens not found in graph of allowed pairs.\n allowed = False\n else:\n if len(word) > 1:\n # need to check pairs of tokens\n allowed =True\n first_letter = word[0]\n for second_letter in word[1:]:\n if second_letter not in next_letter_dict[first_letter]:\n allowed = False\n break\n first_letter = second_letter\n else:\n allowed = True\n return allowed", "title": "" }, { "docid": "61ab938861a0104badda8f7f5d1c95b6", "score": "0.6061405", "text": "def check_word(self, item: str) -> bool:\n if item == \"\":\n return False\n if item in self.words:\n return True\n sanitized = sanitize(item, self.punctuation, self.clitic_markers)\n if sanitized in self.words:\n return True\n\n sanitized = self.split_clitics(sanitized)\n if all(s in self.words for s in sanitized):\n return True\n return False", "title": "" }, { "docid": "5771959f2fbdd0c3a7b9a92c3ea3c2b6", "score": "0.6038558", "text": "def is_special(word: str) -> bool:\n is_keyword = keywords.get(word, None)\n is_delimiter = delimiters.get(word, None)\n is_operator = operators.get(word, None)\n return is_operator is not None or is_keyword is not None or is_delimiter is not None", "title": "" }, { "docid": "1ad6ea888bf74eaeceba76e9a95a1f76", "score": "0.60321414", "text": "def process_word(self, word):\n\n word = re.sub(r'[^\\w\\s]', '', word)\n word = word.lower().strip()\n\n if word.isdigit() or word in string.punctuation or word in self.stopwords or word in self.ignore_words:\n return False\n\n try:\n index = self.word2index[word]\n except KeyError:\n lemmatized_word = self.lemmatizer.lemmatize(word)\n\n try:\n index = self.word2index[lemmatized_word]\n except KeyError:\n return False\n\n return index", "title": "" }, { "docid": "0781759abe4d7e8fa32c1dd80dbc7791", "score": "0.6032136", "text": "def contains_any_from_tokens(words_lower, token_list):\n for word in words_lower:\n if word in token_list:\n print \"[User-2-Follow] Description contains word: \",word\n return True\n return False", "title": "" }, { "docid": "3f58ec9021c7a077b76d991f0ecc70fd", "score": "0.600279", "text": "def isValid(text):\n global WORDS\n regex = \"\\b(\" + \"|\".join(WORDS) + \")\\b\"\n 
return bool(re.search(regex, text, re.IGNORECASE))", "title": "" }, { "docid": "6191622ff34338ba5d0aa0dbac637c80", "score": "0.60006994", "text": "def bad_word():", "title": "" }, { "docid": "ea7afd47e7945115d2cb6bf9434bb947", "score": "0.59948444", "text": "def allow_word(word):\n exclude_list = [\"a\",\"able\",\"about\",\"across\",\"after\",\"all\",\"almost\",\"also\",\"am\",\"among\",\"an\",\"and\",\"any\",\"are\",\"as\",\"at\",\"be\"\n ,\"because\",\"been\",\"but\",\"by\",\"can\",\"cannot\",\"could\",\"dear\",\"did\",\"do\",\"does\",\"either\",\"else\",\"ever\",\"every\"\n ,\"for\",\"from\",\"get\",\"got\",\"had\",\"has\",\"have\",\"he\",\"her\",\"hers\",\"him\",\"his\",\"how\",\"however\",\"i\",\"if\",\"in\",\"into\",\"is\"\n ,\"it\",\"its\",\"just\",\"least\",\"let\",\"like\",\"likely\",\"may\",\"me\",\"might\",\"most\",\"must\",\"my\",\"neither\",\"no\",\"nor\"\n ,\"not\",\"of\",\"off\",\"often\",\"on\",\"only\",\"or\",\"other\",\"our\",\"own\",\"rather\",\"said\",\"say\",\"says\",\"she\",\"should\"\n ,\"since\",\"so\",\"some\",\"than\",\"that\",\"the\",\"their\",\"them\",\"then\",\"there\",\"these\",\"they\",\"this\",\"tis\",\"to\",\"too\"\n ,\"twas\",\"us\",\"wants\",\"was\",\"we\",\"were\",\"what\",\"when\",\"where\",\"which\",\"while\",\"who\",\"whom\",\"why\",\"will\",\"with\"\n ,\"would\",\"yet\",\"you\",\"your\"]\n if word in exclude_list:\n return False\n else:\n return True", "title": "" }, { "docid": "592c611c569f22dc781b9778d27105b3", "score": "0.5981622", "text": "def isValid(text):\n return bool(re.search(r'\\bNEW WORD\\b', text, re.IGNORECASE))", "title": "" }, { "docid": "e8c8cdf54e508f9abc1d56929e0a7bda", "score": "0.5981377", "text": "def test_returns_false_if_string_does_not_contain_illegal_word(self):\n string = \"one two three\"\n\n self.assertFalse(illegal_words(string))", "title": "" }, { "docid": "e333a98231636ed84d27e0a96ec56ddf", "score": "0.5973188", "text": "def _check_word_and_translate_not_empty(self):\n if self.user_input['translate'] == \"\" or self.user_input['word'] == \"\":\n return False\n return True", "title": "" }, { "docid": "697273612ada0fa23e36befa6a6bd1b4", "score": "0.59643775", "text": "def test_check_input_true_with_apostrophe():\n from trie import Trie\n trie = Trie()\n token = \"computer's\"\n assert trie._check_token(token)", "title": "" }, { "docid": "a64f5d97a765d5104d9ab4326619e8b0", "score": "0.5953084", "text": "def word_checker(message):\n wordToUse = \"\"\n rightMessage = False\n \n for letter in message:\n if letter == \" \":\n if wordToUse in [\"the\", \"be\", \"to\", \"of\", \"and\", \"in\", \"that\", \"have\", \"it\", \"for\", \"an\", \"well\", \"only\", \"even\", \"back\", \"is\"]:\n rightMessage = True\n wordToUse = \"\"\n else:\n wordToUse = \"\"\n elif letter == \".\": #For every letter which is a fullstop add a fullstop to the plaintext\n pass\n else:\n wordToUse = wordToUse + letter\n return rightMessage", "title": "" }, { "docid": "e8da633f340664216ad4f5c5a626c295", "score": "0.5949241", "text": "def is_word_verb(self, word):", "title": "" }, { "docid": "73f79af6dc5ab7a73ac03891b8ffaa31", "score": "0.59477276", "text": "def _should_filter_token(self, token, filtered_words):\n reason = None\n\n # # Keep roots\n # if token[\"is_root\"]:\n # return False, reason\n\n # Filter stop words\n if token[\"is_stop\"]:\n reason = \"stop word\"\n return True, reason\n\n # Filter bogus labels\n if \":\" not in token[\"label\"]:\n reason = \"invalid label, does not contain ':'\"\n return True, reason\n\n # Filter bogus lemma\n if 
len(token[\"lemma\"]) == 1:\n reason = \"lemma too short\"\n return True, reason\n\n # Filter punctuation\n if token[\"is_punct\"]:\n reason = \"is punctation\"\n return True, reason\n\n # Filter \"who/what...\" etc\n if \"wp\" in token[\"tag_\"]:\n reason = \"tag contains wp\"\n return True, reason\n\n # Filter spaces\n if \"_sp\" in token[\"tag_\"]:\n reason = \"tag contains _sp\"\n return True, reason\n\n # Filter pronouns\n if token[\"lemma\"] == \"-PRON-\":\n reason = \"lemma is -PRON-\"\n return True, reason\n\n # Must be a consumed tag\n if token[\"pos_\"] not in CONSUMED_TAGS:\n reason = f\"pos_ {token['pos_']} not in consumed tags: {CONSUMED_TAGS}\"\n return True, reason\n\n # Filter quotes\n if token[\"text\"] == \"’\" or token[\"lemma\"] == \"’\":\n reason = \"text or lemma is quote string\"\n return True, reason\n\n label = token[\"label\"]\n if label in filtered_words:\n reason = f\"label: {label} is in filtered words: {filtered_words}\"\n return True, reason\n\n return False, reason", "title": "" }, { "docid": "d0176adb62d9486fce7f09bd7311491a", "score": "0.59466016", "text": "def has_token(node, token):\n return any(filter(lambda t: t.spelling == token, node.get_tokens()))", "title": "" }, { "docid": "957afdc17d152ca2420ec3fd705729a6", "score": "0.59428716", "text": "def is_valid_token(self, token):\n if token.like_url:\n return False\n if token.like_email:\n return False\n if token.is_stop:\n return False\n if token.text in self.custom_stop:\n return False\n\n return True", "title": "" }, { "docid": "c884ff40cd43ce39cf3aaf6b25e636e3", "score": "0.59418505", "text": "def _is_end_of_word(text):\n last_char = text[-1]\n return bool(_is_control(last_char) | _is_punctuation(last_char) | _is_whitespace(last_char))", "title": "" }, { "docid": "56bbe79cef70e65b4ef76c72bef30f44", "score": "0.5934477", "text": "def is_ok(tokenized, vocab, min_len, max_len):\n if len(tokenized)>max_len:\n return False\n elif len(tokenized)<min_len:\n return False\n for x in tokenized:\n if x not in vocab:\n return False\n return True", "title": "" }, { "docid": "fb1b38c6b3571a72597c0ee4776dc076", "score": "0.59292173", "text": "def test_returns_true_if_string_contains_illegal_word(self):\n string = \"one two swift\"\n\n self.assertTrue(string)", "title": "" }, { "docid": "1928ffcf5ff5fe8c0c91ecde05c9db18", "score": "0.59242547", "text": "def is_word_alpha(word: str) -> bool:\n\n result = search(r'^[A-Za-z]*$', word)\n if result is None:\n return False\n return True", "title": "" }, { "docid": "28f7a91faadbeabde60a50e214a21c9e", "score": "0.59124166", "text": "def is_word_tokenizer(cls, verbose=False):\n if not _is_word_tokenizer(cls, verbose=verbose):\n return False, None\n\n inputs = []\n output = kb.Seq[kb.Word]\n\n for input_type in [kb.Sentence]:\n try:\n X = DATA_TYPE_EXAMPLES[input_type]\n\n tokenizer = cls()\n y = tokenizer.tokenize(X)\n\n assert DATA_RESOLVERS[output](y)\n inputs.append(input_type)\n except Exception as e:\n if verbose:\n warnings.warn(str(e))\n\n inputs = combine_types(*inputs)\n\n if inputs:\n return True, (inputs, output)\n else:\n return False, None", "title": "" }, { "docid": "ae197e5ae2f91f7a4f4bab2a8318ac15", "score": "0.59101856", "text": "def token_validator(token):\n if ' ' in token:\n raise serializers.ValidationError('No whitespace allowed in token.')", "title": "" }, { "docid": "92abff828779583ce21f5ef24515f039", "score": "0.5907151", "text": "def is_valid_word(word, hand, word_list):\n\n if word in word_list:\n word_in_word_list = True\n else: \n 
word_in_word_list = False\n\n # cannot be put in the \"for char in word:\" loop down below because it's\n # only true if it's true for every char in the word\n word_in_hand = True\n\n # checks if each character used in the word is also in the hand and the\n # if a character is not used more often than it occurs in the hand\n for char in word:\n if char not in hand or word.count(char) > hand[char]:\n word_in_hand = False \n\n if word_in_word_list == False or word_in_hand == False:\n return False\n else: \n return True", "title": "" }, { "docid": "12fe2db30610726595683ae120dbecb2", "score": "0.59036905", "text": "def __is_keyword(word):\n if len(word) >= 3:\n if word[0] == '\\\\' and word[1].isalpha() and word[2].isalpha():\n return True\n return False", "title": "" }, { "docid": "b27406fd494f8f2815145390512607ff", "score": "0.59036094", "text": "def test_only_form_and_lemma():\n token_line = '10.1\tmicro-pays\tmicro-pays\t_\t_\t_\t_\t_\t_\t_\\n'\n token = Token(token_line)\n\n assert_token_members(token, '10.1', 'micro-pays', 'micro-pays', None, None,\n {}, None, None, {}, {})", "title": "" }, { "docid": "f80de3b11638be1feca0f9eb86c2ba43", "score": "0.5902309", "text": "def valid_word(s):\n pattern = \"[a-z0-9_]+\"\n return re.fullmatch(pattern=pattern, string=s)", "title": "" }, { "docid": "7e6298e96a03f5faf1daaed8185cd30b", "score": "0.58908266", "text": "def is_word(obj):\n try:\n return isinstance(obj, str) and len(obj.split()) == 1\n except:\n return False", "title": "" }, { "docid": "f8bda91ca323a82eceaeaafcd76c7949", "score": "0.588986", "text": "def validate_word(guess, game_language):\n # Get absolute file path\n absolute_path = str(Path(__file__).parents[4])\n file_path = absolute_path + '/assets/filtered_dictionaries/' + game_language + '.txt'\n\n # Check if word exists / Check if right grammar\n with open(file_path, 'r') as dictionary:\n guess = guess.lower()\n if guess not in dictionary.read():\n return 'Given word does not exist'\n return ''", "title": "" }, { "docid": "6b988bfc4e4f112853d07da731958225", "score": "0.58880055", "text": "def is_stopword(self, token):\n return token.lower() in self.stopwords", "title": "" }, { "docid": "703e364581ceb75455e1751d435af054", "score": "0.58856565", "text": "def has_no_e(word):", "title": "" }, { "docid": "a038e67a09e33c7bdc4efeec010d5fb8", "score": "0.586093", "text": "def tobemodified(tokens, dictionary):\n error_list = []\n for word in tokens:\n if word[:1] == '\\n':\n word= word[1:]\n \n if word[-1] in '!\"#$%&\\'()*+,-./:;<=>?@[\\\\]^_`{|}~':\n word = word[:-1]\n \n if not check_spell(word, dictionary):\n error_list.append(word)\n return error_list", "title": "" }, { "docid": "d9bc2c5b2efc217075842acac77101b1", "score": "0.5853468", "text": "def word_check(word):\n if word in word_set:\n return True\n if word in self.clitic_set:\n return True\n if word in self.specials_set:\n return True\n return False", "title": "" }, { "docid": "5dfb34090fff5f868f1cb40ed076c795", "score": "0.58475816", "text": "def word_finder(vocab, word):\n doc = nlp(word)\n if doc[0].is_punct is False:\n try:\n result = vocab.word.str.contains(r'(?:\\s|^)' + word + '(?:\\s|$)').any()\n except:\n result = False\n if result:\n return True\n else:\n return False\n else:\n return False", "title": "" } ]
ddac9e7b5347d89c48b6db4ac68d0389
Fetch the product; return 404 or redirect as needed
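A short Django sketch of the fetch-then-404-or-redirect pattern the passages below exercise; the Product model, slug field, and template path are placeholder assumptions rather than this dataset's own code:

from django.http import HttpResponsePermanentRedirect
from django.shortcuts import get_object_or_404, render

from .models import Product  # hypothetical model with get_absolute_url()

def product_detail(request, slug):
    # get_object_or_404 raises Http404 when no product matches the slug.
    product = get_object_or_404(Product, slug=slug)
    canonical = product.get_absolute_url()
    if request.path != canonical:
        # Reached via a stale or non-canonical URL: redirect permanently.
        return HttpResponsePermanentRedirect(canonical)
    return render(request, 'shop/product_detail.html', {'product': product})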
[ { "docid": "db82d666c6839b6d9b68c65077befefc", "score": "0.65228117", "text": "def get(self, request, *args, **kwargs):\n self.object = get_obj(self.kwargs['slug'], Product)\n potential_redirect = redirect_if_necessary(request.path, self.object)\n\n if potential_redirect is not None:\n return potential_redirect\n\n self.kwargs['slug'] = self.object.slug\n return super(ProductDetailView, self).get(request, *args, **kwargs)", "title": "" } ]
[ { "docid": "e663e554f3aae5a892e1c8e614abc42b", "score": "0.69985074", "text": "def test_get_product_not_found(self):\n resp = self.app.get(\"/products/0\")\n self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)", "title": "" }, { "docid": "559f1e35e242076dc48f14f3d146007d", "score": "0.678645", "text": "def test_successful_get_specific_product(self):\n response = self.app.get('/product/1151')\n self.assertEqual(200, response.status_code)", "title": "" }, { "docid": "0977bb784e3c34db4f80928329583b36", "score": "0.6614696", "text": "def find_or_404(cls, customer_id,product_id):\n logger.info(\"Processing lookup or 404 for customer %s with product %s ...\", customer_id,product_id)\n return cls.query.get_or_404((customer_id,product_id))", "title": "" }, { "docid": "9d84c6561b6d20188ae823b91efd7d9d", "score": "0.6489885", "text": "def get(self, request, *args, **kwargs):\r\n raise Http404", "title": "" }, { "docid": "1beac2aa453ec18d51aad982d1460e70", "score": "0.64258575", "text": "def item_not_found():\n\n return make_response('Item not found.', 404)", "title": "" }, { "docid": "c8e69674b4330a17c15f41109cc0b329", "score": "0.63504493", "text": "def test_successful_get_product(self):\n response = self.app.get('/product')\n self.assertEqual(200, response.status_code)", "title": "" }, { "docid": "75dc11c0e03b2a71bc5ec2e8c8e14b5f", "score": "0.6336679", "text": "def test_for_fetching_a_specific_order_fails(self):\n response = self.client().get('/api/v1/products/1000')\n self.assertEqual(response.status_code, 404)\n self.assertIn(\"Product not found\", str(response.data))", "title": "" }, { "docid": "dec091fe72f15916cfbd18d62a70e806", "score": "0.6247141", "text": "def get(self):\n self.error(404)\n return", "title": "" }, { "docid": "98c138b643ac5bd3fd7e7becfa95a1bc", "score": "0.622798", "text": "def handle_404_error(e):\n return response('failed', 'Transactions resource cannot be found', 404)", "title": "" }, { "docid": "9478d0e503d028dcb1c30c8cd0b31e80", "score": "0.6203569", "text": "def test_raise_404_on_item_not_found(self):\n super(ItemEndpointTestPass, self).test_raise_404_on_item_not_found()", "title": "" }, { "docid": "57550b82c90f536bccd4a0555616b795", "score": "0.61958164", "text": "def get_or_404(self, *args, _message_404=None, **kwargs):\n try:\n return self.get(*args, **kwargs)\n except DoesNotExist:\n self._abort_404(_message_404)", "title": "" }, { "docid": "4adcaef92923709d6e5ea1b8e1e38c74", "score": "0.61111623", "text": "def test_raise_404_on_item_not_found(self):\n super(ItemEndpointTestFail, self).test_raise_404_on_item_not_found()", "title": "" }, { "docid": "be42f85d8317f7a33605d446473a4d5b", "score": "0.6031189", "text": "def get(self, request, *args, **kwargs):\n resp = super().get(request, *args, **kwargs)\n if self.object is None:\n return HttpResponseRedirect(reverse('gold_subscription'))\n return resp", "title": "" }, { "docid": "a31d6eaf0cb3870470c8559e201bbb4b", "score": "0.6026784", "text": "def page_not_found(e):\r\n return {'message': 'resource not found'}, 404", "title": "" }, { "docid": "9b0201ff0c506cc4f54df51d8c2785ab", "score": "0.5994425", "text": "async def get_or_404(record):\n if record.first():\n return record.first()\n else:\n raise HTTPException(404, 'Not Found')", "title": "" }, { "docid": "eb720976ab4c8012b8df7d1bfab58d1b", "score": "0.5967726", "text": "def find_or_404(self, pk):\n data = self.find(pk)\n return data if data else abort(404)", "title": "" }, { "docid": "b192d65fba5f0dcb15e59545874515c6", "score": "0.5956303", "text": "def 
get(self, request, **kwargs):\n item = self.get_object()\n correct_path = item.get_absolute_url() \n if correct_path != request.path:\n return HttpResponsePermanentRedirect(correct_path)\n \n response = super(ItemDetailView, self).get(request, **kwargs)\n \n # Send signal to record the view of this product\n product_signals.product_viewed.send(sender=self, product=item, user=request.user, request=request, response=response)\n return response;", "title": "" }, { "docid": "ec2a7954e9b452613b9a020e167a1c9a", "score": "0.5952283", "text": "def handle_not_found(self, err):\n raise err", "title": "" }, { "docid": "f05734f92a609ea8d7bcc336091a9e23", "score": "0.5941383", "text": "def return_404():\n return abort(404)", "title": "" }, { "docid": "17c535e3331234517e5d0fb69b56a8a1", "score": "0.5929744", "text": "def test_get_inventory_item_not_found(self):\n resp = self.app.get(\"/inventory/0\")\n self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)", "title": "" }, { "docid": "33a58be8bd853bed4aebb8ed72b18074", "score": "0.59205335", "text": "def retrieve(self, request, pk=None):\n try:\n single_order_product = OrderProduct.objects.get(pk=pk)\n serializer = OrderProductSerializer(\n single_order_product, context={'request': request})\n return Response(serializer.data)\n except Exception as ex:\n return HttpResponseServerError(ex)", "title": "" }, { "docid": "cd9e9f7c6aca64290dcf09534498e206", "score": "0.59147537", "text": "def modal_api(request, store_name, product_id):\n if request.method == 'GET':\n try:\n # Returned products should be within store's look_back parameter\n look_back_qs = StoreSettings.objects.filter(store__store_name=store_name)\n\n if not look_back_qs:\n logger.error('{} does not exist'.format(store_name))\n return HttpResponseBadRequest('{} does not exist'.format(store_name), status=400)\n\n look_back = look_back_qs.values('look_back')[0]['look_back']\n time_threshold = timezone.now() - timedelta(seconds=look_back * 60 * 60)\n\n product_id_socials = find_products_from_social_scope(store_name, product_id)\n\n product_id_social = None\n while len(product_id_socials) != 0:\n # Keep checking for a related product id with an order\n product_id_social = choice(product_id_socials)\n\n order_obj = Orders.objects \\\n .filter(store__store_name=store_name) \\\n .filter(product__product_id=product_id_social) \\\n .filter(processed_at__range=[time_threshold, timezone.now()])\n order_obj_first = order_obj.first()\n\n if order_obj_first is not None:\n break\n\n product_id_socials.remove(product_id_social)\n\n modal_obj = Modal.objects.filter(store__store_name=store_name).first()\n product_obj = Product.objects.filter(product_id=product_id_social).first()\n\n collection_obj = Collection.objects.filter(product__product_id=product_id_social).values('collection_id')\n collection_ids = ','.join([k['collection_id'] for k in list(collection_obj)])\n\n response_dict = dict()\n response_dict['store_name'] = store_name\n response_dict['product_id'] = product_id_social\n response_dict['look_back'] = look_back\n\n response_dict['main_image_url'] = product_obj.main_image_url if hasattr(product_obj,\n 'main_image_url') and product_obj.main_image_url != '' else None\n response_dict['handle'] = product_obj.handle if hasattr(product_obj, 'handle') else None\n response_dict['product_type'] = product_obj.product_type if hasattr(product_obj, 'product_type') else None\n response_dict['vendor'] = product_obj.vendor if hasattr(product_obj, 'vendor') else None\n response_dict['tags'] = product_obj.tags if 
hasattr(product_obj, 'tags') else None\n response_dict['product_name'] = product_obj.product_name if hasattr(product_obj, 'product_name') else None\n\n response_dict['collection_ids'] = collection_ids if collection_ids else None\n\n response_dict['social_setting'] = modal_obj.social_setting\n response_dict['color_brightness'] = modal_obj.color_brightness\n response_dict['color_hue'] = modal_obj.color_hue\n response_dict['color_saturation'] = modal_obj.color_saturation\n response_dict['size'] = modal_obj.size\n response_dict['location'] = modal_obj.location\n response_dict['social_scope'] = modal_obj.social_scope\n\n response_dict['first_name'] = order_obj_first.first_name if hasattr(order_obj_first, 'first_name') else None\n response_dict['last_name'] = order_obj_first.last_name if hasattr(order_obj_first, 'last_name') else None\n response_dict['province_code'] = order_obj_first.province_code if hasattr(order_obj_first,\n 'province_code') else None\n response_dict['country_code'] = order_obj_first.country_code if hasattr(order_obj_first,\n 'country_code') else None\n response_dict['last_order_qty'] = order_obj_first.qty if hasattr(order_obj_first, 'qty') else None\n response_dict['processed_at'] = order_obj_first.processed_at if hasattr(order_obj_first,\n 'processed_at') else None\n response_dict['person_qty_from_look_back'] = order_obj.count()\n response_dict['item_qty_from_look_back'] = order_obj.aggregate(Sum('qty'))['qty__sum']\n\n return JsonResponse(response_dict, safe=False)\n except Exception as e:\n logger.error(e)\n return HttpResponseBadRequest('Something went wrong.')\n\n return HttpResponseBadRequest('Invalid request')", "title": "" }, { "docid": "e82b6df145fdb3e4fdd857384965d92f", "score": "0.5909623", "text": "def render_GET(self, request):\n debug(\"404 error\")\n return '404 Error: Not Found.'", "title": "" }, { "docid": "c22c458795a3e8a14c1ca11c11ef94c7", "score": "0.5909124", "text": "def treenav_undefined_url(request, item_slug):\n item = get_object_or_404(treenav.MenuItem, slug=item_slug)\n # do something with item here and return an HttpResponseRedirect\n raise Http404", "title": "" }, { "docid": "8c00b661e14a956e56929e917785fc30", "score": "0.5900227", "text": "def return_404(self, msg='Resource not found'):\n self.send_error(404, msg)\n return", "title": "" }, { "docid": "065dd02261904f2a8eefba98b862f312", "score": "0.58961153", "text": "def test_get_non_existing_purchase(self):\n res = self.get(url=\"/purchases/5\")\n self.assertEqual(res.status_code, 401)\n self.assertException(res, exc.EntryNotFound)", "title": "" }, { "docid": "c0ba8f3568811e7505b0792cfcf0186e", "score": "0.5895416", "text": "def not_found(e):\n\n errorfile.debug(request.url)\n errorfile.debug(ERROR_CODE_400)\n resp = jsonify(ERROR_CODE_400)\n return resp, HTTPStatus.BAD_REQUEST.value", "title": "" }, { "docid": "0c8168abd049b23e6114326303acf973", "score": "0.58772784", "text": "def page_not_found(e):\n response = jsonify({\"message\": \"USE A VALID URL\"})\n response.status_code = 404\n return response", "title": "" }, { "docid": "43f8a3478cb4925c093faa7c84cb5ca1", "score": "0.5872004", "text": "def page_not_found(e):\n return redirect(url_for('index'))", "title": "" }, { "docid": "d5d8e7652a3a92b3fbe392edbf678b5b", "score": "0.58594143", "text": "def get_or_404(cls, entity_id):\n rv = cls.query.get(entity_id)\n if rv is None:\n abort(404)\n return rv", "title": "" }, { "docid": "436e10cf085420a4a24ed8249acd0147", "score": "0.5844687", "text": "def entity_or_404(key):\n try:\n obj = 
db.get(key)\n if obj:\n return obj\n except:\n pass\n raise Http404('Object not found')", "title": "" }, { "docid": "e6df2ed49436b427a14561c575866fa0", "score": "0.5841294", "text": "def get(self, item_id):\n app.logger.info('Finding a Product with id [{}]'.format(item_id))\n if not isinstance(item_id, int):\n return request_validation_error(\"Invalid Product ID\")\n product = Product.find_by_id(item_id)\n if product:\n # app.logger.info(product)\n return product.serialize(), status.HTTP_200_OK\n else:\n return not_found('Product ID was not found')", "title": "" }, { "docid": "78021fbbddabc9c7b2407d204d5d7cbd", "score": "0.58392555", "text": "def product(req):\n query = req.GET.get('code')\n if not query:\n prod = Product.objects.all()\n else:\n prod = Product.objects.filter(\n code=query)\n if not prod:\n return HttpResponseNotFound()\n else:\n prod = prod[0]\n req100 = json.loads(prod.req100.replace('-', ''))\n ng = prod.nutrition_grades.upper()\n ng_image = f\"/static/assets/img/Nutri-{ng}.png\"\n context = {\n 'masthead_title': prod.product_name,\n 'ng_image': ng_image,\n 'product_searched': prod,\n 'req100': req100\n }\n return render(req, 'product.html', context)", "title": "" }, { "docid": "f5abdb37b5a9a1adc673d73dba846447", "score": "0.5833637", "text": "def product_detail(request, product_id):\n product = _get_product(product_id)\n if product:\n serialized_product = ProductSerializer(product).product\n return JsonResponse(data=serialized_product, status=200)\n else:\n return JsonResponse(data={\"status\": \"Error 500: Bad Server Request\"}, status=500)", "title": "" }, { "docid": "0559d8dba39b6554ead297241602b88c", "score": "0.583006", "text": "def handle(self, data, context):\n return ControllerResponse(status=404)", "title": "" }, { "docid": "62588f7275ac6aaa6d009abe66a6fce1", "score": "0.58220875", "text": "def test_product_detail_url(self):\n response = self.c.get(reverse('store:product_detail', args=['django-beginners']))\n self.assertEqual(response.status_code, 200)", "title": "" }, { "docid": "75769ba2a2ddb0b5ddff2aa9c940de92", "score": "0.580997", "text": "def get_or_abort(self, ident, error_code=404, error_msg=None):\n rv = self.get(ident)\n if rv is None:\n abort(\n error_code, status='error', code=error_code, message=error_msg\n )\n return rv", "title": "" }, { "docid": "20cada5d5810bf4e6b21c8ea9cf34d62", "score": "0.57963455", "text": "def found_product(search_term):\n # API request\n try:\n search_result = openfoodfacts.products.search(\n search_term, locale=\"fr\"\n )\n except ConnectionError:\n product = \"API connect error\"\n else:\n products = search_result['products']\n if len(products) == 0:\n # check if there is a result ...\n product = \"api no result\"\n else:\n i = 0\n prod_founded = False\n while not prod_founded:\n try:\n product = products[i]\n except IndexError:\n # check if there is a result ...\n product = \"api no result\"\n prod_founded = True\n else:\n try:\n product[\"nutrition_grades\"]\n except KeyError:\n i += 1\n else:\n prod_founded = True\n return product", "title": "" }, { "docid": "b1b5a0c0e9fed73e0f1972a7aaf8ebd1", "score": "0.5790926", "text": "def url_product(self, product):\n pass", "title": "" }, { "docid": "89af7e0c73c1a4449a9574591ec089d8", "score": "0.5788948", "text": "def not_found(e):\n\n errorfile.debug(request.url)\n errorfile.debug(ERROR_CODE_404)\n resp = jsonify(ERROR_CODE_404)\n return resp, HTTPStatus.NOT_FOUND.value", "title": "" }, { "docid": "7b109326847b2ff8e5989d5402125eb9", "score": "0.5764918", "text": "def 
step_impl(context):\n    context.resp = requests.get(context.base_url)\n    assert context.resp.status_code != str(404)", "title": "" }, { "docid": "27e91a81b960ba723a92fd80af8f35d4", "score": "0.57634586", "text": "def get(self, request, *args, **kwargs):\n        raise Http404(\"Page does not exist\")", "title": "" }, { "docid": "6c9129d4e3090229d2056756264b5b66", "score": "0.575226", "text": "def not_found(request, *args, **kwargs):\n    raise Http404", "title": "" }, { "docid": "8f7750441e9c31a762acd21004160f57", "score": "0.57477474", "text": "def test_product_detail_url(self):\n        response = self.c.get(reverse('store:product_detail', args=['django-ecommerce']))\n        self.assertEqual(response.status_code, 200)", "title": "" }, { "docid": "a69425f6fa8ab15a981363f1f5181a1e", "score": "0.574148", "text": "def test_get_not_found(self):\n        not_found_detail_url = self.get_detail_url(uuid.uuid4())\n\n        response = self.client.get(\n            not_found_detail_url, format=\"json\", HTTP_AUTHORIZATION=self.get_http_authorization()\n        )\n        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)", "title": "" }, { "docid": "437a60c2547c1880e0a8c37992d0a1c1", "score": "0.57412237", "text": "def product_detail(request, id, slug):\n    product = get_object_or_404(Product, id=id, slug=slug, available=True)\n    return render(request, 'shop/product/detail.html', {'product': product})", "title": "" }, { "docid": "4e32bbb0484773eb90ed4ec71ddd41d4", "score": "0.5733496", "text": "def get_object_or_json404(*args, **kwargs):\n\n\ttry:\n\t\treturn get_object_or_404(*args, **kwargs)\n\texcept Http404:\n\t\traise Json404({'successs': False, 'errors': ['This object was not found.']})", "title": "" }, { "docid": "4208f805c2f3bd9293447a2da18a571a", "score": "0.57279116", "text": "def not_found(e):\n    return make_response(jsonify(status=404, error='Not Found',\n                                  message=e.description), status.HTTP_404_NOT_FOUND)", "title": "" }, { "docid": "9085f68ba575156a49e230610f9bc090", "score": "0.5717423", "text": "def product_detail(request, pk):\n    try:\n        snippet = products.objects.get(pk=pk)\n    except products.DoesNotExist:\n        return Response(status=status.HTTP_404_NOT_FOUND)\n\n    if request.method == 'GET':\n        serializer = productsSerializer(snippet)\n        return Response(serializer.data)\n\n    elif request.method == 'PUT':\n        serializer = productsSerializer(snippet, data=request.data)\n        if serializer.is_valid():\n            serializer.save()\n            return Response(serializer.data)\n        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n    elif request.method == 'DELETE':\n        snippet.delete()\n        return Response(status=status.HTTP_204_NO_CONTENT)", "title": "" }, { "docid": "3eb01aff02aeb3ee5bf7bbe9ffd8912e", "score": "0.5705913", "text": "def get_product_by_id(self, id):\n        try:\n            #get request\n            self.cursor.execute('SELECT * FROM product WHERE id=?', (id,))\n            #get of the product\n            product = self.cursor.fetchone()\n            return product\n        except Exception as error:\n            print('DATABASE ERROR:', error)\n            self.connection.rollback()", "title": "" }, { "docid": "bbd79a08e48e435e0e1c90e057504093", "score": "0.5700507", "text": "def
test_ingredient_detail_redirects_unauthenticated(self):\n url = crud_url_by_action_and_pk('detail')\n response = self.client.get(url)\n\n self.assertEqual(response.status_code, status.HTTP_302_FOUND)", "title": "" }, { "docid": "686d2e956b46ead1a59cdaab0aa33a82", "score": "0.5691569", "text": "def get(self, production_id):\n result = Production.get_product(production_id)\n if result is not None:\n return result\n else:\n return api.abort(404, f\"Production {production_id} doesn't exist\")", "title": "" }, { "docid": "fa1b8a0b912f8b7af47da61bdc28ecf7", "score": "0.5684426", "text": "def get_product_from_external_service(product_id):\n response = requests.get(product_service_url + f\"/product/{product_id}\")\n try:\n response_dict = response.json()[\"product\"]\n except KeyError:\n logger.warn(\"No product found with id %s\", product_id)\n raise NotFoundException\n\n return response_dict", "title": "" }, { "docid": "68129397594693a2e9aad0789b954565", "score": "0.5681374", "text": "def not_found(e):\n return jsonify(error='Not found'), 404", "title": "" }, { "docid": "27f05ff1cbfece0b17ab10d85497f4b0", "score": "0.5669108", "text": "def first_or_404(cls, **kwargs):\n item = cls._and_query(kwargs).first()\n if item is None:\n return abort(404)\n else:\n return item", "title": "" }, { "docid": "ab37432f262aceb7fdbbdd849d1a3458", "score": "0.5669022", "text": "def get(self, name):\n product = ProductService.get_by_name(name)\n if not product:\n api.abort(404)\n else:\n return product", "title": "" }, { "docid": "27117b0eb3d58ad174aaac3a10ea6a97", "score": "0.566318", "text": "def test_get_non_existing(self):\n response = self.app.get('/api/v3/menu/57', headers=self.user_header)\n self.assertEqual(response.status_code, 404)", "title": "" }, { "docid": "4247f0390e48366f4c483368d4eac53a", "score": "0.5649043", "text": "def not_found():\n return Response(404)", "title": "" }, { "docid": "c301bd65726423b2dc2529e799932f25", "score": "0.56392574", "text": "def not_found(e):\n errorfile.debug(request.url)\n errorfile.debug(ERROR_CODE_500)\n resp = jsonify(ERROR_CODE_500)\n return resp, HTTPStatus.INTERNAL_SERVER_ERROR.value", "title": "" }, { "docid": "bc141679d4e40280b5984e8090456514", "score": "0.5621265", "text": "def product_display(request, name):\n try:\n # Filter the product table where the slug_name.\n product = Product.objects.filter(slug_name=name)\n\n # Get pk from the product and store in pk variable.\n pk = product[0].pk\n\n # Store the product_id and price in the cache.\n cache.set('product_id', pk, None)\n cache.set('price', product[0].price, None)\n\n ctx = ({'title': 'Product display page', 'product': product[0]})\n return render(request, \"dashboard/product_display.html\", ctx)\n\n except Exception as e:\n logger.exception(\"EXCEPTION :\" + str(e))\n\n # Redirect to the dashboard page.\n return HttpResponseRedirect(reverse('dashboard'))", "title": "" }, { "docid": "2e0105dac2b3cb667749c29332e19a00", "score": "0.56169426", "text": "def _page_not_found(err):\n if request.path.startswith(\"/api/\"):\n message = \"API endpoint {!r} does not exist on this server!\".format(request.path)\n r = APIResult(status=False, message=message)\n r.fields = (\"status\", \"message\")\n return r.json(), err.code\n\n message = \"Page {!r} does not exist on this server!\".format(request.path)\n flash(message, \"warning\")\n\n return redirect(url_for(\"front._page_not_found\"))", "title": "" }, { "docid": "6864f5a86503145b6c05d7ef577afab3", "score": "0.5606515", "text": "def fast_404(request, *args, 
**kwargs):\n return HttpResponse(\"Not Found.\", status=404)", "title": "" }, { "docid": "2c0e0f3053605ed2057c4d3f3cbdf3fe", "score": "0.56023943", "text": "def resource_not_found(error):\n\n return jsonify({\"message\": \"Resource not found. Please confirm the URL\",\n \"status\": 404})", "title": "" }, { "docid": "fd2bde7f69846641f34ccf709aaf5404", "score": "0.5600506", "text": "def fast_404(request, *args, **kwargs):\n return HttpResponse('Not Found.', status=404)", "title": "" }, { "docid": "2f7679363ad975cd8bb71bdb67aaf8dd", "score": "0.5592767", "text": "def fetch(self, path, raise_error=..., **kwargs):\n ...", "title": "" }, { "docid": "c5d3d233baf58dec7cb47f279dbea1e1", "score": "0.55872846", "text": "def test_update_no_product(self):\n resp = self.app.put(\n \"/products/{}\".format(123),\n content_type=\"application/json\",\n )\n self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)", "title": "" }, { "docid": "4f0503a2ae63ae95235686e3780127b9", "score": "0.5578726", "text": "def error_404(e):\n return {\"error\": \"Not found\"}, 404", "title": "" }, { "docid": "88ed24afc58b6dc99974bb1e75f59696", "score": "0.5576002", "text": "def resource_not_found(e):\n error_code = HTTPStatus.NOT_FOUND.value\n response = jsonify(status=error_code, text=str(e))\n return response, error_code", "title": "" }, { "docid": "0b1adc7093ef5ac4f11d53729fe6383a", "score": "0.5573608", "text": "def raise_404(request):\n raise Http404", "title": "" }, { "docid": "568e3d44d03a19d14cb1f243f257c346", "score": "0.55619556", "text": "def product_view(request, category_name, category_id):\n names = [c.name for c in Category.objects.all()]\n if category_name not in names:\n template = loader.get_template('catalog/404Page.html')\n return HttpResponseNotFound(template.render(request))\n else:\n username = user_name(request)\n products = Product.objects.filter(categories=category_id)\n context = {\n 'username': username,\n 'categories': categories, # added\n 'category': category_name,\n 'products': products,\n }\n return render(request, 'catalog/product.html', context)", "title": "" }, { "docid": "54e14c7e6b4207f0bac1957ba7affe38", "score": "0.5554991", "text": "def product_detail(request, pk):\n try:\n product = Product.objects.get(pk=pk)\n except Product.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n if request.method == \"GET\":\n serializer = ProductSerializer(product)\n return Response({\n \"en\": {\n \"id\": serializer.data.get('id'),\n \"category\": serializer.data.get('category'),\n \"category_en\": serializer.data.get('category_en'),\n \"name\": serializer.data.get('name_en'),\n \"rate\": serializer.data.get('rate'),\n \"price\": serializer.data.get('price'),\n \"discount\": serializer.data.get('discount'),\n \"brand\": serializer.data.get('brand'),\n \"brand_en\": serializer.data.get('brand_en'),\n \"code\": serializer.data.get('code'),\n \"point\": serializer.data.get('point'),\n \"quantity\": serializer.data.get('quantity'),\n \"thumbimage\": serializer.data.get('thumbimage'),\n \"image\": serializer.data.get('image'),\n \"description_en\": serializer.data.get('description_en'),\n \"slug\": serializer.data.get('slug'),\n \"link_to_emonos\": serializer.data.get('link_to_emonos')\n },\n \"mn\":{\n \"id\": serializer.data.get('id'),\n \"category\": serializer.data.get('category'),\n \"name\": serializer.data.get('name'),\n \"rate\": serializer.data.get('rate'),\n \"price\": serializer.data.get('price'),\n \"discount\": serializer.data.get('discount'),\n \"brand\": 
serializer.data.get('brand'),\n                \"code\": serializer.data.get('code'),\n                \"point\": serializer.data.get('point'),\n                \"quantity\": serializer.data.get('quantity'),\n                \"thumbimage\": serializer.data.get('thumbimage'),\n                \"image\": serializer.data.get('image'),\n                \"description\": serializer.data.get('description'),\n                \"slug\": serializer.data.get('slug'),\n                \"link_to_emonos\": serializer.data.get('link_to_emonos')\n            }\n        })", "title": "" }, { "docid": "daf85e84faab64348fafaa992ca4f72a", "score": "0.55534655", "text": "def page_not_found(e):\n\treturn 'Sorry, Nothing at this URL.', 404", "title": "" }, { "docid": "b5c743e81b17726f7b0481dee28622db", "score": "0.5548017", "text": "def test_get_detail(self):\n        product = Product.objects.first()\n\n        response = self.c.get('/api/v1/product/{}/'.format(product.id))\n        self.assertHttpOK(response)", "title": "" }, { "docid": "1b58d899264209b5ce02d759ab1ae83f", "score": "0.55367637", "text": "def pageNotFound(e):\n    #Default 404 Error\n    logger.info('Deafault page 404')\n    return jsonify(\"{Error:[ Message: No Endpoint Here]}\")", "title": "" }, { "docid": "3f6b573d067d2d61ec36ca7ecd01d088", "score": "0.5535882", "text": "def test_disable_no_product(self):\n        \n        # disable the product\n        resp = self.app.put(\n            \"/products/{}/disable\".format(456),\n            content_type=\"application/json\",\n        )\n        self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)", "title": "" }, { "docid": "ced41c0e6ff3f582e51103ae5088a01e", "score": "0.553204", "text": "def handle_resource_not_found_request(error):\n    log.error(error)\n    payload = dict(error.payload or ())\n    payload['status'] = error.status\n    payload['message'] = error.message\n    return jsonify(payload), 404", "title": "" }, { "docid": "c1ea0c403eb6a2e19ce8c44d7da8e7e9", "score": "0.55284595", "text": "def test_like_no_product(self):\n        \n        # like the product\n        resp = self.app.put(\n            \"/products/{}/like\".format(456),\n            content_type=\"application/json\",\n        )\n        self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)", "title": "" }, { "docid": "8cd9d81742a2bbfe411bf847fed978e7", "score": "0.55259097", "text": "def product_detail(request, pk, format=None):\n    try:\n        products = Product.objects.get(pk=pk)\n    except Product.DoesNotExist:\n        return Response(status=status.HTTP_404_NOT_FOUND)\n\n    if request.method == 'GET':\n        serializer = ProductSerializer(products)\n        return Response(serializer.data)\n\n    elif request.method == 'PUT':\n        serializer = ProductSerializer(products, data=request.data)\n        if serializer.is_valid():\n            serializer.save()\n            return Response(serializer.data)\n        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n    elif request.method == 'DELETE':\n        products.delete()\n        return Response(status=status.HTTP_204_NO_CONTENT)", "title": "" }, { "docid": "7e496654bd0e00ae7fb48b2844448ac0", "score": "0.5517633", "text": "def test_find_or_404_not_found(self):\r\n        self.assertRaises(NotFound, Supplier.find_or_404, 0)", "title": "" }, { "docid": "d65e06209acbaf8fe71a1e1a6ea30fd8", "score": "0.5513908", "text": "def page_not_found(e):\n    return 'Sorry, Nothing at this URL.', 404", "title": "" }, { "docid": "8cefbb3818de661c25403bddd26192d9", "score": "0.5510964", "text": "def user_search_product(store_name, product_name, search_term):\n    # Lucky for us, stock can have just 1 more search term \n    stock = Stock.query.filter_by(store_name=store_name,\n                                  
product_name=product_name\n                                  ).first()\n    print(stock)\n    if not stock:\n        response_object = {\n            'status': 'Fail',\n            'message': 'Stock does not exist'\n        }\n        return response_object, 409\n    print(stock.quantity)\n    print(search_term)\n    try: \n        if int(stock.quantity) >= int(search_term):\n            return {\"Result\": \"Sufficient stock\"}\n    except Exception as e:\n        return {'Error': 'Invalid type for search term, try a number'}\n    \n    return {\"Result\": \"Insufficient stock\"}", "title": "" }, { "docid": "8ae4f51d154ce681d5a3793b08ee37a6", "score": "0.5495336", "text": "def not_found(error):\n    return make_response(jsonify({'error': 'Sorry! Couldn\\'t find that one.'}), 404)", "title": "" }, { "docid": "d92cd3b6b71d61f380f3ecd346069cf4", "score": "0.5494788", "text": "def lookup_product(self, product):\n    return self.lookup_table.find(\n        mappings=[\n            (\"SKU\", product.sku),\n            (\"NAME\", product.name.encode('ascii', errors='ignore')),\n            (\"THIRDPARTYCATEGORY\", product.url),\n        ], first=True)", "title": "" }, { "docid": "f0822345899dad416a30b3ba442ea616", "score": "0.548927", "text": "def resource_404(request):\n    return render(request, '404_missing.html')", "title": "" }, { "docid": "1ac339c21699547830c6da708a046663", "score": "0.5488302", "text": "def notfound(error):\n    return jsonify({\"error\": \"Not found\"}), 404", "title": "" }, { "docid": "98b82ea8ff146e07e73ab0b64ec07bb4", "score": "0.54818064", "text": "def resource_not_found(e: Any = \"Not Found\") -> Response:\n    # return jsonify({\"code\": 404, \"message\": str(e)}), http.HTTPStatus.NOT_FOUND\n    return make_json_response(http_status=404, data={\"code\": 404, \"message\": str(e)})", "title": "" }, { "docid": "283cb2c6968017eff1dd99fbc4017910", "score": "0.54757625", "text": "def not_found(error):\n    return (jsonify({'error': 'Not found'}), 404)", "title": "" }, { "docid": "80e45b36522df419176e9415371a6343", "score": "0.54731005", "text": "def product(product_id):\n    product = Product.query.get_or_404(product_id)\n\n    if request.method == 'POST':\n        product.name = request.form['name']\n        product.price = _parse_price(request.form['price'])\n        product.link = request.form['link']\n        product.infer_photo()\n        db.session.commit()\n    elif request.method == 'DELETE':\n        db.session.delete(product)\n        db.session.commit()\n\n    return redirect(url_for('products'))", "title": "" }, { "docid": "7b35f222881c19aa24b78b22a2d53eb9", "score": "0.54652363", "text": "def page_not_found(e):\n    return 'Sorry, Nothing here', 404", "title": "" }, { "docid": "c3f8885076377eb0d27a10f02acf8e4f", "score": "0.5463917", "text": "def product_detail(request, pk):\n    try:\n        product = Product.objects.get(pk=pk)\n    except Product.DoesNotExist:\n        return HttpResponse(status=404)\n\n    if request.method == 'GET':\n        serializer = ProductSerializer(product)\n        return JSONResponse(serializer.data)\n\n    elif request.method == 'PUT':\n        data = JSONParser().parse(request)\n        serializer = ProductSerializer(product, data=data)\n        if serializer.is_valid():\n            serializer.save()\n            return JSONResponse(serializer.data)\n        else:\n            return JSONResponse(serializer.errors, status=400)\n\n    elif request.method == 'DELETE':\n        product.delete()\n        return HttpResponse(status=204)", "title": "" }, { "docid": "841e9ae3e06d9d00f5de1db313442b3c", "score": "0.54620785", "text": "def response(self, request):\n    msg = {\n        'error': 'NOT_FOUND',\n        'message': self.args[0]\n    }\n    return JsonResponse(msg, status=404)", "title": "" }, { "docid": "3a9ebab27586d53ce4f8db6167a350c0", "score": "0.5457777", "text": "def not_found(error):\n    return 
make_response(jsonify({\"error\": \"Not found\"}), 404)", "title": "" }, { "docid": "125fb851cce018244cf04ef276dd4347", "score": "0.54539704", "text": "def process_response(self, request, response):\n        # If the given URL is \"Not Found\", then check if we should redirect to\n        # a path without a slash appended.\n        if response.status_code == 404:\n            path = request.path\n            whitelists = getattr(settings,\n                                 'UNSLASHED_WHITELIST_STARTSWITH',\n                                 ['/admin'])\n            if any(path.startswith(x) for x in whitelists):\n                return response\n            if self.should_redirect_without_slash(request):\n                return self.response_class(\n                    self.get_full_path_without_slash(request))\n        return response", "title": "" }, { "docid": "43ea16e13260de3a4712e86c7d03e554", "score": "0.5452604", "text": "def not_found(error):\n    return make_response(jsonify({'error': 'Not found'}), 404)", "title": "" } ]
93a36c4fe30acaf89dcd5cc040164c27
identifies picks on a truncated timeseries and creates attr.
[ { "docid": "135553abc9c41eea9f48d9a2907c11cd", "score": "0.6437175", "text": "def truncated_picks(self, date_range, value, which):\n\n pass", "title": "" } ]
[ { "docid": "1f32af4fefb08350a084de27f7dc1039", "score": "0.51315945", "text": "def convert_timestamp_attrs(self) -> None:\n\n def _get_datetime_str(timestamp: int) -> str:\n \"\"\"Auxiliary method to return datetime string from timestamp.\"\"\"\n if not timestamp: # attrInfo records as string to record to CSV.\n return timestamp\n \n datetime_obj = datetime.fromtimestamp( int(timestamp))\n return datetime_obj.strftime(\"%Y/%m/%d\")\n\n\n ## Convert joined\n joined_timestamp = getattr(self, 'joined')\n setattr(self, 'joined', _get_datetime_str(joined_timestamp))\n\n ## Convert attr with date in name.\n for attr in vars(self):\n if 'date' in attr:\n attr_timestamp = getattr(self, attr)\n setattr(self, attr,\n _get_datetime_str(attr_timestamp))\n return", "title": "" }, { "docid": "8cee2df84bc26e8c4c29d261c22e7cae", "score": "0.5127222", "text": "def __pos__(self): \n return TimeSeries(self._value, self._time)", "title": "" }, { "docid": "6dee79ec880a974642478f9dfb13032a", "score": "0.50730175", "text": "def _get_atts_from(self, parent_time_series):\n\n self.fmt = parent_time_series.fmt\n self.headers = parent_time_series.headers\n self.time_col = parent_time_series.time_col\n self.time_header = parent_time_series.time_header\n self.disc_level = parent_time_series.disc_level + 1\n return", "title": "" }, { "docid": "ca5bf699e660a3df1fe821fba9523e20", "score": "0.500903", "text": "def get_temporal_attributes(df, axes=None):\n\n axes = get_default_axes(axes)\n mint = df[axes.t].min()\n maxt = df[axes.t].max()\n\n times = pd.DatetimeIndex(unique_justseen(df[axes.t]))\n dt_index_diff = times[1:] - times[:-1]\n dt_counts = dt_index_diff.value_counts(sort=True)\n\n if dt_counts.size > 0 and dt_counts.values[0] / (len(times) - 1) > 0.75:\n mode_value = dt_counts.index[0]\n else:\n # Calculate a static resolution\n mode_value = ((maxt - mint) / len(times))\n\n return {\n 'variables': {\n axes.t: {\n 'attributes': {\n 'actual_min': mint.strftime('%Y-%m-%dT%H:%M:%SZ'),\n 'actual_max': maxt.strftime('%Y-%m-%dT%H:%M:%SZ'),\n }\n },\n },\n 'attributes': {\n 'time_coverage_start': mint.strftime('%Y-%m-%dT%H:%M:%SZ'),\n 'time_coverage_end': maxt.strftime('%Y-%m-%dT%H:%M:%SZ'),\n 'time_coverage_duration': (maxt - mint).round('1S').isoformat(),\n 'time_coverage_resolution': mode_value.round('1S').isoformat()\n }\n }", "title": "" }, { "docid": "96066ca939cbae5ff7ea12ea61e012c9", "score": "0.49801186", "text": "def _adjust_length(self, length: int) -> None:\n\n self.n = length\n self.time = pd.date_range(\n start=self.start,\n freq=self.freq,\n periods=self.n,\n )", "title": "" }, { "docid": "13e0d289c59d0b70efca5910bb286967", "score": "0.49750167", "text": "def truncated(self, truncated):\n\n self._truncated = truncated", "title": "" }, { "docid": "8f9994529aae8ed04cdb6fa61eacfd76", "score": "0.49558097", "text": "def __init__(self, name = \"name\", units = None, subsetted = False,\n disc_level = 0, parent = None):\n\n self.name = name # the name of this time series (string)\n self.units = units # unit of time represented by subset if subsetted (string)\n self.subsetted = subsetted # does this time series have subsets? 
(bool)\n        self.disc_level = disc_level    # the subset level of this time_series (int)\n\n        self.fmt = False                # for interpreting timestrings to time objs (string)\n        self.headers = []               # one header for each col in dataset (list of strings)\n\n        self.time_col = 0               # index of data column with time info (int)\n        self.time = []                  # separate copy of data[time_col] (list of strings)\n        self.time_dom = False           # self.time converted to (list of datetime objs)\n        self.time_dec_days = []         # self.time converted to (mono rising decimal days floats)\n        self.time_seconds = []          # self.time converted to (mono rising seconds floats)\n        self.center_time = []           # time around which data in a subset it centered (dto)\n        self.start_dto = []             # datetime_object that mono rising times start from (dto)\n        self.mean_interval = 0          # average number of seconds between data points (float)\n\n        self.subsets = []               # object list containing constituent time_series\n\n        self.row_data = []              # row wise dataset\n        self.col_data = []              # column wise dataset, built as dict\n\n        self.bad_rows = []              # subset from data attribute with \"bad rows\"\n\n        self.infilepath = []            # tracks filepath of input CSV. used to DISALLOW overwriting\n                                        # source CSV with output CSV.\n        \n        # run some methods to build subset attributes\n        if parent:\n            self._get_atts_from(parent)\n        \n        return", "title": "" }, { "docid": "84a31dbdd2b3065d7dbe2706c48fc303", "score": "0.48654923", "text": "def addAttributeTime(self):\n        values = range(self.attributes.shape[1])\n        self.addAttribute(\"Time\", values)", "title": "" }, { "docid": "50f345711ffbb3d0b0f14e97ff3312ce", "score": "0.48158103", "text": "def __trunc__(self, *args, **kwargs): # real signature unknown\n        pass", "title": "" }, { "docid": "b0352bed465ddbd7bfe387cbb540550c", "score": "0.47947532", "text": "def __getattr__(self, attr_name: str) -> Callable:\n        from .timeseries import TimeSeries\n\n        def method(*args, 
**kwargs):\n if attr_name in dir(np) and callable(getattr(np, attr_name)):\n rtn_time_series = self.init_data\n for chunk in self.chunks:\n if \"axis\" not in kwargs:\n kwargs[\"axis\"] = self.axis\n if not chunk.row_mask:\n return AttributeError\n if not chunk.col_mask:\n return AttributeError\n if isinstance(getattr(np, attr_name), np.ufunc):\n data = getattr(np, attr_name).reduce(\n self.parent[chunk.row_mask][:, chunk.col_mask], *args, **kwargs\n )\n else:\n data = getattr(np, attr_name)(\n self.parent[chunk.row_mask][:, chunk.col_mask], *args, **kwargs\n )\n if not data.shape: # not an array\n return data\n if len(data.shape) == 1:\n if kwargs[\"axis\"] == 0:\n data = data.reshape((1, data.shape[0]))\n elif data.shape[1] == 1 and data.shape[0] != 1:\n if kwargs[\"axis\"] == 0:\n data = data.reshape((data.shape[1], data.shape[0]))\n\n row_count = data.shape[0]\n if len(data.shape) == 1:\n col_count = 1\n else:\n col_count = data.shape[1]\n\n time_index = self.parent.time[chunk.row_mask]\n if self.collapse_index and row_count > 0:\n time_index = [time_index[self.collapse_time_index]]\n row_count = 1\n new_ts = TimeSeries(\n shape=(row_count, col_count),\n time=time_index,\n labels=self.parent._least_common_labels(chunk.col_mask),\n )\n new_ts[:] = data\n if rtn_time_series is None:\n rtn_time_series = new_ts\n else:\n if self.collapse_index: # need to vstack this\n rtn_time_series.vstack(new_ts)\n else:\n rtn_time_series = rtn_time_series.merge([new_ts])\n return rtn_time_series\n else:\n raise AttributeError\n\n return method", "title": "" }, { "docid": "174339b96786e9c11cfe5f3642adeb0c", "score": "0.47190338", "text": "def test_setting_series_data_by_name(self):\n\n self.testInst.load(self.ref_time.year, self.ref_doy, use_header=True)\n self.testInst['doubleMLT'] = 2. * pds.Series(\n self.testInst['mlt'].values, index=self.testInst.index)\n assert np.all(self.testInst['doubleMLT'] == 2. 
* self.testInst['mlt'])\n\n self.testInst['blankMLT'] = pds.Series(None, dtype='float64')\n assert np.all(np.isnan(self.testInst['blankMLT']))\n return", "title": "" }, { "docid": "90aa99eac2451e7dcdc0d65cd23845d6", "score": "0.47114", "text": "def add_time_series_data(self, label, data, times, ts_attrs={}, data_attrs={},\n kind='TimeSeries'):\n nts = self.nwb_file.make_group(\n \"<{}>\".format(kind), label,\n path=\"/acquisition/timeseries\",\n attrs=ts_attrs)\n nts.set_dataset(\"data\", data, attrs=data_attrs)\n nts.set_dataset(\"timestamps\", times)\n return nts", "title": "" }, { "docid": "cebc48873b7399fe9f58233914a58680", "score": "0.47106883", "text": "def add_datapoint(self, value, N, sample, attr):\n if value == self.min:\n # need to find bucket at end of range\n if len(self.singular) != 0 and self.singular[0]['low'] == self.regular[0]['low']:\n # then there is a singular bucket at the left extreme of the range in which case we extend\n # the ranges of both buckets while only adding to the frequency of the singular bucket\n self.singular[0]['low'] = self.min\n self.singular[0]['frequency'] += 1\n self.singular[0]['size'] = self.singular[0]['high'] - self.singular[0]['low']\n self.regular[0]['low'] = self.min\n self.regular[0]['size'] = self.regular[0]['high'] - self.regular[0]['low']\n else:\n # then there isn't a singular bucket at the left extreme of the range and we only extend the leftmost\n # regular bucket\n self.regular[0]['low'] = self.min\n self.regular[0]['frequency'] += 1\n self.regular[0]['size'] = self.regular[0]['high'] - self.regular[0]['low']\n\n elif value == self.max:\n if len(self.singular) != 0 and self.singular[len(self.singular) - 1]['high'] == self.regular[len(self.regular) - 1]['high']:\n # then there is a singular bucket at the right extreme of the range in which case we extend\n # the ranges of both buckets while only adding to the frequency of the singular bucket\n self.singular[len(self.singular) - 1]['high'] = self.max\n self.singular[len(self.singular) - 1]['frequency'] += 1\n self.singular[len(self.singular) - 1]['size'] = self.singular[len(self.singular) - 1]['high'] - self.singular[len(self.singular) - 1]['low']\n self.regular[len(self.regular) - 1]['high'] = self.max\n self.regular[len(self.regular) - 1]['size'] = self.regular[len(self.regular) - 1]['high'] - self.regular[len(self.regular) -1]['low']\n else:\n # then there isn't a singular bucket at the right extreme of the range and we only extend the rightmost\n # regular bucket\n self.regular[len(self.regular) - 1]['high'] = self.max\n self.regular[len(self.regular) - 1]['frequency'] += 1\n self.regular[len(self.regular) - 1]['size'] = self.regular[len(self.regular) - 1]['high'] - self.regular[len(self.regular) - 1]['low']\n\n else:\n self.checkbucketsandincrement(value, N)\n\n if self.chisquaretest() < 0.05:\n self.significanceReached(N)", "title": "" }, { "docid": "77fc0bc4e4cc7c67e9a61cc6da496fcd", "score": "0.47102097", "text": "def timeseries_dataset(self):\n raise NotImplementedError()", "title": "" }, { "docid": "cb1b7dec86bf7fb504df12478f70bb77", "score": "0.46888927", "text": "def _get_series(self):\n return self.__series", "title": "" }, { "docid": "249faf0759264325e8295c9780f4ebb6", "score": "0.46757296", "text": "def _fill_before(self, series: Series) -> Series:\n freq = self.settings[\"freq\"]\n method = self.settings[\"fill_before\"]\n tmin = self.settings[\"tmin\"]\n\n if tmin is None:\n pass\n elif pd.Timestamp(tmin) > series.index.max():\n logger.error(\n \"The tmin is later 
than the last value of the time series. Pastas \"\n \"does not support this. Please extend time series manually.\"\n )\n elif pd.Timestamp(tmin) >= series.index.min():\n series = series.loc[pd.Timestamp(tmin) :]\n else:\n index_extend = pd.date_range(\n start=pd.Timestamp(tmin), end=series.index.min(), freq=freq\n )\n series = series.reindex(series.index.union(index_extend[:-1]))\n\n if method == \"mean\":\n mean_value = series.mean()\n series = series.fillna(mean_value) # Default option\n logger.info(\n \"Time Series '%s' was extended in the past to %s with the mean \"\n \"value (%.2g) of the time series.\",\n self.name,\n series.index.min(),\n mean_value,\n )\n elif method == \"bfill\":\n first_value = series.loc[series.first_valid_index()]\n series = series.fillna(method=\"bfill\") # Default option\n logger.info(\n \"Time Series '%s' was extended in the past to %s with the first \"\n \"value (%.2g) of the time series.\",\n self.name,\n series.index.min(),\n first_value,\n )\n elif isinstance(method, float):\n series = series.fillna(method)\n logger.info(\n \"Time Series '%s' was extended in the past to %s by adding %s \"\n \"values.\",\n self.name,\n series.index.min(),\n method,\n )\n elif method is None:\n msg = (\n f\"Time Series '{self.name}': cannot be extended into past to\"\n f\" {series.index.min()} as 'fill_before' method is 'None'. \"\n \"Provide settings to stress model, e.g. \"\n \"`ps.StressModel(stress, settings='prec')`.\"\n )\n logger.error(msg)\n raise ValueError(msg)\n else:\n logger.info(\n \"Time Series '%s': User-defined option for fill_before '%s' is not \"\n \"supported.\",\n self.name,\n method,\n )\n\n return series", "title": "" }, { "docid": "2ca848825b5eb3b9fe1c7e28104803eb", "score": "0.46345872", "text": "def series_original(self, series: Series) -> None:\n validate_stress(series)\n self._series_original = series.copy()\n self.freq_original = pd.infer_freq(self._series_original.index)\n self.settings[\"tmin\"] = series.index.min() # reset tmin\n self.settings[\"tmax\"] = series.index.max() # reset tmax\n self.update_series(force_update=True, **self.settings)", "title": "" }, { "docid": "0ae45ae57974b80267cc79f8f43c5b32", "score": "0.4609831", "text": "def test_data_access_by_datetime_slicing_and_name(self):\n\n self.testInst.load(self.ref_time.year, self.ref_doy, use_header=True)\n time_step = (self.testInst.index[1]\n - self.testInst.index[0]).value / 1.E9\n offset = dt.timedelta(seconds=(10 * time_step))\n start = dt.datetime(2009, 1, 1, 0, 0, 0)\n stop = start + offset\n assert np.all(self.testInst[start:stop, 'uts']\n == self.testInst.data['uts'].values[0:11])\n return", "title": "" }, { "docid": "57276c6fa45da53c160f61087c27d20e", "score": "0.45908964", "text": "def __init__(self, n0):\r\n super(type(self)).__init__()\r\n #we need to create the data dateframe first\r\n \r\n x = data['x'][:, None]\r\n y = data['y'][:,None]\r\n time = np.divide(distance,vptxt)\r\n t=time.flatten()[:, None]\r\n u = distance\r\n idx_x = np.random.choice(x.shape[0], n0, replace=False)\r\n self.x = x[idx_x, :]\r\n self.y = y[idx_x, :]\r\n self.u = u[idx_x, 0:1]\r\n self.t = np.zeros(self.x.shape)", "title": "" }, { "docid": "35693dd78d2bd0617a00c122e82554ed", "score": "0.45812804", "text": "def get_chow_type_stat(series: pd.Series, min_length: int = 20, num_threads: int = 8, verbose: bool = True) -> pd.Series:\n\n pass", "title": "" }, { "docid": "a8b19b936524ca0539458826bc8c59b5", "score": "0.45588413", "text": "def _TimeSeriesFromData(self, data, attr=None):\n\n series = 
timeseries.Timeseries()\n\n for value, timestamp in data:\n if attr:\n try:\n series.Append(getattr(value, attr), timestamp)\n except AttributeError:\n raise ValueError(\n \"Can't find attribute %s in value %s.\" % (attr, value))\n else:\n if hasattr(value, \"sum\") or hasattr(value, \"count\"):\n raise ValueError(\n \"Can't treat complext type as simple value: %s\" % value)\n series.Append(value, timestamp)\n\n return series", "title": "" }, { "docid": "224c0d577eaecadb1f84109c54cecdba", "score": "0.45534542", "text": "def create_features2(df,datetime_series):\r\n df['Date/Time']=datetime_series\r\n df['Date/Time'] = pd.to_datetime(df['Date/Time'], errors='coerce')\r\n df['date'] = df.index\r\n df['hour'] = df['Date/Time'].dt.hour\r\n df['dayofweek'] = df['Date/Time'].dt.dayofweek\r\n df['quarter'] = df['Date/Time'].dt.quarter\r\n df['month'] = df['Date/Time'].dt.month\r\n df['year'] = df['Date/Time'].dt.year\r\n df['dayofyear'] = df['Date/Time'].dt.dayofyear\r\n df['dayofmonth'] = df['Date/Time'].dt.day\r\n df['weekofyear'] = df['Date/Time'].dt.weekofyear\r\n \r\n X = df[['hour','dayofweek','quarter','month','year',\r\n 'dayofyear','dayofmonth','weekofyear']]\r\n \r\n return X", "title": "" }, { "docid": "be5b8f22e455fd5e15f9ee3a3ec7240d", "score": "0.45434174", "text": "def make_set(self, setname, align_dates, feats):\n set_dict = {}\n for data, feat_list in feats.items():\n for feat in feat_list:\n set_dict[feat] = self.dicts[data][feat].select(\n F.date_trunc(align_dates, self.dicts[data][feat].datetime).alias(\"datetime\"), \"value\")\n\n self.sets[setname] = self._dict2df(set_dict, sort=True, drop_nulls=False)\n\n # Truncated duplicates can exist in dataframe, so lets drop these samples.\n self.sets[setname] = self.sets[setname].dropDuplicates([\"datetime\"])", "title": "" }, { "docid": "5bc1f196ce8560abfeadfcffc3d1df7b", "score": "0.45332098", "text": "def __init__(self, *args, **kwargs):\n dt = kwargs.pop('dtype', WeightedSeriesDtype())\n Series.__init__(self, *args, dtype=dt, **kwargs)", "title": "" }, { "docid": "0aecbb8613f78e3c9f6c1de685f2c6d7", "score": "0.4513046", "text": "def add_exposure_times(self, dt=None):\r\n\r\n\t\tif 'exposure_times' in self.data.columns:\r\n\t\t\treturn self\r\n\r\n\t\tts = [] # this will be a list of time instant lists one per path \r\n\r\n\t\tif dt:\r\n\r\n\t\t\t_t0 = arrow.utcnow()\r\n\r\n\t\t\tself.data['path'].str.split('>') \\\r\n\t\t\t\t.apply(lambda _: [ch.strip() for ch in _]) \\\r\n\t\t\t\t.apply(lambda lst: ts.append(self.sep.join([r.format('YYYY-MM-DD HH:mm:ss') \r\n\t\t\t\t\t\t\t\t\tfor r in arrow.Arrow.range('second', _t0, _t0.shift(seconds=+(len(lst) - 1)))])))\r\n\r\n\t\tself.data['exposure_times'] = ts\r\n\r\n\t\treturn self", "title": "" }, { "docid": "2902b54a08b4c82fcac74cfc8093ea33", "score": "0.45127666", "text": "def status_addToSeries():", "title": "" }, { "docid": "78d8d03629abb576c33b3a8b7dc24167", "score": "0.4501075", "text": "def create_embedded_features(data, s, l, varnames):\n\n # rolling window average\n tmp = data.sel(variable=varnames).rolling(time=l-s, center=False, min_periods=1).mean()\n\n # overwrite time stamp to current day\n tmp = tmp.assign_coords(time=[time + np.timedelta64(l,'D') for time in tmp.coords['time'].values])\n\n # rename feature to not overwrite variable\n tmp = tmp.assign_coords(variable=[f'{var}lag_{s}ff' for var in varnames])\n\n # fill missing values in lagged features at beginning or end of time series\n varmeans = tmp.mean(dim=('time'))\n tmp = tmp.fillna(varmeans)\n\n return tmp", 
"title": "" }, { "docid": "c98aeda125ab72b91f396035981618cf", "score": "0.44942376", "text": "def add_variable_along_timelatlon(ds, var, name, units, long_name):\n ds[name] = (('time','south_north','west_east'), var)\n ds[name].attrs['units'] = units\n ds[name].attrs['long_name'] = long_name\n return ds", "title": "" }, { "docid": "58fa69a83e9e7a1e2e73c72fa01945c0", "score": "0.4492216", "text": "def _add_series_start_time(self, matches_df):\n series_start_time = \\\n matches_df.groupby('seriesId')['startTimestamp'].min()\n matches_df['series_start_time'] = \\\n series_start_time[matches_df.seriesId].values\n return matches_df", "title": "" }, { "docid": "0838852a6a941295889439ced04b3170", "score": "0.4487692", "text": "def generate_point_suspension(athlete_dict, attrs, extra=None, convert_lab=None, styles=None):\n\n athletes = list(athlete_dict.keys()) #athlete names\n num_ath = len(athletes)\n fig, ax = plt.subplots(dpi = 320, figsize=(10,6))\n\n plot_dict = {}\n plot_dict = plot_dict.fromkeys(athletes)\n for key in plot_dict.keys():\n plot_dict[key] = {}\n plot_dict[key] = plot_dict[key].fromkeys(np.unique(athlete_dict[key][\"dim\"]))\n\n for dims in np.unique(athlete_dict[key][\"dim\"]):\n plot_dict[key][dims] = {\"time\": [], \"kg\": [], \"day\": []}\n\n col_id = 0\n\n for idx, at_name in enumerate(athletes):\n\n num_load = len(np.unique(athlete_dict[at_name][\"kg\"]))\n num_dim = len(np.unique(athlete_dict[at_name][\"dim\"]))\n\n\n x_lab = athlete_dict[at_name][attrs[0]] #select the one with more days\n y_val = athlete_dict[at_name][attrs[1]] #select attribute 1\n z_val = athlete_dict[at_name][attrs[2]] #select attribute 2 \n c_val = athlete_dict[at_name][extra]\n\n for i in range(len(x_lab)):\n day_ = x_lab[i]* len(y_val[i])\n times_ = y_val[i]\n dims_ = z_val[i]\n kgs_ = c_val[i]\n\n\n for id_ in range(len(day_)):\n if type(times_[id_]) == type(kgs_[id_]) == type(day_[id_]) == list:\n plot_dict[at_name][dims_[id_]][\"time\"] += times_[id_]\n plot_dict[at_name][dims_[id_]][\"kg\"] += kgs_[id_]\n plot_dict[at_name][dims_[id_]][\"day\"] += day_[id_]\n else:\n plot_dict[at_name][dims_[id_]][\"time\"].append(times_[id_])\n plot_dict[at_name][dims_[id_]][\"kg\"].append(kgs_[id_])\n plot_dict[at_name][dims_[id_]][\"day\"].append(day_[id_])\n\n \n #if there is the dimension 0 it means that we do not have data for that day (filling everything with 0)\n #but it will save it as a new dimension. We cycle on the days without data and for each other dimension\n #if the \"0\" day is not present we fill the 0 for kg and time. 
Then we delete the dim=0 data\n\n if 0 in plot_dict[at_name].keys():\n #extending each dimension with the empty ones\n for i in range(len(plot_dict[at_name][0][\"day\"])):\n day = plot_dict[at_name][0][\"day\"][i]\n\n for key in plot_dict[at_name].keys():\n if key != 0:\n if day not in plot_dict[at_name][key][\"day\"]:\n plot_dict[at_name][key][\"day\"].append(day)\n plot_dict[at_name][key][\"time\"].append(0)\n plot_dict[at_name][key][\"kg\"].append(0)\n\n #deleting the empty (dim = 0)\n del plot_dict[at_name][0]\n\n for key in plot_dict[at_name].keys():\n label_ = at_name + \" Dimension: {} mm\".format(str(key))\n\n x = np.arange(len(plot_dict[at_name][key][\"day\"]))\n y = plot_dict[at_name][key][\"time\"] \n txt = plot_dict[at_name][key][\"kg\"]\n\n \"\"\"\n\n if colors is not None and col_id < len(colors):\n scat = ax.plot(x, y, label=label_, color=colors[idx], markersize=10, marker=None)\n else:\n scat = ax.plot(x, y, label=label_, markersize=10, marker='o')\n\n \"\"\"\n scat = ax.plot(x, y, label=label_)\n scat = Graphic_Utils.style_for_plot(scat, col_id, styles)\n fig.canvas.draw()\n fig.canvas.flush_events()\n\n for ind, txt_ in enumerate(txt):\n ax.annotate(\"Kg: {}\".format(txt_), (x[ind] + 0.03, y[ind]+0.06), fontsize=10)\n #ax.text(x * (1 + 0.02), y , i, fontsize=12)\n\n ax.set_xticks(x)\n ax.set_xticklabels(plot_dict[at_name][key][\"day\"], fontsize=8)\n ax.legend()\n\n col_id += 1\n\n\n # Add some text for labels, title and custom x-axis tick labels, etc.\n if convert_lab is None:\n ax.set_ylabel(\"time\")\n ax.set_xlabel(\"days\")\n else:\n ax.set_ylabel(convert_lab[\"time\"])\n ax.set_xlabel(convert_lab[\"days\"])\n \n fig.tight_layout()\n\n return fig, ax", "title": "" }, { "docid": "4cf033c50db46cb94a7cda9ea5988671", "score": "0.4450936", "text": "def _update_timeseries_info(self):\n self._timeseriesInfo = self._get_timeseries_info()\n\n\n if self._timeseriesInfo:\n self._last_timestamp_timeseries = self._timeseriesInfo.end_time\n else:\n self._last_timestamp_timeseries = None\n\n if self._cache:\n if self._last_timestamp_timeseries is not None:\n self._cache.last_timestamp_timeseries = self._last_timestamp_timeseries\n\n #When we update timeseries info, verify our last point cache is valid, if it's not invalidate it\n if self._last_point:\n if self._last_point.timestamp != self._last_timestamp_timeseries:\n self._last_point = None", "title": "" }, { "docid": "1a36c4f5a169a8414d5aaf6e5533acc9", "score": "0.44437224", "text": "def add_soundspeed(tag_data, time_col, temp, temp_time_col, temp_temp_col):\n\n tag_data = pd.merge_asof(tag_data, temp[[temp_time_col, temp_temp_col]], left_on=time_col, right_on=temp_time_col, direction='nearest')\n tag_data['soundspeed'] = calc_soundspeed(T=tag_data[temp_temp_col])\n return tag_data", "title": "" }, { "docid": "d7703716763a0d982b2203b3d522ecf9", "score": "0.44432297", "text": "def _create_time_series(dfincident, agg_by, pattern, group_by,\n types, width=500, height=350):\n\n def xticker():\n \"\"\" Custom function for positioning ticks \"\"\"\n if (int(tick)%10 == 0) | (len(tick)>2):\n return tick\n else:\n return \"\"\n\n x, y, labels = aggregate_data_for_time_series(dfincident, agg_by, \n pattern, group_by,\n types, None)\n\n if group_by != \"None\":\n colors, ngroups = get_colors(len(labels))\n source = ColumnDataSource({\"xs\": x[0:ngroups],\n \"ys\": y[0:ngroups],\n \"cs\": colors,\n \"label\": labels[0:ngroups]})\n else:\n source = ColumnDataSource({\"xs\": [x],\n \"ys\": [y],\n \"cs\": [\"green\"],\n \"label\": [\"avg 
incidents count\"]})\n\n # create plot\n timeseries_tools = \"pan,wheel_zoom,reset,xbox_select,hover,save\"\n p = figure(tools=timeseries_tools, width=width, height=height,\n x_range=FactorRange(*x))\n\n glyph = p.multi_line(xs=\"xs\", ys=\"ys\", legend=\"label\", line_color=\"cs\", \n source=source, line_width=3)\n\n # format legend\n p.legend.label_text_font_size = \"7pt\"\n p.legend.background_fill_alpha = 0.5\n p.legend.location = 'top_left'\n # format ticks\n p.xaxis.formatter = FuncTickFormatter.from_py_func(xticker)\n p.xaxis.major_tick_line_width = 0.1\n p.xaxis.major_label_text_font_size = \"5pt\"\n p.xaxis.group_text_font_size = \"6pt\"\n p.xaxis.major_tick_line_color = None\n p.x_range.group_padding = 0.0\n p.x_range.range_padding = 0.0\n p.x_range.subgroup_padding = 0.0\n\n #p.yaxis.major_tick_line_color = \"Red\"\n #p.yaxis.major_label_text_font_size = \"6pt\"\n #p.y_range = Range1d(np.min(y)*0.9, np.max(y)*1.1)\n return p, glyph", "title": "" }, { "docid": "45193b41608394b35d6be1ce3f112895", "score": "0.4432728", "text": "def _set_date_attr(self, date_type, value):\n dates_tag = self.xpath('//DATES')\n if dates_tag:\n dates_tag = dates_tag[0]\n else: # Tag wasn't present; create it\n dates_tag = etree.Element(\"DATES\")\n self.xml.insert(0, dates_tag)\n if isinstance(value, date):\n value = value.isoformat()\n dates_tag.attrib[\"eregs-{}-date\".format(date_type)] = value", "title": "" }, { "docid": "2ff9376e8108f09a50bba0049eceeb8b", "score": "0.44090134", "text": "def _make_series(\n n_timepoints=50,\n n_columns=1,\n all_positive=True,\n index_type=None,\n return_numpy=False,\n random_state=None,\n):\n rng = check_random_state(random_state)\n data = rng.normal(size=(n_timepoints, n_columns))\n if all_positive:\n data -= np.min(data, axis=0) - 1\n if return_numpy:\n if n_columns == 1:\n data = data.ravel()\n return data\n else:\n index = _make_index(n_timepoints, index_type)\n if n_columns == 1:\n return pd.Series(data.ravel(), index)\n else:\n return pd.DataFrame(data, index)", "title": "" }, { "docid": "8b2930f3a9e9f4d6355298590a9acb4e", "score": "0.43978932", "text": "def get_timeseries(train_len: int,\n test_len: int,\n time_column_name: str,\n target_column_name: str,\n time_series_id_column_name: str,\n time_series_number: int = 1,\n freq: str = 'H'):\n data_train = [] # type: List[pd.DataFrame]\n data_test = [] # type: List[pd.DataFrame]\n data_length = train_len + test_len\n for i in range(time_series_number):\n X = pd.DataFrame({\n time_column_name: pd.date_range(start='2000-01-01',\n periods=data_length,\n freq=freq),\n target_column_name: np.arange(data_length).astype(float) + np.random.rand(data_length) + i*5,\n 'ext_predictor': np.asarray(range(42, 42 + data_length)),\n time_series_id_column_name: np.repeat('ts{}'.format(i), data_length)\n })\n data_train.append(X[:train_len])\n data_test.append(X[train_len:])\n X_train = pd.concat(data_train)\n y_train = X_train.pop(target_column_name).values\n X_test = pd.concat(data_test)\n y_test = X_test.pop(target_column_name).values\n return X_train, y_train, X_test, y_test", "title": "" }, { "docid": "eb15e361d55e4741e42450b53dd6b72f", "score": "0.43891874", "text": "def __init__(self, name, min_=None, attr=None):\n if not attr:\n Data.__init__(self, name, min_, \"work\")\n else:\n Data.__init__(self, name, min_, \"work, \" + attr)", "title": "" }, { "docid": "ba8b77133361867214a27a47d42bcb92", "score": "0.43856373", "text": "def _attrs(self, x):\n pass", "title": "" }, { "docid": "78c1ec8b604b7dca97d4916d7e0348fd", 
"score": "0.4384557", "text": "def assign_validation_time(ds):\n return ds.assign_coords(validation_time=ds.time+ds.step)", "title": "" }, { "docid": "97c56aac567eec339aa384fd84f4fac8", "score": "0.43774283", "text": "def timeprofile(tlim: tuple, dt: timedelta,\n altkmrange: list, glat: float, glon: float) -> xarray.Dataset:\n\n T = datetimerange(tlim[0], tlim[1], dt)\n\n iono: xarray.Dataset = None\n\n f107 = []\n ap = []\n for t in T:\n iri = IRI(t, altkmrange, glat, glon)\n if iono is None:\n iono = iri\n else:\n iono = xarray.concat((iono, iri), dim='time')\n\n f107.append(iri.f107)\n ap.append(iri.ap)\n\n iono.attrs = iri.attrs\n iono.attrs['f107'] = f107\n iono.attrs['ap'] = ap\n\n return iono", "title": "" }, { "docid": "4f6ecc5fcb9da75eadfefaec7c84ba9b", "score": "0.43753514", "text": "def set_duration(self):\n # CRV init the max and min timestamp\n min_ts = min(self.timestamps)\n max_ts = max(self.timestamps)\n # CRV - calculating the diff here just incase there is an offset error\n # (earliest ts in data set NOT 0)\n self.__duration = max_ts - min_ts\n logging.info('duration set: ' + str(self.__duration))", "title": "" }, { "docid": "64a7c6f103e143f51ee1cb92f406207d", "score": "0.4374278", "text": "def append_time_range(\n self, since: DateTimeType, until: DateTimeType\n ) -> DF:\n\n return self.klines_getter.get(since=since, until=until)", "title": "" }, { "docid": "7ca205021fb868a50f747665256e830d", "score": "0.43709967", "text": "def __init__(self, filename, dims=None, verbose=False):\n\n dataset = Dataset(filename, 'r')\n _noisy = verbose\n self._dims = copy.deepcopy(dims)\n self._mjd_origin = 'days since 1858-11-17 00:00:00'\n self._using_calendar_time = True # for non-calendar runs, we need to skip the datetime stuff.\n\n time_variables = ('time', 'Itime', 'Itime2', 'Times')\n got_time, missing_time = [], []\n for time in time_variables:\n # Since not all of the time_variables specified above are required, only try to load the data if they\n # exist. We'll raise an error if we don't find any of them though.\n if time in dataset.variables:\n setattr(self, time, dataset.variables[time][:])\n got_time.append(time)\n attributes = PassiveStore()\n for attribute in dataset.variables[time].ncattrs():\n setattr(attributes, attribute, getattr(dataset.variables[time], attribute))\n # setattr(self.atts, time, attributes)\n else:\n missing_time.append(time)\n\n if len(missing_time) == len(time_variables):\n warn('No time variables found in the netCDF.')\n else:\n # If our file has incomplete dimensions (i.e. no time), add that here.\n if not hasattr(dims, 'time'):\n\n _Times_shape = None\n _other_time_shape = None\n if 'Times' in got_time:\n _Times_shape = np.shape(self.Times)\n _other_times = [i for i in got_time if i != 'Times']\n if _other_times:\n if getattr(self, _other_times[0]).shape:\n _other_time_shape = len(getattr(self, _other_times[0]))\n else:\n # We only have a single value, so len doesn't work.\n _other_time_shape = 1\n\n if _noisy:\n print('Added time dimension size since it is missing from the input netCDF file.')\n\n if 'Times' in got_time:\n # Check whether we've got missing values and try and make them from one of the others. This sometimes\n # happens if you stop a model part way through a run. 
We check for masked arrays at this point\n # because the netCDF library only returns masked arrays when we have NaNs in the results.\n if isinstance(dataset.variables['Times'][:], np.ma.core.MaskedArray):\n time_data = dataset.variables['Times'][:].data\n bad_time_string = ([b''] * dataset.dimensions['DateStrLen'].size)\n bad_indices = np.argwhere(np.any(time_data == bad_time_string, axis=1)).ravel()\n if np.any(bad_indices):\n if 'time' in got_time:\n for bad_time in bad_indices:\n if self.time[bad_time]:\n bad_date = num2date(self.time[bad_time], units=self._mjd_origin)\n self.Times[bad_time] = list(datetime.strftime(bad_date, '%Y-%m-%dT%H:%M:%S.%f'))\n elif 'Itime' in got_time and 'Itime2' in got_time:\n for bad_time in bad_indices:\n if self.Itime[bad_time] and self.Itime2[bad_time]:\n bad_time_days = self.Itime[bad_time] + self.Itime2[bad_time] / 1000.0 / 60 / 60\n itime_units = getattr(dataset.variables['Itime'], 'units')\n bad_date = num2date(bad_time_days, units=itime_units)\n self.Times[bad_time] = list(datetime.strftime(bad_date), '%Y-%m-%dT%H:%M:%S.%f')\n\n # Overwrite the existing Times array with a more sensibly shaped one.\n try:\n self.Times = np.asarray([''.join(t.astype(str)).strip() for t in self.Times])\n except TypeError:\n # We might have a masked array, so just use the raw data.\n self.Times = np.asarray([''.join(t.astype(str)).strip() for t in self.Times.data])\n\n # Make whatever we got into datetime objects and use those to make everything else. Note: the `time'\n # variable is often the one with the lowest precision, so use the others preferentially over that.\n if 'Times' not in got_time:\n if 'time' in got_time:\n time_units = getattr(dataset.variables['time'], 'units')\n if time_units.split()[-1] == '0.0':\n self._using_calendar_time = False\n if self._using_calendar_time:\n _dates = num2date(self.time, units=time_units)\n else:\n _dates = [None] * len(self.time)\n elif 'Itime' in got_time and 'Itime2' in got_time:\n itime_units = getattr(dataset.variables['Itime'], 'units')\n _dates = num2date(self.Itime + self.Itime2 / 1000.0 / 60 / 60 / 24, units=itime_units)\n else:\n raise ValueError('Missing sufficient time information to make the relevant time data.')\n\n if self._using_calendar_time:\n try:\n try:\n self.Times = np.array([datetime.strftime(d, '%Y-%m-%dT%H:%M:%S.%f') for d in _dates])\n except TypeError:\n self.Times = np.array([datetime.strftime(_dates, '%Y-%m-%dT%H:%M:%S.%f')])\n except ValueError:\n self.Times = np.array([datetime.strftime(d, '%Y/%m/%d %H:%M:%S.%f') for d in _dates])\n # Add the relevant attribute for the Times variable.\n attributes = PassiveStore()\n setattr(attributes, 'time_zone', 'UTC')\n # setattr(self.atts, 'Times', attributes)\n\n if 'time' not in got_time:\n if 'Times' in got_time:\n try:\n # First format\n fmt = '%Y-%m-%dT%H:%M:%S.%f'\n _dates = np.array([datetime.strptime(''.join(t.astype(str)).strip(), fmt) for t in self.Times])\n except ValueError:\n # Alternative format\n fmt = '%Y/%m/%d %H:%M:%S.%f'\n _dates = np.array([datetime.strptime(''.join(t.astype(str)).strip(), fmt) for t in self.Times])\n elif 'Itime' in got_time and 'Itime2' in got_time:\n itime_units = getattr(dataset.variables['Itime'], 'units')\n _dates = num2date(self.Itime + self.Itime2 / 1000.0 / 60 / 60 / 24, units=itime_units)\n else:\n raise ValueError('Missing sufficient time information to make the relevant time data.')\n\n # We're making Modified Julian Days here to replicate FVCOM's 'time' variable.\n self.time = date2num(_dates, 
units=self._mjd_origin)\n # Add the relevant attributes for the time variable.\n attributes = PassiveStore()\n setattr(attributes, 'units', self._mjd_origin)\n setattr(attributes, 'long_name', 'time')\n setattr(attributes, 'format', 'modified julian day (MJD)')\n setattr(attributes, 'time_zone', 'UTC')\n # setattr(self.atts, 'time', attributes)\n\n if 'Itime' not in got_time and 'Itime2' not in got_time:\n if 'Times' in got_time:\n try:\n # First format\n fmt = '%Y-%m-%dT%H:%M:%S.%f'\n _dates = np.array([datetime.strptime(''.join(t.astype(str)).strip(), fmt) for t in self.Times])\n except ValueError:\n # Alternative format\n fmt = '%Y/%m/%d %H:%M:%S.%f'\n _dates = np.array([datetime.strptime(''.join(t.astype(str)).strip(), fmt) for t in self.Times])\n elif 'time' in got_time:\n _dates = num2date(self.time, units=getattr(dataset.variables['time'], 'units'))\n else:\n raise ValueError('Missing sufficient time information to make the relevant time data.')\n\n # We're making Modified Julian Days here to replicate FVCOM's 'time' variable.\n _datenum = date2num(_dates, units=self._mjd_origin)\n self.Itime = np.floor(_datenum)\n self.Itime2 = (_datenum - np.floor(_datenum)) * 1000 * 60 * 60 * 24 # microseconds since midnight\n attributes = PassiveStore()\n setattr(attributes, 'units', self._mjd_origin)\n setattr(attributes, 'format', 'modified julian day (MJD)')\n setattr(attributes, 'time_zone', 'UTC')\n # setattr(self.atts, 'Itime', attributes)\n attributes = PassiveStore()\n setattr(attributes, 'units', 'msec since 00:00:00')\n setattr(attributes, 'time_zone', 'UTC')\n # setattr(self.atts, 'Itime2', attributes)\n\n # Additional nice-to-have time representations.\n if 'Times' in got_time:\n try:\n self.datetime = np.array([datetime.strptime(d, '%Y-%m-%dT%H:%M:%S.%f') for d in self.Times])\n except ValueError:\n self.datetime = np.array([datetime.strptime(d, '%Y/%m/%d %H:%M:%S.%f') for d in self.Times])\n attributes = PassiveStore()\n setattr(attributes, 'long_name', 'Python datetime.datetime')\n # setattr(self.atts, 'datetime', attributes)\n else:\n self.datetime = _dates\n self.matlabtime = self.time + 678942.0 # to MATLAB-indexed times from Modified Julian Date.\n attributes = PassiveStore()\n setattr(attributes, 'long_name', 'MATLAB datenum')\n # setattr(self.atts, 'matlabtime', attributes)\n\n # Remake 'time' from 'datetime' because the former can suffer from precision issues when read in directly\n # from the netCDF variable. Generally, 'datetime' is made from the 'Times' strings, which means it\n # usually has sufficient precision.\n if self._using_calendar_time:\n setattr(self, 'time', np.asarray([date2num(time, units=self._mjd_origin) for time in self.datetime]))\n\n # The time of the averaged data is midnight at the end of the averaging period. Offset by half the\n # averaging interval to fix that, and update all the other time representations accordingly.\n if 'title' in dataset.ncattrs():\n if 'Average output file!' 
in dataset.getncattr('title'):\n if _noisy:\n print('Offsetting average period times by half the interval to place the time stamp at the '\n 'midpoint of the averaging period')\n offset = np.diff(getattr(self, 'datetime')).mean() / 2\n self.datetime = self.datetime - offset\n self.time = date2num(self.datetime, units=self._mjd_origin)\n self.Itime = np.floor(self.time)\n self.Itime2 = (self.time - np.floor(self.time)) * 1000 * 60 * 60 * 24 # microseconds since midnight\n if self._using_calendar_time:\n try:\n self.Times = np.array([datetime.strftime(d, '%Y-%m-%dT%H:%M:%S.%f') for d in self.datetime])\n except TypeError:\n self.Times = np.array([datetime.strftime(self.datetime, '%Y-%m-%dT%H:%M:%S.%f')])\n\n # Clip everything to the time indices if we've been given them. Update the time dimension too.\n if 'time' in self._dims:\n is_datetimes_or_str = False\n if not isinstance(self._dims['time'], slice):\n is_datetimes_or_str = all([isinstance(i, (datetime, str)) for i in self._dims['time']])\n if not isinstance(self._dims['time'], slice) and is_datetimes_or_str:\n # Convert datetime dimensions to indices in the currently loaded data. Assume we've got a list\n # and if that fails, we've probably got a single index, so convert it accordingly.\n try:\n self._dims['time'] = np.arange(*[self._time_to_index(i) for i in self._dims['time']])\n except TypeError:\n self._dims['time'] = np.arange(*[self._time_to_index(self._dims['time'])]) # make iterable\n for time in self:\n setattr(self, time, getattr(self, time)[self._dims['time']])\n\n dataset.close()", "title": "" }, { "docid": "020a0b81204fdcb92de8e76854907b55", "score": "0.43709612", "text": "def temporal_slice(self, temporal_slice):\n msg = 'temporal_slice must be tuple, list, or slice'\n assert isinstance(temporal_slice, (tuple, list, slice)), msg\n if isinstance(temporal_slice, slice):\n self._temporal_slice = temporal_slice\n else:\n check = len(temporal_slice) <= 3\n msg = (\n 'If providing list or tuple for temporal_slice length must '\n 'be <= 3'\n )\n assert check, msg\n self._temporal_slice = slice(*temporal_slice)\n if self._temporal_slice.step is None:\n self._temporal_slice = slice(\n self._temporal_slice.start, self._temporal_slice.stop, 1\n )\n if self._temporal_slice.start is None:\n self._temporal_slice = slice(\n 0, self._temporal_slice.stop, self._temporal_slice.step\n )", "title": "" }, { "docid": "ebd878ab1ce819fb044e51f0fb760d76", "score": "0.4369135", "text": "def prepare_tag_data(input_data, time_col, rec_col, max_time, pas_tol):\n tag_data = input_data.copy()\n tag_data['time_diff'] = tag_data[time_col].diff()/pd.Timedelta(seconds=1)\n tag_data['sec_since_start'] = tag_data['time_diff'].cumsum().fillna(0)\n # make gaps when time diff > max time between furthest receivers\n gaps = tag_data[time_col].diff() > pd.Timedelta(seconds=max_time)\n # cumsum of falses and trues creates groups\n tag_data['groups_obs'] = gaps.cumsum()\n # idem for tracks\n gaps2 = tag_data[time_col].diff() > pd.Timedelta(minutes = pas_tol)\n tag_data['groups_pas'] = gaps2.cumsum()\n # save soundspeed for YAPS model and synced time for splitting in tracks\n soundspeed = tag_data.set_index(['groups_obs'])['soundspeed'].groupby('groups_obs').mean()\n SyncTime = tag_data.set_index(['groups_obs'])[time_col].groupby('groups_obs').first()\n # reshape the resulting dataframe\n toa_data = tag_data.set_index(['groups_pas','groups_obs',rec_col])['sec_since_start'].unstack()\n # put back soundspeed and synced time\n toa_data.columns = 
toa_data.columns.astype(str)\n toa_data['soundspeed'] = soundspeed.values\n toa_data[time_col]= SyncTime.values\n\n return toa_data", "title": "" }, { "docid": "b4d42a38eb149d7d0b1f32f44f1632bc", "score": "0.4362892", "text": "def offset_gal(da, U, tref, truncate_to_grid=False):\n dt = (da.time - tref).dt.seconds.item()\n\n da_ = da.copy()\n x_ = da_.xt.data\n y_ = da_.yt.data\n\n da_[\"x_offset\"] = (\"xt\",), _wrap_add(x_, dt * U[0], x_.min(), x_.max())\n da_[\"y_offset\"] = (\"yt\",), _wrap_add(y_, dt * U[1], y_.min(), y_.max())\n\n da_ = da_.swap_dims(dict(xt=\"x_offset\", yt=\"y_offset\"))\n da_ = da_.drop([\"xt\", \"yt\"])\n da_ = da_.sortby([\"x_offset\", \"y_offset\"])\n\n if truncate_to_grid:\n da_[\"xt\"] = (\"x_offset\",), da.xt.values\n da_[\"yt\"] = (\"y_offset\",), da.yt.values\n da_ = da_.swap_dims(dict(x_offset=\"xt\", y_offset=\"yt\"))\n da_ = da_.drop([\"x_offset\", \"y_offset\"])\n return da_\n else:\n da_ = da_.rename(dict(x_offset=\"xt\", y_offset=\"yt\"))\n return da_", "title": "" }, { "docid": "9424d4f9b6d2cfdd17f245a0eb2e4f30", "score": "0.43589246", "text": "def mark(self):\n with self.__timeseries_mutex:\n count = len(self.__timeseries)\n start = self.__mark[0]\n self.__mark = (\n count,\n self.__timeseries[-1][0][0],\n self.__timeseries[-1][0][1],\n )\n return self.__timeseries[start:count]", "title": "" }, { "docid": "78a29f04a9dbc6ff8a85060f3749d8b7", "score": "0.43511015", "text": "def setXAttriArray(self, xattri_name_list):\n xattri = self.dtframe\n xattri = xattri[xattri_name_list]\n\n self.xattri_array = np.array(xattri)\n tmpxattri = []\n '''\n select the mean of duration to predict a ylabel\n the final xattri_array is origin xattri_array[duration,len-1] mean list\n '''\n for i in range(self.duration, len(self.xattri_array)):\n tmparray = self.xattri_array[i-self.duration:i] # i+1???\n # print(DataFrame(tmparray))\n tmpxattri.append(np.mean(tmparray, axis=0)) # mean of the columns as a row\n self.xattri_array = tmpxattri[:-5]\n self.xpre_array = tmpxattri[5:]\n \n # n\n # normalization\n min_max_scaler = preprocessing.MinMaxScaler()\n self.xattri_array = min_max_scaler.fit_transform(self.xattri_array)\n self.xpre_array = min_max_scaler.fit_transform(self.xpre_array)\n # print(self.xattri_array[-5:])\n # print(self.xpre_array[-5:])\n return self.xattri_array", "title": "" }, { "docid": "1475b1174ec23bb7d262c1e1c1ee21ba", "score": "0.43454343", "text": "def __setattr__(self, name: str, value: Any) -> None:\n raise ValueError(\"Cannot assign to an immutable TimeValueRange\")", "title": "" }, { "docid": "24103438689c13c227f9df4502c94a5f", "score": "0.43369702", "text": "def get_extended_data(series, series_length, min_length, horizon, cycle_length, augment=True, test_mode=True):\n ext_arr = []\n weights = []\n first_row = int(len(series) * 0.2) if test_mode else 0\n for i in range(first_row, len(series)):\n row = series.iloc[i].values\n row = row[np.logical_not(np.isnan(row))]\n if len(row) <= series_length + horizon:\n if series_length == min_length:\n num_missing = series_length + horizon - len(row)\n row_to_add = np.zeros([series_length + horizon])\n row_to_add[-len(row):] = row[-(series_length + horizon):]\n if num_missing > cycle_length:\n row_to_add[:num_missing] = np.mean(row[:-horizon]) # mean of non-test values\n else:\n row_to_add[:num_missing] = row[cycle_length - num_missing:cycle_length] # copy from same period in cycle\n ext_arr.append(row_to_add)\n weights.append(1)\n else:\n num_to_add = cycle_length if augment else 1\n num_extra = 
min(num_to_add, len(row) - series_length - horizon)\n for j in range(num_extra):\n if j == 0:\n ext_arr.append(row[-(series_length + horizon):])\n else:\n ext_arr.append(row[-(series_length + horizon + j):-j])\n weights.append(1 / num_extra)\n train_ext = np.stack(ext_arr, 0)\n train_x_ext = train_ext[:, :-horizon]\n train_y_ext = train_ext[:, -horizon:]\n weights = np.array(weights)\n return train_x_ext, train_y_ext, weights", "title": "" }, { "docid": "c6d64dc4f624cf498d0dbbaa81e26b79", "score": "0.4333962", "text": "def _make_ts(ts):\n return ArrayTimeSeries(ts[:,0],ts[:,1])", "title": "" }, { "docid": "e7aac6292ff6bf85dfa21b9c39155ed6", "score": "0.4330967", "text": "def evg_short_datetime_attrib(attrib_name: str) -> property:\n return evg_attrib(attrib_name, parse_evergreen_short_datetime)", "title": "" }, { "docid": "8b99f0cd38f533526dd31170c7acd7ab", "score": "0.43305635", "text": "def __getattr__(self, attr):\n if attr in self.SCALES and self.scale is not None:\n cache = self.cache[\"scale\"]\n if attr not in cache:\n if attr == self.scale:\n tm = self\n else:\n tm = self.replicate()\n tm._set_scale(attr)\n if tm.shape:\n # Prevent future modification of cached array-like object\n tm.writeable = False\n cache[attr] = tm\n return cache[attr]\n\n elif attr in self.FORMATS:\n return self.to_value(attr, subfmt=None)\n\n elif attr in TIME_SCALES: # allowed ones done above (self.SCALES)\n if self.scale is None:\n raise ScaleValueError(\n \"Cannot convert TimeDelta with \"\n \"undefined scale to any defined scale.\"\n )\n else:\n raise ScaleValueError(\n f\"Cannot convert {self.__class__.__name__} with scale \"\n f\"'{self.scale}' to scale '{attr}'\"\n )\n\n else:\n # Should raise AttributeError\n return self.__getattribute__(attr)", "title": "" }, { "docid": "6aa2ad4797b4a06bf06ec55dc7ce253c", "score": "0.43177652", "text": "def test_getset_overextended():\n idf = IDF(StringIO(\"\"))\n wm = idf.newidfobject(\"WindowMaterial:GlazingGroup:Thermochromic\", Name=\"Gumby\")\n wm.Optical_Data_Temperature_2000 = 2000 # test __setattr__\n assert wm.Optical_Data_Temperature_2000 == 2000\n wm[\"Optical_Data_Temperature_2001\"] = 2001 # rest __setitem__\n assert wm.Optical_Data_Temperature_2001 == 2001\n assert wm.Optical_Data_Temperature_2002 == \"\" # test __getattr__\n assert wm[\"Optical_Data_Temperature_2003\"] == \"\" # test __getitem__", "title": "" }, { "docid": "8a7c1b4fbe01a6b63011c168b1dbcf10", "score": "0.43147382", "text": "def __init__(\n self,\n start,\n end,\n peak_event_length=3,\n noise_scaling=1.0,\n trend_scaling=(1.0, 2.0),\n peak_scaling=(2.0, 5.0),\n peak_list=None,\n weekly_seasonal_scaling=(1.0, 2.0),\n yearly_seasonal_scaling=(0.2, 2.0),\n exp_trend=True\n ):\n self.start = pd.to_datetime(start)\n self.end = pd.to_datetime(end)\n\n self.peak_event_length = peak_event_length\n self.noise_scaling = noise_scaling\n self.trend_scaling = trend_scaling\n self.peak_scaling = peak_scaling\n self.peak_list = peak_list\n self.weekly_seasonal_scaling = weekly_seasonal_scaling\n self.yearly_seasonal_scaling = yearly_seasonal_scaling\n self.exp_trend = exp_trend\n\n self.x_ts = pd.date_range(start, end, freq='D')\n self.x = np.arange(len(self.x_ts))\n self.x_size = len(self.x)", "title": "" }, { "docid": "00973c0e566731ad706a08c4d4910f10", "score": "0.43083304", "text": "def prepMethodDates(subset,query,limit=120):\n for methodName,methodData in subset.iteritems():\n t0 = datetime.now()\n methodData['elapsed'] = (getDates(methodData) - t0).dt.days\n methodData['method'] = methodName\n 
methodData = methodData[methodData.elapsed.abs() < limit]\n subset[methodName] = methodData\n return subset", "title": "" }, { "docid": "79befb81a66c653c462c09b6ca651247", "score": "0.43056884", "text": "def __getitem__(self, i: int):\n x = self._xs.iloc[i : i + self.sample_size]\n y = self._ys.iloc[\n (i + self.sample_size) : (i + self.sample_size + self.output_size)\n ]\n y_raw = self._ys_raw.iloc[\n (i + self.sample_size) : (i + self.sample_size + self.output_size), :\n ]\n\n # return X, Y, Y_dates\n return (\n np.squeeze(self._scaler.transform(x.to_numpy())).astype(\"float32\"),\n np.squeeze(y).astype(\"float32\"),\n np.squeeze(y_raw.to_numpy()).astype(\"float32\"),\n self._dates[\n (i + self.sample_size) : (i + self.sample_size + self.output_size)\n ],\n )", "title": "" }, { "docid": "1d2c027910d259838e2746c0956be932", "score": "0.4298879", "text": "def initialize(self):\n self.df = self.df.T\n start = float(datetime.strptime(self.start_date, \"%Y-%m-%d %H:%M:%S.%f\").timestamp())\n # end = float(datetime.strptime(self.end_date, \"%Y-%m-%d %H:%M:%S.%f\").timestamp())\n self.df = self.df.loc[self.start_date:self.end_date]\n print(self.df.head(1).index)\n print(self.df.tail(1).index)\n print(self.df.shape)\n # for some reason my timestamps were off of what I was really working with, it was exactly 1 hour off....\n # need to fix this somehow...\n self.current_candle['start'] = start + 3600\n self.current_candle['end'] = start + 3600 + self.frequency", "title": "" }, { "docid": "548cddc5670f2c7cefee76f29a1199d0", "score": "0.42979616", "text": "def mark(self):\n with self.__timeseries_mutex:\n count = len(self.__timeseries)\n start = self.__mark\n self.__mark = count\n return self.__timeseries[start:count]", "title": "" }, { "docid": "94b27b993c94f29f8439e38093d4f689", "score": "0.42866233", "text": "def smoothed_atr(data, length):\n trng = true_range(data)\n res = super_smoother(trng, length)\n return res", "title": "" }, { "docid": "ad512cd36ff93bc73f255ccbd0094487", "score": "0.428586", "text": "def set_exposure_time(self, device, exposure_set):\n device.nodemap['ExposureAuto'].value = 'Off'\n\n\n Exposure_Time = device.nodemap['ExposureTime']\n\n min_exposure = Exposure_Time.min\n max_exposure = Exposure_Time.max\n print('exposure min , max:',min_exposure, max_exposure)\n\n if (exposure_set >= min_exposure and\n exposure_set <= max_exposure):\n Exposure_Time.value = exposure_set\n else:\n Exposure_Time.value = max_exposure\n\n self.exposure_time = Exposure_Time.value\n self.exposure_time_max = max_exposure\n self.exposure_time_min = min_exposure", "title": "" }, { "docid": "a15a649d2f38091606417c95f6c3ebbd", "score": "0.428324", "text": "def create_col(data, time_type, valid_id):\n\n if str(time_type) in data.columns:\n print(f\"Column '{str(time_type)}' already exists in the database.\")\n return data\n\n # Create a new series to store the data\n index = pandas.Index([], dtype=int)\n ser = pandas.Series(index=index, dtype=float)\n\n ser = manual_customize_series(ser, valid_id)\n\n df = pandas.DataFrame({str(time_type): ser})\n\n print(f\"Created new column '{str(time_type)}'\")\n return df.combine_first(data)", "title": "" }, { "docid": "3241225e926c2d079eb7691ed48fb378", "score": "0.42804483", "text": "def __init__(self,low_w=300,high_w=1000,dt_time=24):\n self.low_w=low_w\n self.high_w=high_w\n self.dt_time=dt_time", "title": "" }, { "docid": "24f5f1ba693609919fa11e9d82ea694b", "score": "0.42751554", "text": "def add_epoch_metadata_column(self, *args, **kwargs):\n raise DeprecationWarning(\"Please use 
NWBFile.add_epoch_column\")", "title": "" }, { "docid": "b15ded2dba5000c9866e3de025f6ce43", "score": "0.42690796", "text": "def rolling_spike_tagging(df_raw, var_col, tag_col, N=3, Q=3, sigma=1.5):\r\n try:\r\n\r\n T = N+Q+1 # To take into account T0, also!\r\n df_tagged = df_raw.copy()\r\n \r\n \r\n df_tagged[\"bothrolling\"] = df_tagged[[var_col]].rolling(T, min_periods=1).mean().shift(-Q)\\\r\n .fillna(df_tagged[[var_col]][::-1].rolling(T, min_periods=Q).mean().shift(-N)[::-1])\r\n \r\n df_tagged[\"rolstd\"] = df_tagged[[var_col]].rolling(T, min_periods=1).std().shift(-Q)\\\r\n .fillna(df_tagged[[var_col]][::-1].rolling(T, min_periods=Q).std().shift(-N)[::-1])\r\n \r\n #print(\"here 3\")\r\n df_tagged[\"UCL\"] = df_tagged[\"bothrolling\"] + sigma*df_tagged[\"rolstd\"] \r\n df_tagged[\"LCL\"] = df_tagged[\"bothrolling\"] - sigma*df_tagged[\"rolstd\"] \r\n \r\n #df_tagged[tag_col].loc[df_tagged[var_col] < df_tagged['LCL'] ] = -10 #\"negspike\"\r\n #df_tagged[tag_col].loc[df_tagged[var_col] > df_tagged['UCL'] ] = 180 #\"posspike\"\r\n \r\n df_tagged.loc[df_tagged[var_col] < df_tagged['LCL'] , tag_col] = \"negspike\" #-10 #\"negspike\"\r\n df_tagged.loc[df_tagged[var_col] > df_tagged['UCL'] , tag_col] = \"posspike\" #180 #\"posspike\"\r\n \r\n #df_tagged.loc[: , [\"bothrolling\", \"new_cases\"] ].plot(figsize=(12,12))\r\n \r\n df_complete = df_tagged.loc[: , [var_col, tag_col] ]\r\n\r\n\r\n except Exception as err:\r\n print(err)\r\n\r\n return df_complete", "title": "" }, { "docid": "78586a513ac1790446012018db3c0b52", "score": "0.4266224", "text": "def mark(self):\n with self.__timeseries_mutex:\n count = len(self.__timeseries)\n start = self.__mark[0]\n self.__mark = (count, self.__timeseries[-1][0])\n return self.__timeseries[start:count]", "title": "" }, { "docid": "bec16fd6005b3f2f74afe94786ea3ddd", "score": "0.42648348", "text": "def _build_time_taken(self):\n\t\ttime_taken = []\n\t\tfor submit in self.submissions:\n\t\t\ttime_taken.append(submit['time_spent'])\n\t\t\tuser_id = str(submit['user_id'])\n\t\t\tself.data_set[user_id]['time_taken'] = submit['time_spent']\n\t\toverall_average = round(sum(time_taken) / len(time_taken), 2)\n\t\tself.data_set['Overall']['time_taken'] = overall_average", "title": "" }, { "docid": "9b7405462c41ecea1995c1f0920e56a5", "score": "0.4263303", "text": "def truncate(\n self,\n *,\n offset: Seconds = 0.0,\n duration: Optional[Seconds] = None,\n keep_excessive_supervisions: bool = True,\n preserve_id: bool = False,\n _supervisions_index: Optional[Dict[str, IntervalTree]] = None,\n ) -> \"DataCut\":\n assert (\n offset >= 0\n ), f\"Offset for truncate must be non-negative (provided {offset}).\"\n new_start = max(\n add_durations(self.start, offset, sampling_rate=self.sampling_rate), 0\n )\n until = add_durations(\n offset,\n duration if duration is not None else self.duration,\n sampling_rate=self.sampling_rate,\n )\n new_duration = add_durations(until, -offset, sampling_rate=self.sampling_rate)\n assert new_duration > 0.0, f\"new_duration={new_duration}\"\n # duration_past_end = (new_start + new_duration) - (self.start + self.duration)\n duration_past_end = add_durations(\n new_start,\n new_duration,\n -self.start,\n -self.duration,\n sampling_rate=self.sampling_rate,\n )\n if duration_past_end > 0:\n # When the end of the MonoCut has been exceeded, trim the new duration to not exceed the old MonoCut's end.\n new_duration = add_durations(\n new_duration, -duration_past_end, sampling_rate=self.sampling_rate\n )\n\n if _supervisions_index is None:\n 
criterion = overlaps if keep_excessive_supervisions else overspans\n new_time_span = TimeSpan(start=0, end=new_duration)\n new_supervisions = (\n segment.with_offset(-offset) for segment in self.supervisions\n )\n supervisions = [\n segment\n for segment in new_supervisions\n if criterion(new_time_span, segment)\n ]\n else:\n tree = _supervisions_index[self.id]\n # Below we select which method should be called on the IntervalTree object.\n # The result of calling that method with a range of (begin, end) is an iterable\n # of Intervals that contain the SupervisionSegments matching our criterion.\n # We call \"interval.data\" to obtain the underlying SupervisionSegment.\n # Additionally, when the method is tree.envelop, we use a small epsilon to\n # extend the searched boundaries to account for possible float arithmetic errors.\n if keep_excessive_supervisions:\n intervals = tree.overlap(begin=offset, end=offset + new_duration)\n else:\n intervals = tree.envelop(\n begin=offset - 1e-3, end=offset + new_duration + 1e-3\n )\n supervisions = []\n for interval in intervals:\n # We are going to measure the overlap ratio of the supervision with the \"truncated\" cut\n # and reject segments that overlap less than 1%. This way we can avoid quirks and errors\n # of limited float precision.\n olap_ratio = measure_overlap(\n interval.data, TimeSpan(offset, offset + new_duration)\n )\n if olap_ratio > 0.01:\n supervisions.append(interval.data.with_offset(-offset))\n\n return fastcopy(\n self,\n id=self.id if preserve_id else str(uuid4()),\n start=new_start,\n duration=new_duration,\n supervisions=sorted(supervisions, key=lambda s: s.start),\n )", "title": "" }, { "docid": "373bc812e1e96623c3a0c722ab6a9bb1", "score": "0.42593348", "text": "def set_point_attributes(self, att):\n triangulate.set_point_attributes(self.hndls[0], att)", "title": "" }, { "docid": "cee8d8471e3ffa47ed90da27482e9c97", "score": "0.4255326", "text": "def mock_stim_data() -> VisualStimData:\r\n num_of_rats = np.random.randint(10, 21)\r\n dims = ('electrode', 'time', 'repetition')\r\n coords = {'electrode': np.arange(10), 'time': np.linspace(0, 2, num=10000), 'repetition': np.arange(4)}\r\n rat_ids = list(range(num_of_rats))\r\n experimenter_name = ['Leonardo', 'Donatello', 'Michelangelo', 'Raphael']\r\n genders = ['F', 'M']\r\n rats_arrays = {}\r\n # for each rat in the experiment, create mock attributes and a DataArray with mock data\r\n for rat in rat_ids:\r\n attrs = {'rat_ID': rat,\r\n 'room_temp': np.random.randint(20, 30),\r\n 'room_humidity': np.random.randint(30, 70),\r\n 'experimenter_name': np.random.choice(experimenter_name),\r\n 'rat_gender': np.random.choice(genders)\r\n }\r\n rats_arrays[rat] = xr.DataArray(np.random.random((len(coords['electrode']),\r\n len(coords['time']),\r\n len(coords['repetition']))),\r\n dims=dims, coords=coords, attrs=attrs\r\n )\r\n rats_ds = xr.Dataset(rats_arrays)\r\n return VisualStimData(data=rats_ds)", "title": "" }, { "docid": "59d11f2f79249146b031084df32c43a2", "score": "0.4249781", "text": "def __init__(self, instance: AdvantageAirData, ac_key: str, zone_key: str) -> None:\n super().__init__(instance, ac_key, zone_key)\n self._attr_name = f'{self._zone[\"name\"]} temperature'\n self._attr_unique_id += \"-temp\"", "title": "" }, { "docid": "875a86d61af16066cd053c2755589fef", "score": "0.4249197", "text": "def trim_time_interval(D,start_time,end_time):\n\n #create a blank dictionary to return\n R = { } \n\n #find start_index and end_index\n [nshots,nalts]=D.times.shape\n 
indices=np.arange(nshots)\n time_mask=D.times[:,0]>=start_time\n if all(time_mask==0):\n print('tz_utilities--no shots in requested time interval') \n return R\n start=min(indices[time_mask])\n time_mask=D.times[:,0]<=end_time\n #test for empty array\n if len(time_mask)==0:\n print('here') \n return R\n check_ind = indices[time_mask]\n # does indices contain anything? \n if len(check_ind) == 0:\n return R\n\n end=max(indices[time_mask])+1\n \n # iterate through all the items in a dictionary\n for (name, value) in vars(D).items(): \n if isinstance(value,t_array) or \\\n isinstance(value,z_array) or \\\n isinstance(value,tz_array): \n R[name] = value[start:end,:]\n else:\n R[name] = value\n\n # create a new object of the same type as D\n \n dtype = type(D)\n Robj = dtype()\n # transplant the attributes to the new object, which is technically sneaky\n vars(Robj).update(R) \n return Robj", "title": "" }, { "docid": "c363eab763729d167b93019352bec3", "score": "0.42399076", "text": "def _metadata_changed(self, old, new):\n\n self.cross_plot.value_range.low = self.minz\n self.cross_plot.value_range.high = self.maxz\n self.cross_plot2.value_range.low = self.minz\n self.cross_plot2.value_range.high = self.maxz\n if \"selections\" in self._image_index.metadata:\n x_ndx, y_ndx = self._image_index.metadata[\"selections\"]\n if y_ndx and x_ndx:\n xdata, ydata = self._image_index.get_data()\n xdata, ydata = xdata.get_data(), ydata.get_data()\n self.pd.update_data(\n line_value=self._image_value.data[y_ndx, :],\n line_value2=self._image_value.data[:, x_ndx],\n scatter_index=array([xdata[x_ndx]]),\n scatter_index2=array([ydata[y_ndx]]),\n scatter_value=array([self._image_value.data[y_ndx, x_ndx]]),\n scatter_value2=array([self._image_value.data[y_ndx, x_ndx]]),\n scatter_color=array([self._image_value.data[y_ndx, x_ndx]]),\n scatter_color2=array([self._image_value.data[y_ndx, x_ndx]])\n )\n else:\n self.pd.update_data({\"scatter_value\": array([]),\n \"scatter_value2\": array([]), \"line_value\": array([]),\n \"line_value2\": array([])})", "title": "" }, { "docid": "fac688bd5fcbcd1828029c2738fdbc99", "score": "0.42382714", "text": "def _setup_truncation(self, Rho_min=None, Rho_max = None,\n Lambda_min = None, Lambda_max = None):\n st = self.state\n if hasattr(st, 'W'):\n if (Rho_min is None) or (Rho_max is None):\n W_emin, W_emax = speigen_range(st.W)\n if (Rho_min is None):\n Rho_min = 1./W_emin\n if (Rho_max is None):\n Rho_max = 1./W_emax\n st.Rho_min = Rho_min\n st.Rho_max = Rho_max\n if hasattr(st, 'M'):\n if (Lambda_min is None) or (Lambda_max is None):\n M_emin, M_emax = speigen_range(st.M)\n if (Lambda_min is None):\n Lambda_min = 1./M_emin\n if (Lambda_max is None):\n Lambda_max = 1./M_emax\n st.Lambda_min = Lambda_min\n st.Lambda_max = Lambda_max", "title": "" }, { "docid": "ead5806d7336c98e917549ff0cdd98d5", "score": "0.4236538", "text": "def __init__(self, time_series_1=None, time_series_2=None, metadata=None):\n if time_series_1 is not None:\n self._ts1 = time_series_1\n self._num_timestamps_1 = self._ts1.shape[0]\n\n if time_series_2 is not None:\n self._ts2 = time_series_2\n self._num_timestamps_2 = self._ts2.shape[0]\n\n if metadata is not None:\n self._metadata = metadata\n self._num_variates = self._metadata.shape[0]", "title": "" }, { "docid": "dce62d4ea2856159a45bee0c74dc0298", "score": "0.4233318", "text": "def __getitem__(self, i: int):\n x = self._xs.iloc[i : i + self.sample_size]\n y = self._ys.iloc[\n (i + self.sample_size) : (i + self.sample_size + self.output_size)\n ]\n\n # 
return X, Y, Y_dates\n return (\n self._scaler.transform(x).astype(\"float32\"),\n np.squeeze(y).astype(\"float32\"),\n self._dates[\n (i + self.sample_size) : (i + self.sample_size + self.output_size)\n ],\n )", "title": "" }, { "docid": "a8903789a24c114976386027a043a29b", "score": "0.423231", "text": "def sickness_timeline(self):\n if self.sick == 1:\n self.sicktime=self.sicktime+1", "title": "" }, { "docid": "88e8e40d66ca9e1e16258579c41c733a", "score": "0.42303124", "text": "def test_get_metric_timeseries_data(self):\n pass", "title": "" }, { "docid": "7bb65a8926e0504f479fe55e86e0aad7", "score": "0.42287418", "text": "def min(self) -> pli.Series:", "title": "" }, { "docid": "6a598be8a324eedeb9373116173757f8", "score": "0.42279032", "text": "def test_Data_trunc(self):\n for x in (1, -1):\n a = 0.9 * x * self.a\n c = np.trunc(a)\n\n d = cf.Data(a)\n e = d.trunc()\n self.assertIsNone(d.trunc(inplace=True))\n self.assertTrue(d.equals(e, verbose=2))\n self.assertEqual(d.shape, c.shape)\n self.assertTrue((d.array == c).all())", "title": "" }, { "docid": "15abadc412812021f9c3c0403953c91f", "score": "0.4225773", "text": "def force_created_at(self, t=None):\n if t is None:\n t = util.now_int()\n else:\n util.check_attr_type(t, int)\n self._h5group.set_attr(\"created_at\", util.time_to_str(t))", "title": "" }, { "docid": "341d8c01bec2e394e07b084a6c61c8a4", "score": "0.42255077", "text": "def __create_x_series(self):\n\n # initial values\n current_step = self.__start_date\n\n # continue up until (and including) the end_date\n while current_step.date() < self.__end_date.date():\n # date accuracy is enough\n # data points are stored as datetime, edit here if this changes\n self.x_series.append(current_step)\n self.x_series_utc.append(calendar.timegm(current_step.utctimetuple()) * 1000)\n\n # if days\n if self.__step_size == 'D':\n current_step = current_step + relativedelta(days=1)\n\n # if months\n elif self.__step_size == 'M':\n current_step = current_step + relativedelta(months=1)", "title": "" }, { "docid": "02973ee969ecf09214fee94942b2201f", "score": "0.42250326", "text": "def add_absolute_time_sample(dataobject):\n # Finding the coordinates for the dataobject\n coords = [coord.unit.name for coord in dataobject.coordinates]\n coord_index = 0\n dimension_list_time = []\n for coordinate in coords:\n # will need to set for the time index\n if coordinate == 'Start Time in int(Sample)':\n dimension_list_time = list(dataobject.coordinates[coord_index].dimension_list)+dimension_list_time\n elif coordinate == 'Rel. Time in int(Sample)':\n dimension_list_time = list(dataobject.coordinates[coord_index].dimension_list)+dimension_list_time\n coord_index = coord_index+1\n dimension_list_time = list(np.unique(np.asarray(dimension_list_time)))\n\n name = 'Time'\n time_coord_value = dataobject.coordinate('Start Time in int(Sample)')[0] +\\\n dataobject.coordinate('Rel. Time in int(Sample)')[0]\n time_coord = flap.Coordinate(name=name, unit='Second', values=time_coord_value, shape=np.shape(time_coord_value),\n mode=flap.CoordinateMode(equidistant=False), dimension_list=dimension_list_time)\n name = 'Sample'\n sample_coord_value = dataobject.coordinate('Start Sample in int(Sample)')[0] +\\\n dataobject.coordinate('Rel. 
Sample in int(Sample)')[0]\n sample_coord = flap.Coordinate(name=name, unit='n.a.', values=sample_coord_value, shape=np.shape(sample_coord_value),\n mode=flap.CoordinateMode(equidistant=False), dimension_list=dimension_list_time)\n dataobject.add_coordinate_object(time_coord)\n dataobject.add_coordinate_object(sample_coord)", "title": "" }, { "docid": "3d0f40b15c80c4ab73fcbcbb5fc8e9cc", "score": "0.42213795", "text": "def state_space_creation(data_frame,load_bins_number = 10, lmp_bins_number = 10):\n #Use a 15 min resampled dataset for joining datasets and creating state space\n data_frame_2 = data_frame.resample('15T').pad()\n SOC_max = 0.9*14\n SOC_min = 0.1*14\n \n data_frame_2[\"MW\"] = data_frame_2[\"MW\"].apply(lambda x:x/1000)\n data_frame_2.rename(columns={\"MW\":\"LMP_kWh\"},inplace=True)\n \n ##Drop last value added for ease of parsing\n data_frame_2.drop(data_frame_2.iloc[len(data_frame_2)-1].name,axis=0,inplace=True)\n \n # For 15 min sampled data, create a new column to match indices to the load, tariff dataset from 2014.\n data_frame_2[\"date_month_2014\"] = data_frame_2.index\n\n new_dateimt = []\n for i in data_frame_2[\"date_month_2014\"]:\n if i.year == 2018:\n i = datetime.datetime(2014,i.month,i.day,i.hour,i.minute,i.second)\n else:\n i = datetime.datetime(2015,i.month,i.day,i.hour,i.minute,i.second)\n new_dateimt.append(i)\n\n data_frame_2[\"date_month_2014\"]= new_dateimt\n \n #Load in the load_tariff dataset\n data_sep = pd.read_csv(\"load_tariff.csv\")\n \n #Convert data into relevant types\n data_sep[\"dt\"] = pd.to_datetime(data_sep[\"dt\"])\n data_sep[\"tariff\"] = data_sep[\"tariff\"].to_numpy()\n data_sep[\"solar\"] = data_sep[\"solar\"].to_numpy()\n data_sep[\"grid\"] = data_sep[\"grid\"].to_numpy()\n data_sep[\"gridnopv\"] = data_sep[\"gridnopv\"].to_numpy()\n \n #Parse data\n data_sep.drop(\"local_15min\",axis=1,inplace=True)\n data_sep.drop(\"Unnamed: 0\",axis=1,inplace=True)\n \n #Set index to be DateTime Index\n data_sep.set_index(\"dt\",inplace=True)\n \n #Merge the 2 datasets on the 2014 datetime column\n df = pd.merge(data_frame_2, data_sep,left_on=\"date_month_2014\",right_index=True)\n \n #Drop Column\n df.drop(\"date_month_2014\",axis=1,inplace=True)\n df.drop(\"grid\",axis=1,inplace=True)\n df.drop(\"solar\",axis=1,inplace=True)\n \n df.rename(columns={\"tariff\":\"TOU\"},inplace=True)\n df.rename(columns={\"gridnopv\":\"Load\"},inplace=True)\n \n #Create Binned LMP, Load, TOU columns\n df[\"binned_LMP\"],bins_LMP = pd.cut(df['LMP_kWh'], lmp_bins_number,labels = range(lmp_bins_number),retbins=True)\n df[\"binned_Load\"],bins_Load = pd.cut(df[\"Load\"],load_bins_number,labels=range(load_bins_number),retbins=True)\n \n #Create bins for SOC\n bins_SOC = []\n for i in np.arange(SOC_min,SOC_max,5*.25):\n bins_SOC.append(round(i,2))\n bins_SOC.append(SOC_max)\n \n #Create bins for TOU\n unique_TOU = {j:i for i,j in enumerate(df[\"TOU\"].unique())}\n rows_TOU = []\n rows_Load = []\n for i, j in enumerate(df.iterrows()):\n rows_TOU.append(unique_TOU[j[1][\"TOU\"]])\n \n \n bins_TOU = df[\"TOU\"].unique()\n\n #Create binned TOU to be mapping to indices\n df[\"binned_TOU\"] = rows_TOU\n \n #Create mapping bins Dict\n bins_dict = {\"LMP\":bins_LMP,\"Load\":bins_Load,\"TOU\":bins_TOU,\"SOC\":bins_SOC}\n \n #Create SOC binned/unbinned column\n df[\"SOC\"] = [5.15] + [0]*(len(df) -1)\n df[\"binned_SOC\"] = [3] + [0]*(len(df)-1)\n \n #save csv and dictionary\n df.set_index(data_sep.index,drop=True,inplace=True)\n df.to_csv(\"Discretized_State_Space.csv\")\n \n 
def save_obj(obj, name ):\n with open(name + '.pkl', 'wb') as f:\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)\n\n save_obj(bins_dict,\"bins_Dict\")", "title": "" }, { "docid": "2bb1fd0e84dd94e7b54388b176b8a937", "score": "0.42183587", "text": "def truncate(self, before=None, after=None, axis='major'):\n axis = self._get_axis_name(axis)\n index = self._get_axis(axis)\n\n beg_slice, end_slice = index.slice_locs(before, after)\n new_index = index[beg_slice:end_slice]\n\n return self.reindex(**{axis : new_index})", "title": "" }, { "docid": "abc4254990f0fccdba653aa9b4a40ebb", "score": "0.42139998", "text": "def data_bytime(data,start,end,side='both'):\r\n if side == 'both':\r\n manipulating = data\r\n else:\r\n log.critical('wrong args for side')\r\n raise ValueError('wrong args for side')\r\n# print manipulating\r\n return manipulating.loc[manipulating.time.between(start,end)]", "title": "" }, { "docid": "1adfb64278db7e9aa21fd0c02d2c4c52", "score": "0.4213514", "text": "def characterizeAttributes(self, raw_data):\n print(\"DataManagement: Characterizing Attributes...\")\n attributeID = 0\n for att in range(len(raw_data[0])):\n if att != self.instanceID_ref and att != self.action_ref: #Get just the attribute columns (ignores phenotype and instanceID columns)\n for inst in range(len(raw_data)):\n target = raw_data[inst][att]\n if not self.attribute_info[attributeID][0]: #If attribute is discrete\n if target in self.attribute_info[attributeID][1] or target == cons.label_missing_data:\n pass #NOTE: Could potentially store state frequency information to guide learning.\n else:\n self.attribute_info[attributeID][1].append(target)\n else: #If attribute is continuous\n\n #Find Minimum and Maximum values for the continuous attribute so we know the range.\n if target == cons.label_missing_data:\n pass\n elif float(target) > self.attribute_info[attributeID][1][1]: #error\n self.attribute_info[attributeID][1][1] = float(target)\n elif float(target) < self.attribute_info[attributeID][1][0]:\n self.attribute_info[attributeID][1][0] = float(target)\n else:\n pass\n attributeID += 1", "title": "" }, { "docid": "58216f6a9b044ea86a300f33fea13831", "score": "0.4212517", "text": "def _sample_up(self, series: Series) -> Series:\n method = self.settings[\"sample_up\"]\n freq = self.settings[\"freq\"]\n\n success = True\n if method in [\"backfill\", \"bfill\", \"pad\", \"ffill\"]:\n series = series.asfreq(freq, method=method)\n elif method is None:\n success = False\n else:\n if method == \"mean\":\n series = series.asfreq(freq).fillna(series.mean())\n elif method == \"interpolate\":\n series = series.asfreq(freq).interpolate(method=\"time\")\n elif method == \"divide\":\n dt = series.index.to_series().diff() / to_offset(freq).delta\n series = series / dt\n series = series.asfreq(freq, 
method=\"bfill\")\n elif isinstance(method, float):\n series = series.asfreq(freq).fillna(method)\n else:\n success = False\n\n if success:\n logger.info(\"Time Series '%s' were sampled up using %s.\", self.name, method)\n else:\n logger.warning(\n \"Time Series '%s': User-defined option for sample_up %s is not \"\n \"supported\",\n self.name,\n method,\n )\n\n return series", "title": "" }, { "docid": "f29a055fd8a5b722ee1aecb3b037bce8", "score": "0.42116043", "text": "def field(self, name: str) -> Series:", "title": "" } ]
e29578e2e5aab1a500f30973814aac38
Do not return anything, modify matrix inplace instead.
[ { "docid": "401475e234cb27b5cd2d5f92cd413843", "score": "0.0", "text": "def setZeroes(matrix):\n m, n = len(matrix), len(matrix[0])\n row, column = set(), set()\n\n for i in range(m):\n for j in range(n):\n if matrix[i][j] == 0:\n row.add(i)\n column.add(j)\n\n for i in row:\n for j in range(n):\n matrix[i][j] = 0\n\n for i in column:\n for j in range(m):\n matrix[j][i] = 0", "title": "" } ]
[ { "docid": "1f49d776fa96295bcaed2ee8e8797509", "score": "0.70165074", "text": "def as_mutable_matrix(matrix):\n return matrix.as_mutable()", "title": "" }, { "docid": "25a772c33720f902dc942d7194d9f308", "score": "0.6949418", "text": "def matrix(self, matrix):", "title": "" }, { "docid": "ac4cf0d911d12d56127b681629fa1ab9", "score": "0.6793752", "text": "def __copy_matrix(self, matrix):\n return copy.deepcopy(matrix)", "title": "" }, { "docid": "95462abb25e41a497112ece0320d1efb", "score": "0.65606683", "text": "def _apply(self, matrix, ises=None):\n matrix.handle.setNullSpace(self.nullspace)", "title": "" }, { "docid": "aee7dc1947e153b0efd58fb5cab7d04e", "score": "0.6535764", "text": "def matrix(mat):\n pass", "title": "" }, { "docid": "d9d42d2dd7646f733b47d0445f3faa49", "score": "0.6496019", "text": "def alterMatrix(matrix, permutation_matrix):\n global alpha_dict\n global alpha_count\n\n # Subtract matrix with the product of permutation matrix and current alpha\n matrix = np.array(abs(np.subtract(matrix, (alpha_dict[\"a_\" + str(alpha_count)] * permutation_matrix))))\n\n logging.info('** New Matrix Computed for Next Iteration **')\n logging.info(matrix)\n\n return matrix", "title": "" }, { "docid": "a725180618bfe1039732a75be8933989", "score": "0.6336262", "text": "def matrix(self):\n # apply all mutations before matrix returning\n if not self._matrix_updated:\n self._update_mutated()\n\n return self._matrix_mutated", "title": "" }, { "docid": "7ea703f573a3ac8b023cfad0af628f88", "score": "0.63079816", "text": "def copy(self):\n return self.__class__(self.matrix.copy())", "title": "" }, { "docid": "80c9514a1a512ec5acc573f590fc713f", "score": "0.62828046", "text": "def augment_matrix(matrix):\r\n return (len(matrix[0]) == len(matrix) and matrix) or do_augment(matrix)", "title": "" }, { "docid": "723b538113251322d70c38930d9b4fd9", "score": "0.6179696", "text": "def set_matrix(self, new_mat):\n new = copy(self)\n new.matrix = new_mat\n return new", "title": "" }, { "docid": "1154637f665500972991950a4f3a5b9f", "score": "0.6121463", "text": "def set_matrix(self, new_mat):\n new = deepcopy(self)\n new.matrix = new_mat\n return new", "title": "" }, { "docid": "de83f3125a28515c52e14b336abb7b0d", "score": "0.6099029", "text": "def test_does_not_modify_original_matrix_if_no_zeroes(self):\n matrix = [\n [1, 2, 3, 4, 5],\n [1, 2, 3, 4, 5],\n [1, 2, 3, 4, 5],\n [1, 2, 3, 4, 5],\n [1, 2, 3, 4, 5],\n ]\n set_zeroes(matrix)\n self.assertEqual(matrix, [\n [1, 2, 3, 4, 5],\n [1, 2, 3, 4, 5],\n [1, 2, 3, 4, 5],\n [1, 2, 3, 4, 5],\n [1, 2, 3, 4, 5],\n ])", "title": "" }, { "docid": "9099f176e728fef3cf794f8e4f6477f3", "score": "0.6047374", "text": "def update(self, row, col, val):\n original = self.matrix[row][col]\n if col != 0:\n original -= self.matrix[row][col-1]\n\n diff = val - original\n \n for y in xrange(col, len(self.matrix[0])):\n self.matrix[row][y] += diff", "title": "" }, { "docid": "be9819cdcaf8ba6a916a0e8846d6f1a5", "score": "0.60159284", "text": "def transform(self, matrix: Transform):\n # noinspection GrazieInspection", "title": "" }, { "docid": "0c0fdbf053ef1a367353faa8012e7833", "score": "0.5976847", "text": "def rotate(self, matrix: List[List[int]]) -> None:\n backup = copy.deepcopy(matrix)\n size = len(matrix)\n for i in range(0, size):\n for j in range(0, size):\n matrix[i][j] = backup[size - j - 1][i]", "title": "" }, { "docid": "42da88ab690bab156b5f7a0dd500a6c0", "score": "0.5976395", "text": "def calculate(self, matrix):\n self.__matrix = self.__matrix * matrix", "title": "" }, { 
"docid": "5ee7c73443d08e98bbb86c94a4418794", "score": "0.59749293", "text": "def transform(matrix):", "title": "" }, { "docid": "ce5156663f75c9d9d5598f4a6496e129", "score": "0.59544015", "text": "def reduce_matrix(M):\n i,j = mat_min(M)\n #i, j = matrix_min(M)\n # add the ith row to the jth row and overwrite the ith row with those values\n M[i,:] = M[j,:] + M[i,:]\n\n # delete the jth row\n M = np.delete(M, (j), axis=0)\n\n # similarly with the columns\n M[:,i] = M[:,j] + M[:,i]\n M = np.delete(M, (j), axis=1)\n np.fill_diagonal(M,0) # not sure necessary.\n return i,j,M", "title": "" }, { "docid": "167176479e04fc6b89ebd7b14b75198c", "score": "0.5946982", "text": "def fix_Jmatrix(self):\n return self._fix_Jmatrix", "title": "" }, { "docid": "64327a91ffe9fd78c83a2f750bc6e47d", "score": "0.59418863", "text": "def mutation(matrix):\n probability = round(1/(x*y), 2) # round to 2 decimal points - https://gist.github.com/jackiekazil/6201722\n randomNum = round(random.uniform(0,1), 2)\n a, b = switchValues(x, y)\n # Using math.isclose inspired from - https://docs.python.org/3/library/math.html\n foo = False\n while foo == False:\n if math.isclose(randomNum, probability, rel_tol=probability//2, abs_tol=probability//2): # gives a low probability of a mutation\n # If it is to be mutated, swap values at a random position in the matrix - as long as it is feasible\n # This selects what row and column will be mutated\n row = random.randint(0,a) \n col = random.randint(0,b)\n matrix[row][col]\n if matrix[row][col] == 1:\n matrix[row][col] = 0\n \n elif matrix[row][col] == 0:\n matrix[row][col] = 1\n \n if is_feasible(matrix) != -1:\n foo = True\n \n return matrix", "title": "" }, { "docid": "a5cf57e3b2ac44d6eab3ec6b04114d03", "score": "0.5926051", "text": "def __setitem__(self, i: 'int', value: 'SbMatrix') -> \"void\":\n return _coin.SoMFMatrix___setitem__(self, i, value)", "title": "" }, { "docid": "31ce7a71a544473316fc57e7afed8d0b", "score": "0.5899244", "text": "def matrix_invert(A):", "title": "" }, { "docid": "654dfc65c6956af9d16af0c7e2f4ad70", "score": "0.5834605", "text": "def populate_matrix(matrix, num_rows, num_columns):\n for i in range(0, num_rows):\n for j in range(0, num_columns):\n matrix[i][j] = i + j", "title": "" }, { "docid": "1bdd86265a05eddeb7ac6d714b67983f", "score": "0.58179444", "text": "def copy(self):\n matrix = Matrix(self.rows, self.cols)\n matrix.items = self.items.copy()\n return matrix", "title": "" }, { "docid": "1a12246bbf8dfd8145d0e39c62a1bbed", "score": "0.57849616", "text": "def _matrix(X, y):\n\tpass", "title": "" }, { "docid": "6b568282626a0fdffeb0d5f037327f4a", "score": "0.5779126", "text": "def _update_mutated(self):\n for mutation in self.mutations_pool:\n if mutation.config.get('state', None) == 'broken':\n mutation.config['state'] = None\n # apply mutations\n matrix, layers_index_reverse, branchs_end, branchs_counter = self.mutations_applier(\n self._matrix, self._layers_index_reverse,\n self.branchs_end, self.branchs_counter)\n\n # add finisher\n matrix, layers_index_reverse, branchs_end, branchs_counter = self.finisher_applier(\n matrix, layers_index_reverse,\n branchs_end, branchs_counter)\n\n self._matrix_updated = True\n self._matrix_mutated = matrix\n\n self._layers_index_reverse_updated = True\n self._layers_index_reverse_mutated = layers_index_reverse", "title": "" }, { "docid": "e335408c48caf019bd8bd2cb06f968c1", "score": "0.57789344", "text": "def rotate(self, matrix) -> None:\n #Supposed to do an in-place sort, I need a variable that stores the 
replaced value so I don't lose it. \n #(x, y) -> (y, maxColumn-x) probably works for everything. \n '''\n rowLen = len(matrix)\n columnLen = len(matrix[0])\n \n temp = matrix[0][0]\n #I cannot go in order. If I replace a value, I have to look at that value next otherwise no point in storing that value. \n #The time complexity is rowLen*columnLen so I can use that as forloop counter. \n row = 0\n column = 0\n #!This kind of motion is problematic because it will only land on the edge corners and keep going. And I don't know how to effectively make it switch. \n \n for i in range(rowLen*columnLen):\n temp, matrix[column][columnLen-1-row] = matrix[column][columnLen-1-row], temp\n row, column = column, columnLen-1-row\n '''\n\n #*At least solve it using brute force. \n\n rowLen = len(matrix)\n columnLen = len(matrix[0])\n new = []\n for i in range(rowLen):\n temp = [0] * columnLen\n new.append(temp)\n\n for i in range(rowLen):\n for j in range(columnLen):\n new[j][columnLen-1-i] = matrix[i][j]\n\n matrix[:] = new", "title": "" }, { "docid": "582abd01b853c596e5363cd0296dcb64", "score": "0.5775526", "text": "def _update_kernel_matrix_(self,idx):\n c = -0.5/self.sigma**2\n f = lambda x: np.exp(c*x)\n mask = np.ones(len(self.similarity_matrix), dtype=bool)\n mask[idx] = False\n k_row = f(self.similarity_matrix[idx][mask])\n self.kernel_matrix = np.insert(self.kernel_matrix, idx, k_row, axis=0)\n k_col = np.insert(k_row, idx, 1.)\n self.kernel_matrix = np.insert(self.kernel_matrix, idx, k_col, axis=1)", "title": "" }, { "docid": "b59b56ce97e58aae3955ce7089a30152", "score": "0.5767161", "text": "def update_state_matrix(game, state_matrix):\n state = game.get_state()\n state_matrix[0, 0:10] = state\n state_matrix[1, 10:20] = state\n state_matrix[2, 20:30] = state\n return state_matrix", "title": "" }, { "docid": "85a75414f879cfe2a89f4a303668e5d7", "score": "0.57107437", "text": "def transform(self, matrix):\n new_pos = v3.transform(matrix, self.pos)\n v3.set_vector(self.pos, new_pos)", "title": "" }, { "docid": "ff3e3c393595f276a1e28655c5ceb9d2", "score": "0.5707264", "text": "def get_copy(self):\n return FitnessMatrix(self.evo, self.matrix.copy())", "title": "" }, { "docid": "45dca95115dd0a09e1a3e2524f61877a", "score": "0.5705233", "text": "def _inplace(self, inplace):\n if inplace:\n return self\n else:\n return self.deepcopy()", "title": "" }, { "docid": "e7d897e04734197eef1d292edc06b7f9", "score": "0.5697748", "text": "def RowSwitching(M, i1, i2):\n temporary_row = M[i1,:].copy()\n M[i1,:] = M[i2,:]\n M[i2,:] = temporary_row", "title": "" }, { "docid": "361abe34ac647f1f2a7de3dd9efcc123", "score": "0.5692374", "text": "def inv_matrix(self) -> np.ndarray:\n if self._inv_matrix is None:\n self._inv_matrix = inv(self._matrix)\n self._inv_matrix.setflags(write=False)\n return self._inv_matrix", "title": "" }, { "docid": "dea65d955059a201af15c5113dc60542", "score": "0.56914276", "text": "def adjustMatrix(matrix):\n\n for i in range(len(matrix[0])):\n matrix[0][i] = \"right\"\n\n for i in range(len(matrix)):\n matrix[i][0] = \"down\"\n\n matrix[0][0] = 0\n\n return matrix", "title": "" }, { "docid": "691cc1963cf6cf65dc8365cd20165775", "score": "0.5684049", "text": "def update_matrix(self, flat_matrix, action, nox):\n new_score = flat_matrix[0][ action ] + nox\n flat_matrix[ 0, action ] = new_score\n if new_score == 5:\n self.score += 1\n self.reward += 1\n # if action resulted in 5, update the self.update_valid_actions\n self.update_valid_actions( action )\n\n self.matrix = flat_matrix.reshape( -1, 
matrix_width)", "title": "" }, { "docid": "4e2d4325dffb8a040923bdd47e7da2be", "score": "0.5682486", "text": "def update_matrix():\n\t\n\tglobal data, membership, iterations, centers, old_obj, new_obj\n\n\told_obj=new_obj\t\n\tfor i in range(N):\n\t\tfor j in range(C):\n\t\t\tnewU=0\n\t\t\tfor k in range(C):\n\t\t\t\tnewU+=(distance(data[i],centers[j])/distance(data[i],centers[k]))**(2/(m-1))\t\n\n\t\t\tnewU=1.0/newU\t\n\n\t\t\tmembership[i][j]=(newU)\n\tnew_obj=objective_func(membership)\t\t\n\tsquare_difference=distance(old_obj,new_obj)\n\n\t#print square_difference\t\t\n\treturn square_difference", "title": "" }, { "docid": "43790a1e65445767b5a512bd921b1c24", "score": "0.56682146", "text": "def _unweight(self):\n matrix = self.matrix.copy()\n matrix[:, :len(self.xdef)] = (matrix[:, :len(self.xdef)] /\n matrix[:, [-1]])\n return matrix", "title": "" }, { "docid": "a8373c8c205f59fab8e3808826de533a", "score": "0.5660857", "text": "def swap_nodes_in_matrix(\n matrix: np.ndarray,\n node1: int,\n node2: int,\n inplace: bool = False) -> np.ndarray:\n if not inplace:\n modified_matrix = np.copy(matrix)\n else:\n modified_matrix = matrix\n modified_matrix[:, [node1, node2]] = modified_matrix[:, [node2, node1]]\n modified_matrix[[node1, node2], :] = modified_matrix[[node2, node1], :]\n return modified_matrix", "title": "" }, { "docid": "4f984dd947b809ac7e1b2de25a46363c", "score": "0.5646326", "text": "def shrinkMatrix(oldMatrix, newSize):\n oldSize = oldMatrix.shape[0]\n rowMatrix = numpy.zeros((oldSize, newSize))\n for i in range(oldSize):\n mat = shrink(oldMatrix[i,:], newSize)\n rowMatrix[i,:] = mat\n\n newMatrix = numpy.zeros((newSize, newSize))\n for i in range(newSize):\n mat = shrink(rowMatrix[:,i], newSize)\n newMatrix[:,i] = mat\n\n #import matrixfix\n #newMatrix = matrixfix.normalize(newMatrix)\n return newMatrix", "title": "" }, { "docid": "3f77fa2750ecb0666f91b0e8e34461c5", "score": "0.5639953", "text": "def rotate(self, matrix) -> None:\n \n matrix[:] = matrix[::-1]\n for i in range(0, len(matrix)):\n for j in range(i, len(matrix[0])):\n matrix[i][j], matrix[j][i] = matrix[j][i], matrix[i][j]\n\n # print(matrix)", "title": "" }, { "docid": "07520ddb3ac5a38e04c8eba0fadcabd1", "score": "0.56396496", "text": "def rotate(self, matrix: List[List[int]]) -> None:\n matrix.reverse()\n \n for i in range(len(matrix)):\n for j in range(i):\n matrix[i][j], matrix[j][i] = matrix[j][i], matrix[i][j]", "title": "" }, { "docid": "0486a6e70b1d9778d86717c776d2581f", "score": "0.5636156", "text": "def __init__(self, matrix):\n for row in matrix:\n for col in range(1, len(row)):\n row[col] += row[col-1]\n self.matrix = matrix", "title": "" }, { "docid": "cf9a8d66cdfedfa9afb4de3002b2e090", "score": "0.5633861", "text": "def zero_matrix(matrix):", "title": "" }, { "docid": "6c724a48b2828f8300d70eacb7aba23f", "score": "0.5632847", "text": "def rotate(self, matrix: List[List[int]]) -> None:\n matrix[:] = [[matrix[j][i] for j in range(len(matrix)-1,-1,-1)] for i in range(len(matrix[0]))]", "title": "" }, { "docid": "1a80468f31129d3461e80bfd8e59aced", "score": "0.56202996", "text": "def __copy__(self, other_matrix):\r\n if type(other_matrix) == Matrix:\r\n other_matrix = other_matrix.matrix_value\r\n\r\n self.matrix_value = []\r\n for line in other_matrix:\r\n self.matrix_value.append(line)", "title": "" }, { "docid": "1128d3796036bf623f25df2d8c7c3276", "score": "0.5618958", "text": "def extend_matrix(input_matrix):\n ny,nx = np.shape(input_matrix)\n temp = np.ma.zeros((ny,nx+2),dtype=input_matrix.dtype)\n 
temp[:,0] = input_matrix[:,-1]\n temp[:,1:-1] = input_matrix[:,:]\n temp[:,-1] = input_matrix[:,0]\n return temp", "title": "" }, { "docid": "0c04ecd680b2ed5bd92c0e6222f234ea", "score": "0.5580113", "text": "def copyFrom(self, field: 'SoField') -> \"void\":\n return _coin.SoMFMatrix_copyFrom(self, field)", "title": "" }, { "docid": "c8a9457e501403c96113478737455623", "score": "0.55668795", "text": "def ensure_mutable(arg):\n # TODO: e.g. sp.sympify converts a MutableMatrix to ImmutableMatrix\n # maybe this changes in future sympy releases\n # which might make this function obsolete (?)\n if isinstance(arg, sp.matrices.MatrixBase):\n return as_mutable_matrix(arg)\n else:\n return arg", "title": "" }, { "docid": "9312abd4bfc5e1e758de1f483e0bf426", "score": "0.55599654", "text": "def setZeroes(self, matrix: List[List[int]]) -> None:\n temp = []\n for i,row in enumerate(matrix):\n for j,col in enumerate(row):\n if col == 0:\n temp = i,j\n break\n if len(temp) == 0:\n return matrix\n for i in range(len(temp)):\n for j in range(len(matrix[temp[i]])):\n matrix[temp[i]][j] = 0\n for i in range(len(matrix)):\n for j in range(len(temp)):\n matrix[i][temp[j]] = 0\n return matrix", "title": "" }, { "docid": "b3705aa21e2a6b86af38562190b4f178", "score": "0.5559346", "text": "def rotate(self, matrix: List[List[int]]) -> None:\n L = len(matrix)\n for i in range(L//2):\n matrix[i], matrix[L-i-1] = matrix[L-i-1], matrix[i] #pay attention on matrix index range\n \n for row in range(L):\n for col in range(row):\n if row != col:\n matrix[row][col], matrix[col][row] = matrix[col][row], matrix[row][col]", "title": "" }, { "docid": "b4303c8ed339862f34d7c4671a568684", "score": "0.55581325", "text": "def glue_matrix(self):\n coarse = self.coarse\n dummy = self.dummy\n matshape = self.matshape\n myrank = self.myrank\n mat = self.mat\n\n comm = MPI.COMM_WORLD\n # exchange matrices\n\n N = coarse.N\n row = np.zeros((N*27,))\n col = np.zeros((N*27,))\n data = np.zeros((N*27,))\n counter = 0\n k0, k1, j0, j1, i0, i1 = coarse.domainindices\n nz, ny, nx = dummy.shape\n ok = True\n rks = range(matshape[0])\n rjs = range(matshape[1])\n ris = range(matshape[2])\n master = mat[0, 0, 0]\n for k, j, i in itertools.product(rks, rjs, ris):\n ka = k0+k*nz\n ja = j0+j*ny\n ia = i0+i*nx\n # core to core communication\n sender = mat[k, j, i]\n if myrank == sender:\n obj = {\"A\": dummy.A.tocoo(),\n \"size\": dummy.size,\n \"domainindices\": dummy.domainindices}\n for dest in mat.ravel():\n if dest != myrank:\n # in python MPI can send any dictionary\n # this makes the matrix exchange almost easy\n # the same implementation in Fortran\n # would be quite a nightmare\n comm.send(obj, dest=dest, tag=myrank)\n else:\n obj = comm.recv(source=sender, tag=sender)\n comm.Barrier()\n\n A = obj[\"A\"]\n size = obj[\"size\"]\n domainindices = obj[\"domainindices\"]\n\n n = len(A.row)\n\n # row\n kk, jj, ii = index_to_triplet(A.row, size, domainindices)\n\n kk += ka-k0\n jj += ja-j0\n ii += ia-i0\n idx = triplet_to_index(\n (kk, jj, ii), coarse.size, coarse.domainindices)\n row[counter:counter+n] = idx\n if (max(idx) >= N):\n print(\"myrank=%i, sender=%i / row\" % (myrank, sender))\n\n ok = False\n\n # col\n kk, jj, ii = index_to_triplet(A.col, size, domainindices)\n kk += ka-k0\n jj += ja-j0\n ii += ia-i0\n idx = triplet_to_index(\n (kk, jj, ii), coarse.size, coarse.domainindices)\n\n col[counter:counter+n] = idx\n if (max(idx) >= N):\n print(\"myrank=%i, sender=%i / col\" % (myrank, sender))\n ok = False\n\n # data\n data[counter:counter+n] = 
A.data\n\n counter += n\n\n MPI.COMM_WORLD.Barrier()\n if ok:\n coarseA = sparse.coo_matrix((data, (row, col)), shape=(N, N))\n\n else:\n print(\"matrix not defined for rank %i\" % myrank)\n print(\"subdomains were\", mat)\n raise ValueError\n\n return coarseA", "title": "" }, { "docid": "30831ff4fc1596d0f7440ea6b15ae684", "score": "0.5548757", "text": "def rotate(self, matrix: List[List[int]]) -> None:\n numrows = len(matrix)\n Mat = [[0 for i in range(numrows)] for j in range(numrows)]\n for i in range(numrows):\n for j in range(numrows):\n Mat[j][numrows-1-i] = matrix[i][j]\n for i in range(numrows):\n for j in range(numrows):\n matrix[i][j] = Mat[i][j]", "title": "" }, { "docid": "6eb9ff0aed90eec58f62b5ca9a9d22e3", "score": "0.55384904", "text": "def get_matrix(self):", "title": "" }, { "docid": "beed948ba289533528d2efb5cacb2a33", "score": "0.5537502", "text": "def copy(self, other_matrix):\r\n if type(other_matrix) == Matrix:\r\n other_matrix = other_matrix.matrix_value\r\n\r\n self.matrix_value = []\r\n for line in other_matrix:\r\n self.matrix_value.append(line)", "title": "" }, { "docid": "dafeb70ab174cfc5ab573d58127c8ed7", "score": "0.55370224", "text": "def zero_bias(matrix):\n matrix[:,0] = 0\n return(matrix)", "title": "" }, { "docid": "891bbd785448cb984d4d005be8014dd1", "score": "0.5535586", "text": "def _expand_matrix(matrix):\n shape = matrix.shape\n new_shape = (shape[0] + 1, shape[1] + 1)\n\n # Create a matrix of zeros with the new size, and map our old one onto it.\n new_matrix = np.zeros(new_shape)\n new_matrix[:shape[0], :shape[1]] = matrix\n\n return new_matrix", "title": "" }, { "docid": "703c0d4f52afe8a09d10c3473f165957", "score": "0.55096227", "text": "def copyFrom(self, field: 'SoField') -> \"void\":\n return _coin.SoSFMatrix_copyFrom(self, field)", "title": "" }, { "docid": "b0033f496fb773af12cf2882854c8c5e", "score": "0.5507292", "text": "def correct_contact_matrix(cmat, Ni_old, Ni):", "title": "" }, { "docid": "04c52ac44c22a986631a19fcb9a5c6fc", "score": "0.55063164", "text": "def matrix_elem(matrix, i, j, val=None):\n if val is None:\n return matrix[j,i]\n else:\n matrix[j,i] = val", "title": "" }, { "docid": "0f41faf5d2370fc5408668b291061556", "score": "0.5487163", "text": "def cleanUpMatrix(A):\n \n TOL=1.e-14\n A = A + np.zeros((len(A), len(A[0])),dtype=np.complex_)\n for k in range(0,len(A)):\n for l in range(0,len(A)):\n if abs(A[k,l].real) < TOL:\n a_kl = 1j*(A[k,l].imag)\n A[k,l]= a_kl \n if abs(A[k,l].imag) < TOL:\n a_kl = A[k,l].real \n A[k,l] = a_kl\n \n return A", "title": "" }, { "docid": "3c85e7a5ebde3d9289d3ef35dbe60731", "score": "0.5474175", "text": "def normalize_matrix(data):\n if data is None or len(data) == 0:\n return data\n for i in range(len(data[0])):\n data[:, i] = normalize(data[:, i])\n return data", "title": "" }, { "docid": "5d02bc68b61410d5879e2a9914399d5d", "score": "0.547069", "text": "def corestrict(self, mask, inplace=False):\n if not isinstance(self.matrix, (FSRMatrix, FSRRotation2dMatrix,\n FSRRotation3dMatrix)):\n raise NotImplementedError(\n 'Corestriction is not implemented for {0} sparse storage.'\n .format(type(self.matrix).__name__))\n nrow = np.sum(mask)\n mask_ = np.repeat(mask, self.matrix.block_shape[0])\n out = self.copy()\n out.matrix = type(self.matrix)(\n (nrow * self.matrix.block_shape[0], self.matrix.shape[1]),\n data=self.matrix.data[mask_])\n if isinstance(self.matrix, FSRMatrix):\n out.broadcastable_shapeout = (nrow,)\n if self.shapeout is not None:\n ndims_out = len(self.broadcastable_shapeout)\n 
out.shapeout = (nrow,) + self.shapeout[ndims_out:]\n else:\n out.shapeout = (nrow, out.matrix.block_shape[0])\n if inplace:\n self.delete()\n return out", "title": "" }, { "docid": "0bc8b3bd9d60ad78a9ec1e66a7b6dfd8", "score": "0.5468587", "text": "def _fix_columns(self) -> None:\n self._state = np.transpose(self._state)\n self._fix_rows()\n self._state = np.transpose(self._state)", "title": "" }, { "docid": "4b770b9644a34841bc1f7eb25adb944f", "score": "0.5466078", "text": "def rotate_matrix_in_place(matrix): \n width = len(matrix)\n count = 0\n while (width > 1):\n rotate_border(matrix, count, width)\n count += 1\n width = width - 1\n return matrix", "title": "" }, { "docid": "31a4ca83ed2e984d2552d2f77bf73a96", "score": "0.5451967", "text": "def rotate(self, matrix: List[List[int]]) -> None:\n row=len(matrix)\n col=len(matrix[0])\n for i in range(row//2):\n for j in range(col):\n matrix[i][j],matrix[row-i-1][j]=matrix[row-i-1][j],matrix[i][j]\n \n for i in range(row):\n for j in range(i):\n matrix[i][j],matrix[j][i]=matrix[j][i],matrix[i][j]", "title": "" }, { "docid": "f5c8c315dec08f04f536d064780b4427", "score": "0.5445616", "text": "def rotate(self, matrix) :\n n=len(matrix)\n matrix1=[[0]*n for i in range(n)]\n for i in range(n) :\n for j in range(n) :\n matrix1[i][j]=matrix[i][j]\n #print(matrix1)\n for i in range(n) :\n for j in range(n) :\n #print(n-1-i,j,matrix1[n-1-i][j])\n matrix[j][i]=matrix1[n-1-i][j]\n #print(matrix1)", "title": "" }, { "docid": "4c5790c8e92e0a6d4f68da8c6375cc5c", "score": "0.54444695", "text": "def update1( B ):\n width = len(B[0])\n height = len(B)\n \n for row in range(height):\n for col in range(width):\n if row == col:\n B[row][col] = 1\n else:\n B[row][col] = 0 # else not needed here,but OK", "title": "" }, { "docid": "06e6816e34afd1d1a0945d2a57805696", "score": "0.5438366", "text": "def transfer_values(self, initial_matrix, transposed_matrix):\n\n i_rows, i_cols = initial_matrix.shape\n for i in range(self.rows):\n for j in range(i_cols+1):\n if j == 0:\n if i != self.rows-1:\n transposed_matrix[i,0] = 0\n else:\n transposed_matrix[i,0] = 1\n elif j == i_cols:\n transposed_matrix[i,-1] = initial_matrix[i,j-1]\n else:\n if i == self.rows-1:\n transposed_matrix[i,j] = (-1) * initial_matrix[i,j-1]\n else:\n transposed_matrix[i,j] = initial_matrix[i,j-1]\n\n return transposed_matrix", "title": "" }, { "docid": "e41ad8d1471f9cd1bdb15289a6785338", "score": "0.54313326", "text": "def identity(self):\n new_data = self.data.copy()\n return IFSMatrix(self.width, new_data)", "title": "" }, { "docid": "910776405bb9d360e74fe5fa5662cc3b", "score": "0.5430543", "text": "def set_value(self, M):\n mx = LinearOperator.convert_to_matrix(M)\n if(mx.shape != (self.dim, self.dim)):\n raise ValueError(\"Argument must be a (%d,%d) matrix!\"\n % (self.dim, self.dim))\n self.base[:, :] = _np.array(mx)\n self.dirty = True", "title": "" }, { "docid": "1d107efff65b7de33c8d31143a269d6c", "score": "0.54285914", "text": "def _matmat(self, matrix):\n for op in reversed(self._operators):\n matrix = op.matmat(matrix)\n\n return matrix", "title": "" }, { "docid": "0844a17291f975fff26fa239a84acef8", "score": "0.540584", "text": "def set(state: 'SoState', node: 'SoNode') -> \"SbMatrix &\":\n return _coin.SoModelMatrixElement_set(state, node)", "title": "" }, { "docid": "34ea3494c0c9d4047954b522e7c29c7f", "score": "0.54055315", "text": "def append(self, matrix):\n self[:] = matrix * self # be careful for the applying order\n self._matrices.append(matrix)", "title": "" }, { "docid": 
"718072b500e8f30e60c328f9350e585e", "score": "0.54032046", "text": "def get_matrix(self): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "215b66b3ee617603f01dd9954ab54aa6", "score": "0.5402907", "text": "def apply_mask(the_mask, the_matrix):\n return np.multiply(the_matrix, the_mask)", "title": "" }, { "docid": "eb4c3d9f32a1ad15a8040e516166257a", "score": "0.53969896", "text": "def setZeroes(self, matrix) -> None:\n # 56ms 14.1MB\n lines, rows = set(), set()\n for i in range(0, len(matrix)):\n for j in range(0, len(matrix[i])):\n if matrix[i][j] == 0:\n lines.add(i)\n rows.add(j)\n\n for i in range(0, len(matrix)):\n if i in lines:\n matrix[i] = [0] * len(matrix[i])\n continue\n\n for j in range(0, len(matrix[i])):\n if j in rows:\n matrix[i][j] = 0\n\n return matrix", "title": "" }, { "docid": "eb688b7ef7d1778484d83e131c55dc7d", "score": "0.5395055", "text": "def clone(self):\n\n return Matrix33(row0=self.row0.clone(), row1=self.row1.clone(), row2=self.row2.clone())", "title": "" }, { "docid": "8e33bdc92a07cd5028312a19fc25acca", "score": "0.5393759", "text": "def set_fixed_matrix(self, fixed_matrix):\n self.data = list(fixed_matrix)\n self.nrows = len(self.data) \n self.ncols = bitlen(reduce(__or__, self.data, 0))\n self.ops, self.registers = make_addition_operations(\n fixed_matrix, self.granularity)\n self.n_registers = max(self.registers.values()) + 1\n self.n_registers = max(self.n_registers, self.nrows)\n self.copylist = self._make_copylist()", "title": "" }, { "docid": "a6ea61f9f12c9a44e200566894e5a597", "score": "0.5392793", "text": "def preproc_matrix(matrix, min_entries):\n \n #first remove all rows which have atmost min_entries nonzero entries\n matrix = remove_rows(matrix, min_entries)[0]\n #do the same for all columns (by applying the same function on the transposed matrix)\n #keep track if columns were removed for recursion\n matrix_T, change = remove_rows(matrix.transpose(), min_entries)\n matrix = matrix_T.transpose()\n \n #if columns were removed from the matrix, it is necessary to do another recursion\n if change:\n matrix, change = preproc_matrix(matrix, min_entries)\n \n return matrix, change", "title": "" }, { "docid": "facbede8637f00998d3e4b9dc9471ced", "score": "0.53926826", "text": "def setMatrix(self, mat: 'SbMatrix') -> \"void\":\n return _coin.SoTransform_setMatrix(self, mat)", "title": "" }, { "docid": "bb245a4516fd799b55fa7ed828ca0bcc", "score": "0.53920937", "text": "def swap_two_elements_in_matrix(\n matrix: np.ndarray,\n x1: int,\n y1: int,\n x2: int,\n y2: int,\n inplace: bool = True) -> np.ndarray:\n n, m = matrix.shape\n if ((x1 < 0 or x1 >= n) or\n (x2 < 0 or x2 >= n) or\n (y1 < 0 or y1 >= m) or\n (y2 < 0 or y2 >= m)):\n raise ValueError(\n 'Given coordinates do not fall into matrix dimensions.'\n ' Matrix size: ({}, {}), Coordinates: ({}, {}), ({}, {}).'.format(\n n, m, x1, y1, x2, y2))\n if not inplace:\n modified_matrix = matrix.copy()\n else:\n modified_matrix = matrix\n first_element_content = modified_matrix[x1, y1]\n modified_matrix[x1, y1] = modified_matrix[x2, y2]\n modified_matrix[x2, y2] = first_element_content\n return modified_matrix", "title": "" }, { "docid": "36941f04ce03ca0f65d9b02c49109c76", "score": "0.5389721", "text": "def startEditing(self) -> \"SbMatrix *\":\n return _coin.SoMFMatrix_startEditing(self)", "title": "" }, { "docid": "1718c3651c85eb8b350ad54483069058", "score": "0.5375095", "text": "def set(self, row, col, value):\r\n\t\ttry:\r\n\t\t\tself.matrix[row][col] = 
value\r\n\t\texcept:\r\n\t\t\traise Exception(\"row or col is not inside the matrix boundaries.\")", "title": "" }, { "docid": "3958ddab8e2b51f65668109f6c32480a", "score": "0.5351069", "text": "def matrix(*args, **kwargs):\n \n pass", "title": "" }, { "docid": "3395e9acd1e66f391c0622dc22daae90", "score": "0.534834", "text": "def __neg__(self):\n ret_mat = self.data\n for i in range(0,8):\n ret_mat[i] = -ret_mat[i]\n return ret_mat", "title": "" }, { "docid": "83547cb9419f8842147f7689de8ce180", "score": "0.5345532", "text": "def SoModelMatrixElement_set(state: 'SoState', node: 'SoNode') -> \"SbMatrix &\":\n return _coin.SoModelMatrixElement_set(state, node)", "title": "" }, { "docid": "7a007c30b0b4f4de3f0d78d052ff72d0", "score": "0.53453904", "text": "def transformationMatrix(*args, **kwargs):\n \n pass", "title": "" }, { "docid": "0e657148c90676c41fd3274eab9f3db9", "score": "0.53432226", "text": "def expand(self, dimension):\n # dimension == \"tropes\" or \"media\"\n if dimension == \"tropes\":\n self.matrix = np.hstack([self.matrix, np.zeros(self.matrix.shape[0])[np.newaxis].T])\n elif dimension == \"media\":\n import pdb; pdb.set_trace()\n self.matrix = np.resize(self.matrix, (2 * np.size(self.matrix, 0), np.size(self.matrix, 1)))\n self.matrix[self.matrix.shape[0] // 2:] = 0", "title": "" }, { "docid": "3f49a56f1bef35441adb1623e4f0ee56", "score": "0.53419816", "text": "def setZeroes(self, matrix: List[List[int]]) -> None:\n first_row_has_zero = False\n # Record the zero status of each row and column\n for row in range(len(matrix)):\n if matrix[row][0] == 0:\n first_row_has_zero = True\n for col in range(1, len(matrix[row])):\n if matrix[row][col] == 0:\n matrix[row][0] = 0\n matrix[0][col] = 0\n \n # Update based on the first row and first column\n # But the first row cannot be updated right away, so update from the last row backward and update the first row last\n for row in range(len(matrix)-1, -1, -1):\n for col in range(1, len(matrix[row])):\n if matrix[row][0] == 0 or matrix[0][col] == 0:\n matrix[row][col] = 0\n if first_row_has_zero:\n matrix[row][0] = 0", "title": "" }, { "docid": "963e4c7178f33838e95104a2edd6937f", "score": "0.53415793", "text": "def test_sets_zeroes_for_original_matrix(self):\n matrix = [\n [1, 2, 3, 4, 5],\n [1, 2, 0, 4, 5],\n [1, 2, 3, 4, 0],\n [1, 2, 3, 4, 5],\n [1, 0, 3, 4, 5],\n ]\n set_zeroes(matrix)\n self.assertEqual(matrix, [\n [1, 0, 0, 4, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [1, 0, 0, 4, 0],\n [0, 0, 0, 0, 0],\n ])", "title": "" }, { "docid": "b06e4025d1a44a79267bf13b87fa1cbf", "score": "0.53306854", "text": "def rotate(self, matrix: List[List[int]]) -> None:\n row = len(matrix)\n col = row\n\n # gets all elements in the same columns and joins them together\n for i in range(row):\n for j in range(i + 1, col):\n\n matrix[i][j], matrix[j][i] = matrix[j][i], matrix[i][j]\n\n for row in matrix:\n row.reverse() # using reversed method reverses the state of the input", "title": "" }, { "docid": "6ab023c5894a754fec47cc1599bc77be", "score": "0.53220195", "text": "def update_weight(self, learn_rate):\n self.matrix = self.matrix - (learn_rate * self.hold)", "title": "" }, { "docid": "0f680dde3ee582b3e0ae5fed692c2c62", "score": "0.5318701", "text": "def rotate(self, matrix: List[List[int]]) -> None:\n start = 0\n end = len(matrix)-1\n for i in range(len(matrix)//2):\n matrix[start], matrix[end] = matrix[end], matrix[start]\n start += 1\n end -= 1\n \n #print(matrix)\n # above can be done using \"matrix.reverse()\"\n \n for i in range(len(matrix)):\n for j in range(i):\n matrix[i][j], matrix[j][i] = matrix[j][i], matrix[i][j]", "title": "" }, { "docid": "7641170cc318e771136ae62e4ba87aad", "score": 
"0.53174794", "text": "def rotate(self, matrix):\n rows = len(matrix)\n cols = len(matrix[0])\n for i in range(rows // 2):\n for j in range(cols):\n matrix[i][j] = matrix[rows - i - 1][j]\n for i in range(rows):\n for j in range(i):\n if i == j:\n continue\n else:\n matrix[i][j], matrix[j][i] = matrix[j][i], matrix[i][j]\n return matrix", "title": "" }, { "docid": "cf98d68e0c5fd8918dd43e21cb6cfaf7", "score": "0.53159934", "text": "def rotate(self, matrix: List[List[int]]) -> None:\n n=len(matrix)\n for i in range(n):\n for j in range(i,n):\n matrix[i][j],matrix[j][i]=matrix[j][i],matrix[i][j]\n \n for i in range(n):\n matrix[i].reverse()", "title": "" }, { "docid": "42803452ec3ae3c65ef4e72339afe7d2", "score": "0.53090805", "text": "def exclusiveMatrix(*args, **kwargs):\n \n pass", "title": "" }, { "docid": "5892297b3f77f6eab0d58ccc4dfcfdbe", "score": "0.5309031", "text": "def gauss_method(matrix, result_vector):\n the_smallest_matrix_size = min(len(matrix), len(matrix[0]))\n the_matrix_size = len(matrix)\n # forward\n for forward_row_id in range(the_smallest_matrix_size):\n column_id = forward_row_id\n if matrix[forward_row_id][column_id] == 0:\n # attempt to identify required row\n rows_were_switched = False\n for another_modified_row_id in range(forward_row_id + 1, the_matrix_size):\n if matrix[another_modified_row_id][column_id] != 0:\n # switch rows in the matrix\n for matrix_column_id in range(column_id, len(matrix[0]), 1):\n matrix[forward_row_id][matrix_column_id], matrix[another_modified_row_id][matrix_column_id] = \\\n matrix[another_modified_row_id][matrix_column_id], matrix[forward_row_id][matrix_column_id]\n # switch elements in result vector\n result_vector[forward_row_id], result_vector[another_modified_row_id] = \\\n result_vector[another_modified_row_id], result_vector[forward_row_id]\n rows_were_switched = True\n break\n if not rows_were_switched:\n continue\n normalization_value = matrix[forward_row_id][column_id]\n # change value on the element on diagonal\n multiply_by_value(matrix[forward_row_id], 1.0/normalization_value)\n result_vector[forward_row_id] *= 1.0/normalization_value\n # modification\n for modified_row_id in range(forward_row_id + 1, the_matrix_size):\n if matrix[modified_row_id][column_id] != 0:\n direct_or_reverse_gauss_step(column_id, forward_row_id, matrix, modified_row_id, result_vector)\n # reverse\n for reverse_row_id in range(the_smallest_matrix_size - 1, -1, -1):\n column_id = reverse_row_id\n if matrix[reverse_row_id][column_id] != 0:\n normalization_value = matrix[reverse_row_id][column_id]\n # change value on the element on diagonal\n multiply_by_value(matrix[reverse_row_id], 1.0/normalization_value)\n result_vector[reverse_row_id] *= 1.0/normalization_value\n # modification\n for modified_row_id in range(reverse_row_id - 1, -1, -1):\n if matrix[modified_row_id][column_id] != 0:\n direct_or_reverse_gauss_step(column_id, reverse_row_id, matrix, modified_row_id, result_vector)", "title": "" }, { "docid": "4a12c10e3cef97d36029367d0d14f3b9", "score": "0.5301829", "text": "def rotate(self, matrix: List[List[int]]) -> None:\n N = len(matrix[0])\n for i in range(N):\n for j in range(i, N, 1):\n matrix[i][j], matrix[j][i] = matrix[j][i], matrix[i][j]\n for i in range(N):\n matrix[i].reverse()", "title": "" }, { "docid": "bfd1df4124cfbb49dc6edc86aeaa05bf", "score": "0.52865016", "text": "def mutate(self):\n\n\t\tpass", "title": "" }, { "docid": "e5b4c6f1d00e5a7200fd332d57a6306d", "score": "0.52859855", "text": "def 
SoModelMatrixElement_makeIdentity(state: 'SoState', node: 'SoNode') -> \"void\":\n return _coin.SoModelMatrixElement_makeIdentity(state, node)", "title": "" } ]
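Several of the negative passages above implement the same in-place 90-degree rotation via transpose-then-reverse. A minimal standalone sketch of that technique follows; the function and variable names are illustrative, not taken from any passage:

    from typing import List

    def rotate_90_clockwise(matrix: List[List[int]]) -> None:
        # Rotate a square matrix 90 degrees clockwise, in place.
        n = len(matrix)
        # Step 1: transpose (mirror across the main diagonal).
        for i in range(n):
            for j in range(i + 1, n):
                matrix[i][j], matrix[j][i] = matrix[j][i], matrix[i][j]
        # Step 2: reverse each row to finish the clockwise turn.
        for row in matrix:
            row.reverse()

    grid = [[1, 2], [3, 4]]
    rotate_90_clockwise(grid)
    assert grid == [[3, 1], [4, 2]]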
4a4f34489a65f9304f10439e7d7a7847
Returns an image with reduced opacity.
[ { "docid": "7a6d677a22fd5484810a8b3f07511e89", "score": "0.7732773", "text": "def reduce_opacity(im, opacity):\n assert opacity >= 0 and opacity <= 1\n if im.mode != 'RGBA':\n im = im.convert('RGBA')\n else:\n im = im.copy()\n alpha = im.split()[3]\n alpha = ImageEnhance.Brightness(alpha).enhance(opacity)\n im.putalpha(alpha)\n return im", "title": "" } ]
[ { "docid": "1d34bf218e495a1d7eb729956713e97b", "score": "0.7735883", "text": "def reduce_opacity(im, opacity):\r\n assert opacity >= 0 and opacity <= 1\r\n if im.mode != 'RGBA':\r\n im = im.convert('RGBA')\r\n else:\r\n im = im.copy()\r\n alpha = im.split()[3]\r\n alpha = ImageEnhance.Brightness(alpha).enhance(opacity)\r\n im.putalpha(alpha)\r\n return im", "title": "" }, { "docid": "845199356b524a4ee32117e750c01007", "score": "0.77109146", "text": "def reduceOpacity(self, im, opacity): \n assert opacity >= 0 and opacity <= 1 \n if im.mode != 'RGBA': \n im = im.convert('RGBA') \n else: \n im = im.copy() \n alpha = im.split()[3] \n alpha = ImageEnhance.Brightness(alpha).enhance(opacity) \n im.putalpha(alpha) \n return im", "title": "" }, { "docid": "50b8119fd9fb6dd0ebe6b119b994a80d", "score": "0.6664306", "text": "def opacity(self):\n return self.color.alpha", "title": "" }, { "docid": "e355ce1fc433f5f19347efdc28dd5299", "score": "0.6481676", "text": "def mask_image(img, mask, opacity=100):\r\n mask = mask.convert('RGBA')\r\n pixels = mask.getdata()\r\n\r\n new_pixels = []\r\n for px in pixels:\r\n if px[0] == 255 and px[1] == 255 and px[2] == 255:\r\n new_pixels.append((255, 0, 0, opacity))\r\n else:\r\n new_pixels.append((0, 0, 0, 0))\r\n\r\n mask.putdata(new_pixels)\r\n img.paste(mask, None, mask)\r\n return img", "title": "" }, { "docid": "56e02bc2d364c994acd11875d244c70d", "score": "0.6478843", "text": "def make_image(self):\n image = pg.Surface(self.rect.size).convert_alpha()\n image.fill(TRANSPARENT)\n image_rect = image.get_rect()\n pg.draw.ellipse(image, pg.Color(\"black\"), image_rect)\n pg.draw.ellipse(image, pg.Color(\"red\"), image_rect.inflate(-12, -12))\n return image", "title": "" }, { "docid": "7fc64deeac16e1faad0a80606ade0292", "score": "0.63648015", "text": "def transparency(self):\n assert 0.0 <= self._opacity <= 1.0, self._opacity\n return 1.0 - self._opacity", "title": "" }, { "docid": "9629bfa360cfeff09c866008c21b3478", "score": "0.62584525", "text": "def xform_transparent(src_img: PathLike, dest: PathLike) -> None:\n img = Image.open(src_img)\n img = img.convert(\"RGBA\")\n img_tups = img.getdata()\n new_data = []\n for tup in img_tups:\n if tup[0] == 255 and tup[1] == 255 and tup[2] == 255:\n new_data.append((255, 255, 255, 0))\n else:\n new_data.append(tup)\n img.putdata(new_data)\n img.save(dest, \"PNG\")", "title": "" }, { "docid": "0bb1823a09388f549376500c21e78c6a", "score": "0.62392056", "text": "def put_fading(self, img, fade, f=0.5):\n fade -= fade.min()\n fade /= fade.max()\n fade += (1 - fade) * f\n return (255 - (255 - img) * fade.reshape((fade.shape[0], fade.shape[1], 1))).astype(np.uint8)", "title": "" }, { "docid": "b3cc370bde130fc23101cc522064a816", "score": "0.62315416", "text": "def opacity(self):\n assert 0.0 <= self._opacity <= 1.0, self._opacity\n return self._opacity", "title": "" }, { "docid": "269db36ca08a18b42b85cbcc676990d0", "score": "0.6190942", "text": "def opacity(self):\n return self['opacity']", "title": "" }, { "docid": "ec87b6226630a61310607b249932030e", "score": "0.61747384", "text": "def opacity(self):\n return self[\"opacity\"]", "title": "" }, { "docid": "ec87b6226630a61310607b249932030e", "score": "0.61747384", "text": "def opacity(self):\n return self[\"opacity\"]", "title": "" }, { "docid": "8a446e260fbd4c2234855cf16b7608df", "score": "0.6164783", "text": "def remove_alpha_layer(img: Image) -> Image:\n if len(img.split()) == 4:\n # prevent IOError: cannot write mode RGBA as BMP\n r, g, b, a = img.split()\n img = 
Image.merge(\"RGB\", (r, g, b))\n return img\n else:\n return img", "title": "" }, { "docid": "71116186d1818f550af1db23be3c8af7", "score": "0.61217", "text": "def get_alpha(image):\r\n if image.shape[2] > 3:\r\n alpha = image[:, :, 3]\r\n #alpha = remove_noise(alpha)\r\n else:\r\n reduced_image = np.sum(np.abs(255 - image), axis=2)\r\n alpha = np.where(reduced_image > 100, 255, 0)\r\n alpha = alpha.astype(np.uint8)\r\n return alpha", "title": "" }, { "docid": "7869d065200d6efbe05e3c7b2456352b", "score": "0.60934055", "text": "def pure_pil_alpha_to_color_v2(pilimg, color=(255, 255, 255)):\n pilimg.load() # needed for split()\n background = Image.new('RGB', pilimg.size, color)\n background.paste(pilimg, mask=pilimg.split()[3]) # 3 is the alpha channel\n return background", "title": "" }, { "docid": "9eea7c1d0f1140ef7df778797338af5e", "score": "0.60826874", "text": "def opacity(self, opacity):\n self.color.alpha = opacity", "title": "" }, { "docid": "ff588f489fcf3e3fade0c758fc927157", "score": "0.6063642", "text": "def opacity(self):\n return self.value.get(Key.Opacity).value", "title": "" }, { "docid": "5b2320d66e8391a9923de639e35f9874", "score": "0.60577685", "text": "def bgopacity(self):\n return self._bgopacity", "title": "" }, { "docid": "9f7d54b0d07791519422ea16caf122a9", "score": "0.59946406", "text": "def getAlpha(cls, img):\n # Get alpha for mask, using img converted to RGBA\n alpha = Image.new('L', img.size)\n alpha.putdata([x[-1] for x in img.convert(\"RGBA\").getdata()])\n return alpha", "title": "" }, { "docid": "a5c0090a501695083031e84c640b967b", "score": "0.5917117", "text": "def make_transparent(img, bg=(255, 255, 255, 255)):\n img = img.convert(\"RGBA\")\n clear = bg[0:3]+(0,)\n pixdata = img.load()\n\n width, height = img.size\n for y in range(height):\n for x in range(width):\n if pixdata[x,y] == bg:\n pixdata[x,y] = clear\n return img", "title": "" }, { "docid": "bc710d479b18ecf3f879fb4edd7a6a3d", "score": "0.58739346", "text": "def unAlpha(image):\n allwhite = np.zeros((image.shape[0], image.shape[1], 3), dtype=float)\n allwhite[:,:] = 255\n\n alpha1 = image[:,:,3].astype(float)/255\n alpha = np.zeros((image.shape[0], image.shape[1], 3), dtype=float)\n for c in range(3):\n alpha[:,:,c] = alpha1\n\n beta = 1.0 - alpha\n # place our RGBA image onto a white background\n source = image[:,:,0:3].astype(float)\n\n merged = source * alpha + allwhite*beta\n\n return merged.astype(np.uint8)", "title": "" }, { "docid": "784dd97f21fc7c9910790c30c946e31a", "score": "0.58624417", "text": "def single(im, color='black'):\n return Image.new('RGBA', im.size, color=color)", "title": "" }, { "docid": "da81273a0c25794d1141acfbae7fb6ba", "score": "0.5854868", "text": "def extreme_contrast(image): \n new_image = copy(image)\n \n for x, y, (r, g, b) in image:\n \n if r <= 127:\n r = 0\n else:\n r = 255\n \n if g <= 127:\n g = 0\n else:\n g = 255\n \n if b <= 127:\n b = 0\n else:\n b = 255\n \n contrast = create_color(r, g, b)\n set_color(new_image, x, y, contrast)\n \n return new_image", "title": "" }, { "docid": "9fa0954c03025ed01f1d284ff7be398c", "score": "0.5854377", "text": "def show_image(self, image, brightness=100):\r\n pass", "title": "" }, { "docid": "a0cb40486215140dc23ba42eb4d53b36", "score": "0.5849901", "text": "def test_read_rgba(self):\n\n # Generated with `cwebp transparent.png -o transparent.webp`\n file_path = \"Tests/images/transparent.webp\"\n image = Image.open(file_path)\n\n self.assertEqual(image.mode, \"RGBA\")\n self.assertEqual(image.size, (200, 150))\n 
self.assertEqual(image.format, \"WEBP\")\n image.load()\n image.getdata()\n\n image.tobytes()\n\n target = Image.open('Tests/images/transparent.png')\n self.assert_image_similar(image, target, 20.0)", "title": "" }, { "docid": "33b41116a8521e5a9adb5eefe7f1b32e", "score": "0.58116055", "text": "def PIL_contrast(image_name, scale):\r\n # Change input here\r\n im = Image.open(image_name)\r\n # Convert image to BW\r\n bw_im = im.convert('LA')\r\n # contrast the image\r\n enhancer = ImageEnhance.Contrast(bw_im)\r\n enhanced_im = enhancer.enhance(scale)\r\n enhanced_im.save(\"enhanced.sample1.png\")\r\n return enhanced_im", "title": "" }, { "docid": "472128af34b1f77c6a78350b6aebe648", "score": "0.5807373", "text": "def opacity(self):\n return self._record.opacity", "title": "" }, { "docid": "23094a405b7e59bd1a114903d6356e9b", "score": "0.58008116", "text": "def get_opacity(self):\n return self.get_relative_opacity_unit()", "title": "" }, { "docid": "b8d64ad6c33049fe64c01e6f681215c5", "score": "0.57821435", "text": "def tweak_color(image):\r\n px = np.array(image)\r\n px[:,:,0] = px[:,:,0]*np.random.uniform(0.6,1)\r\n px[:,:,1] = px[:,:,1]*np.random.uniform(0.6,1)\r\n px[:,:,2] = px[:,:,2]*np.random.uniform(0.6,1)\r\n return Image.fromarray(px)", "title": "" }, { "docid": "5d841cfc177b9516c48a2b910a29a57d", "score": "0.57609475", "text": "def get_render_opacity(self):\n return 0.000000", "title": "" }, { "docid": "ff3c1a3f7504d5e8b168c8fb3776f166", "score": "0.5749682", "text": "def imageAlphaOnWhiteSingle(img):\n img_w = np.shape(img)[0]\n img_h = np.shape(img)[1]\n\n if (len(np.shape(img)) == 3):\n if (np.shape(img)[2] == 4):\n white = np.ones((img_w, img_h, 1))\n\n alpha = np.reshape(img[:, :, 3], (img_w, img_h, 1))\n img[:, :, 0:2] = (img[:, :, 0:2] * alpha\n + white * (1 - alpha))\n img = np.delete(img, 3, 2)\n return img", "title": "" }, { "docid": "19fb5e49072e1dadf1b95f6a87c22204", "score": "0.57382834", "text": "def extreme_contrast(input_image: Image) -> Image:\n new_image = copy(input_image)\n for x, y, (r, g, b) in input_image:\n col = [r, g, b]\n for t in range(3):\n if col[t] >= 128:\n col[t] = 255\n else:\n col[t] = 0\n extreme = create_color(*col)\n set_color(new_image, x, y, extreme)\n return new_image", "title": "" }, { "docid": "cf027d0ed6a2f4017112b2e6ac50cebd", "score": "0.5735712", "text": "def color(image, factor):\r\n image = Image.fromarray(image)\r\n image = ImageEnhance.Color(image).enhance(factor) \r\n return np.asarray(image)", "title": "" }, { "docid": "17c395d9cc5e4f44bef1101971e92046", "score": "0.5707731", "text": "def change_opacity(self, value):\n self.ui.opacityBox.setTitle('Label Opacity: {}'.format(value))\n self.imagePanel.opacity = value\n self.imagePanel.setBrushes()\n self.imagePanel.update()", "title": "" }, { "docid": "eb5b6d0a82510fae257f582b25a28f9c", "score": "0.56907094", "text": "def to_new_opacity(self, opacity):\n return LinearColor()", "title": "" }, { "docid": "7ea2dd6ffa89a7c83db4d06477bbfc50", "score": "0.5681478", "text": "def getalpha(im: Image.Image):\n assert im.mode.endswith('A')\n return im.split()[-1]", "title": "" }, { "docid": "a0b86e007497f7638e246cd3200ee305", "score": "0.5679374", "text": "def set_fog_max_opacity(self, value):\n return None", "title": "" }, { "docid": "83932ec6aa5768a3b9cbce12b79a476d", "score": "0.56704694", "text": "def remove_alpha(self, source):\n with wand.image.Image(filename=source) as img:\n img.alpha_channel = False # close alpha channel\n img.background_color = wand.image.Color('white')\n new_source_path 
= self.results_dir.joinpath(source.stem + \"_sans_alphachannel\" + source.suffix)\n img.save(filename=new_source_path)\n self.log.info(f\"Temp image without alpha channel={new_source_path}\")\n return new_source_path", "title": "" }, { "docid": "5d5ffd4767e17ea3b07028f4f262f1a8", "score": "0.56683856", "text": "def invert(image):\r\n return 255 - image", "title": "" }, { "docid": "89cfa8d99d50cd1e32031e336de586f2", "score": "0.56599045", "text": "def normalize_opacity(self, inplace=False):\n target = self\n if not inplace:\n target = copy.deepcopy(self)\n\n if target.fill == \"none\" and target.stroke == \"none\":\n return target\n\n default = 1.0\n for fill_attr, opacity_attr in [\n (\"fill\", \"stroke_opacity\"),\n (\"stroke\", \"fill_opacity\"),\n ]:\n if getattr(target, fill_attr) == \"none\":\n target.opacity *= getattr(target, opacity_attr)\n setattr(target, opacity_attr, default)\n\n return target", "title": "" }, { "docid": "09320e1bfa6d5a5fdf1448843405dce0", "score": "0.5657729", "text": "def darken_imgs(cls, amount=100):\n shade = pygame.Surface((64, 64)).convert_alpha()\n shade.fill((amount, amount, amount, 100))\n for i, j in Level.tile_ids.items():\n for k in j.img_list:\n k.blit(shade, (0, 0), special_flags=pygame.BLEND_SUB)\n\n for i, j in Level.deco_ids.items():\n for k in j.img_list:\n k.blit(shade, (0, 0), special_flags=pygame.BLEND_SUB)", "title": "" }, { "docid": "f68555896486671e845d1cc4cdb45cca", "score": "0.5657116", "text": "def drawAlpha(image, alphaName, output_dir=\"\"):\t\n\talphaImage = np.zeros(image.shape[:2], dtype = \"uint8\")\n\tcv2.rectangle(alphaImage, (0,0), (image.shape[1], image.shape[0]),255, -1)\n\tcv2.imwrite(output_dir+alphaName+\n\t\t\t\t\"_al\"+\".jpeg\", alphaImage)", "title": "" }, { "docid": "f2ac7a433fe15b6f751ca16334244c80", "score": "0.5641034", "text": "def draw_layer(img, color=(0, 0, 0, 80)):\n layer = Image.new('RGBA', img.size, color)\n return Image.alpha_composite(img, layer)", "title": "" }, { "docid": "311e47c3270cf350a501b13f14a3c8b2", "score": "0.5622545", "text": "def overlay_image_alpha(img, img_overlay, pos, alpha_mask):\n\n x, y = pos\n\n # Image ranges\n y1, y2 = max(0, y), min(img.shape[0], y + img_overlay.shape[0])\n x1, x2 = max(0, x), min(img.shape[1], x + img_overlay.shape[1])\n print(y1,y2,x1,x2)\n\n # Overlay ranges\n y1o, y2o = max(0, -y), min(img_overlay.shape[0], img.shape[0] - y)\n x1o, x2o = max(0, -x), min(img_overlay.shape[1], img.shape[1] - x)\n\n # Exit if nothing to do\n if y1 >= y2 or x1 >= x2 or y1o >= y2o or x1o >= x2o:\n return\n\n channels = img.shape[2]\n print(channels)\n\n alpha = alpha_mask[y1o:y2o, x1o:x2o]\n alpha_inv = 1.0 - alpha\n print(\"large\",img.shape)\n print(\"small\",img_overlay.shape)\n\n for c in range(channels):\n img[y1:y2, x1:x2, c] = (alpha * img_overlay[y1o:y2o, x1o:x2o, c] +\n alpha_inv * img[y1:y2, x1:x2, c])", "title": "" }, { "docid": "5f7f4a50071d782c09eef97b9df091bf", "score": "0.56214964", "text": "def get_slices_opacity(self):\n value = 0\n num_values = 0\n for slicer_id in self.slicers:\n slicer = self.slicers[slicer_id]\n if slicer.actor is not None:\n slice_alpha = slicer.actor.GetProperty().GetOpacity()\n if slice_alpha is None:\n continue\n value += slice_alpha\n num_values += 1\n if num_values == 0 or value == 0:\n return None\n return value / num_values", "title": "" }, { "docid": "4afce5bc949c1ac1aa82045339855317", "score": "0.5611107", "text": "def emphasis(self, img, color=1, scale=2, fraction=0.5) :\n plog(\"Color=\"+str(color))\n return 
cv2.subtract(cv2.multiply(img[:,:,color],scale),\n cv2.multiply(cv2.add( img[:,:,(color+1)%3],\n img[:,:,(color+2)%3]),fraction))", "title": "" }, { "docid": "6d9fafd5d7cd7dd0b8ed324f0bfd84ca", "score": "0.5605477", "text": "def contrast_stretch(im):\n in_min = np.percentile(im, 5)\n in_max = np.percentile(im, 95)\n\n out_min = 0.0\n out_max = 255.0\n\n out = im - in_min\n out *= ((out_min - out_max) / (in_min - in_max))\n out += in_min\n\n return out", "title": "" }, { "docid": "54e216cda82c1cd48f92c6978269da03", "score": "0.5589355", "text": "def image(self) -> pg.Surface:\n image = pg.image.load(SHIP_IMG).convert()\n image = pg.transform.scale(image, (26, 30))\n image.set_colorkey(color.WHITE)\n return image", "title": "" }, { "docid": "600b75a92993b3ecdac448c66be7cc5a", "score": "0.5585475", "text": "def load_image_convert_alpha(filename):\n return pygame.image.load(os.path.join('images', filename)).convert_alpha()", "title": "" }, { "docid": "0799d0b3502afba9fe45b13c7a6a094e", "score": "0.5578301", "text": "def _color(image, strength):\n return autoaugment_utils.color(image, strength)", "title": "" }, { "docid": "f9bc9b7b8d6102ebb0d4fe1755a903a4", "score": "0.55582094", "text": "def contrast_stretching(img):\n p2, p98 = np.percentile(img, (2, 98))\n img_rescale = exposure.rescale_intensity(img, in_range=(p2, p98))\n return img_rescale", "title": "" }, { "docid": "3b1d2df4aec60ec7030d11fd60edf42e", "score": "0.5553258", "text": "def set_opacity(self, opacity):\n return None", "title": "" }, { "docid": "3b1d2df4aec60ec7030d11fd60edf42e", "score": "0.5553258", "text": "def set_opacity(self, opacity):\n return None", "title": "" }, { "docid": "781ffb90e97592e0dde9ebf30bed2f4b", "score": "0.55482066", "text": "def overlay_mask_tool(img, mask, transparency=1.0):\n im_over = np.ndarray(img.shape)\n im_over[:, :, 0] = (1 - mask[:, :, 0]) * img[:, :, 0] + mask[:, :, 0] * (\n transparency + (1 - transparency) * img[:, :, 0])\n im_over[:, :, 1] = (1 - mask[:, :, 1]) * img[:, :, 1] + mask[:, :, 1] * (\n transparency + (1 - transparency) * img[:, :, 1])\n im_over[:, :, 2] = (1 - mask[:, :, 2]) * img[:, :, 2] + mask[:, :, 2] * (\n transparency + (1 - transparency) * img[:, :, 2])\n return im_over", "title": "" }, { "docid": "a461bd9ee6744c9efb31d2bc3ac584b3", "score": "0.5539164", "text": "def contrast_stretch(im):\n in_min = np.percentile(im, 5)\n in_max = np.percentile(im, 95)\n\n out_min = 0.0\n out_max = 255.0\n\n out = im - in_min\n out *= ((out_min - out_max) / (in_min - in_max))\n out += in_min\n\n return out", "title": "" }, { "docid": "788e06bd97657c6099982a50dce60b5a", "score": "0.55357", "text": "def getNegativeImage(originalImage):\n negativeImage = originalImage.copy()\n\n width = negativeImage.width\n height = negativeImage.height\n px = negativeImage.load()\n\n # loop through all the pixels\n for x in range(width):\n for y in range(height):\n redValue = px[x, y][0]\n greenValue = px[x,y][1]\n blueValue = px[x, y][2]\n #sets all the color value to the opposite value, by subtracting the original value\n #from the maximum color value of 255\n px[x,y] = (255-redValue, 255-greenValue, 255-blueValue)\n \n return negativeImage", "title": "" }, { "docid": "e8f0906afa40a42d51249c311f242b5a", "score": "0.55321085", "text": "def image_show(image):\n image = image.astype(np.uint8)\n image = np.clip(image,0,255)\n img = Image.fromarray(image)\n img.show()", "title": "" }, { "docid": "7318969a0d02a38c0b84d91a3e7968b9", "score": "0.5528826", "text": "def float2transparency(value: float) -> int:\n 
return int((1. - float(value)) * 255) | 0x02000000", "title": "" }, { "docid": "e6691ae3dc2b5117abe4f0c86c38364a", "score": "0.5521806", "text": "def operation(self, img):\n \n img.thumbnail((400, 400), Image.ANTIALIAS)\n return img", "title": "" }, { "docid": "9592ebd857da326526c086d292ba3095", "score": "0.55196637", "text": "def get_red_shade(self, image:np.uint8)->np.uint8:\n image[:,:,0] = 0 # remove blue shade\n image[:,:,1] = 0 # remove green shade\n return image", "title": "" }, { "docid": "d32a8b69671dfda9c444978d6302dc03", "score": "0.54966354", "text": "def RGBA2RGB(image, color=(255, 255, 255)):\n image.load() # needed for split()\n background = Image.new('RGB', image.size, color)\n background.paste(image, mask=image.split()[3]) # 3 is the alpha channel\n return background", "title": "" }, { "docid": "08fa7802a48bae1c01d02d4d3e26c863", "score": "0.5488691", "text": "def operation(self, img):\n \n img.thumbnail((150, 150), Image.ANTIALIAS)\n return img", "title": "" }, { "docid": "7865f7183032742e4d8efd74c5ca6b29", "score": "0.5479858", "text": "def rgba(self) -> Pixel:\n return self.red, self.green, self.blue, self.alpha", "title": "" }, { "docid": "4cf40227a1ca2c9a0cb13434220aa7b9", "score": "0.5479753", "text": "def convert_full_color_image(self):\n newImage = Image.new('RGBA', (self.header.width, self.header.height))\n pixels = newImage.load()\n for x in range(newImage.size[0]): # for every col:\n for y in range(newImage.size[1]): # For every row\n pixel_index = self.header.width * y + x\n pixel_data = self.imageFullColor.get_pixel(pixel_index)\n pixel_color = read_bitmask_ARGB_color(pixel_data, self.header.bitDepthRed, self.header.bitDepthGreen, self.header.bitDepthBlue, self.header.bitDepthAlpha)\n pixels[x,y] = (pixel_color[0], pixel_color[1], pixel_color[2], pixel_color[3]) # set the colour accordingly\n return newImage", "title": "" }, { "docid": "6b9a9f3bf1c92f32f7e6d1690299412a", "score": "0.5469917", "text": "def alpha_composite(image, mask):\n\n compos = pg.Image(mask)\n compos.composite(\n image,\n image.size(),\n pg.CompositeOperator.CopyOpacityCompositeOp\n )\n return compos", "title": "" }, { "docid": "79aee46adb70ee4248756a7b7b12c4d1", "score": "0.54579246", "text": "def get_image(self) -> ee.Image:\n return self.collection.mosaic()", "title": "" }, { "docid": "dd1dea46da9dbf1b5026797fdf5d525f", "score": "0.54487205", "text": "def render_opacity(self):\n return self.render_properties[1]", "title": "" }, { "docid": "de3adfb75319715f890cd708bc610974", "score": "0.5444027", "text": "def add_reflection(im, bgcolor=\"#00000\", amount=0.4, opacity=0.6):\r\n # convert bgcolor string to rgb value\r\n background_color = ImageColor.getrgb(bgcolor)\r\n\r\n # copy orignial image and flip the orientation\r\n reflection = im.copy().transpose(Image.FLIP_TOP_BOTTOM)\r\n\r\n # create a new image filled with the bgcolor the same size\r\n background = Image.new(\"RGB\", im.size, background_color)\r\n\r\n # calculate our alpha mask\r\n start = int(255 - (255 * opacity)) # The start of our gradient\r\n steps = int(255 * amount) # the number of intermedite values\r\n increment = (255 - start) / float(steps)\r\n mask = Image.new('L', (1, 255))\r\n for y in range(255):\r\n if y < steps:\r\n val = int(y * increment + start)\r\n else:\r\n val = 255\r\n mask.putpixel((0, y), val)\r\n alpha_mask = mask.resize(im.size)\r\n\r\n # merge the reflection onto our background color using the alpha mask\r\n reflection = Image.composite(background, reflection, alpha_mask)\r\n\r\n # crop the 
reflection\r\n reflection_height = int(im.size[1] * amount)\r\n reflection = reflection.crop((0, 0, im.size[0], reflection_height))\r\n\r\n # create new image sized to hold both the original image and the reflection\r\n composite = Image.new(\"RGB\", (im.size[0], im.size[1] + reflection_height), background_color)\r\n\r\n # paste the orignal image and the reflection into the composite image\r\n composite.paste(im, (0, 0))\r\n composite.paste(reflection, (0, im.size[1]))\r\n\r\n # return the image complete with reflection effect\r\n return composite", "title": "" }, { "docid": "e311d134e0a1c250b7e0535b1e1d72d3", "score": "0.54372555", "text": "def obscure_image(image):\n size = image.size\n pixel_size = 9\n \n image = image.resize((size[0] / pixel_size, size[1] / pixel_size), Image.NEAREST)\n image = image.resize((size[0], size[1]), Image.NEAREST)\n \n return image", "title": "" }, { "docid": "bf6e0f7686feda23ade06a3dd5007d11", "score": "0.542946", "text": "def reset_result_image(self):\n # create transparent image\n # thanks for the example from\n # https://stackoverflow.com/a/44595221/574981\n # RGBA == 4\n n_channels = 4\n height, width = self.result_image_size\n shape = (height, width, n_channels)\n # init to black\n # self.result_image = np.zeros(shape, dtype=np.uint8)\n # init to white but fully transparent\n self.result_image = np.full(shape, (255, 255, 255, 0), dtype=np.uint8)", "title": "" }, { "docid": "fe760d541dc5ca769055bf23018be36b", "score": "0.5425802", "text": "def set_slice_opacity(opacity):\n slicer.util.setSliceViewerLayers(foregroundOpacity=opacity / 100.0)", "title": "" }, { "docid": "c799168f0afa277153662c62aa10b20e", "score": "0.54221547", "text": "def add_alpha(array, opacity=255):\n if len(array.shape) != 3 or array.shape[2] < 3 or array.dtype != np.uint8:\n raise ValueError(\"Argument 'array' must a Numpy array of an RGB or RGBA image.\")\n if array.shape[2] == 3:\n alpha = np.full((array.shape[0], array.shape[1], 1), opacity, dtype=np.uint8)\n array = np.dstack((array, alpha))\n return array", "title": "" }, { "docid": "178f16a83b564e77f4ac28fc80fbbb7c", "score": "0.5416054", "text": "def obscure_image(image):\n size = image.size\n w = size[0] / PIXEL_SIZE or 1\n h = size[1] / PIXEL_SIZE or 1\n image = image.resize((w, h), Image.NEAREST)\n image = image.resize((size[0], size[1]), Image.NEAREST)\n\n return image", "title": "" }, { "docid": "1d700e2212829a85f25554022c082c39", "score": "0.54142004", "text": "def remove_alpha_channel(source):\n source_img = cv2.cvtColor(source[:,:,:3], cv2.COLOR_BGR2GRAY)\n source_mask = source[:,:,3] * (1/255.0)\n bg_part = (255 * (1/255.0)) * (1.0 - source_mask)\n weight = (source_img * (1/255.0)) * (source_mask)\n dest = np.uint8(cv2.addWeighted(bg_part, 255.0, weight, 255.0, 0.0))\n return dest", "title": "" }, { "docid": "4d72518d9e9c358ef6f6bd0f1d0dd48f", "score": "0.53493994", "text": "def image(filepath, *, convert_alpha=None):\n img = pygame.image.load(filepath)\n\n def convert(img, alpha):\n if alpha:\n img = img.convert_alpha()\n else:\n img = img.convert()\n return img\n\n img = convert(\n img, img.get_alpha() if convert_alpha is None else convert_alpha)\n\n return img", "title": "" }, { "docid": "049101f1b7e3cbf0fbcb570df9fc7ac4", "score": "0.534509", "text": "def contrast_stretch(img):\n p2, p98 = numpy.percentile(img, (2, 98))\n img_rescale = exposure.rescale_intensity(img, in_range=(p2, p98))\n return img_rescale.astype('uint8')", "title": "" }, { "docid": "cc2bec4e9d1781c417e120b6bd9611cd", "score": "0.5338796", 
"text": "def overlay_image_alpha(img, img_overlay, pos, alpha_mask):\n\n x, y = pos\n\n # Image ranges\n y1, y2 = max(0, y), min(img.shape[0], y + img_overlay.shape[0])\n x1, x2 = max(0, x), min(img.shape[1], x + img_overlay.shape[1])\n\n # Overlay ranges\n y1o, y2o = max(0, -y), min(img_overlay.shape[0], img.shape[0] - y)\n x1o, x2o = max(0, -x), min(img_overlay.shape[1], img.shape[1] - x)\n\n # Exit if nothing to do\n if y1 >= y2 or x1 >= x2 or y1o >= y2o or x1o >= x2o:\n return\n\n channels = img.shape[2]\n\n alpha = alpha_mask[y1o:y2o, x1o:x2o]\n alpha_inv = 1.0 - alpha\n\n for c in range(channels):\n img[y1:y2, x1:x2, c] = (alpha * img_overlay[y1o:y2o, x1o:x2o, c] +\n alpha_inv * img[y1:y2, x1:x2, c])", "title": "" }, { "docid": "45dbee5dee2cc70ab68313b97a7a0d2f", "score": "0.5323651", "text": "def make_hole_alpha(self):\n hole = pg.Surface(self.screen_rect.size).convert_alpha()\n hole.fill((255,255,255,200)) #Experiment with changing this color\n pg.draw.ellipse(hole, (0,0,0,0), self.ellipse_rect)\n return hole", "title": "" }, { "docid": "f47e946c0e1cf5d68f748cfc6a4500d4", "score": "0.53138345", "text": "def negative_image( old_img ):\n # TODO 2a: Read, discuss, and understand the following code.\n # Create an empty image and then set each pixel based on the old image.\n new_img = image.EmptyImage( old_img.getWidth(), old_img.getHeight() )\n\n # Use nested loops to visit every (x,y) coordinate (as opposed to (col,row) as the author does).\n for x in range( old_img.getWidth() ):\n for y in range( old_img.getHeight() ):\n # Get the old pixel and red, green, blue values.\n old_pixel = old_img.getPixel( x, y )\n old_r = old_pixel.getRed()\n old_g = old_pixel.getGreen()\n old_b = old_pixel.getBlue()\n # Calculate the new red, green, and blue values and create a new pixel.\n new_r = 255 - old_r\n new_g = 255 - old_g\n new_b = 255 - old_b\n new_pixel = image.Pixel( new_r, new_g, new_b )\n # Set the pixel in the new image.\n new_img.setPixel( x, y, new_pixel )\n\n return new_img", "title": "" }, { "docid": "4a26cd7bbe85b36d713045439762ffcc", "score": "0.5308459", "text": "def overlay_image_alpha(self, img, img_overlay, pos):\n x, y = pos\n\n # create alpha_mask\n alpha_mask = img_overlay[:, :, 3] / 255.0\n\n # Image ranges\n y1, y2 = max(0, y), min(img.shape[0], y + img_overlay.shape[0])\n x1, x2 = max(0, x), min(img.shape[1], x + img_overlay.shape[1])\n\n # Overlay ranges\n y1o, y2o = max(0, -y), min(img_overlay.shape[0], img.shape[0] - y)\n x1o, x2o = max(0, -x), min(img_overlay.shape[1], img.shape[1] - x)\n\n # Exit if nothing to do\n if y1 >= y2 or x1 >= x2 or y1o >= y2o or x1o >= x2o:\n return\n\n channels = img.shape[2]\n\n alpha = alpha_mask[y1o:y2o, x1o:x2o]\n alpha_inv = 1.0 - alpha\n\n for c in range(channels):\n img[y1:y2, x1:x2, c] = (\n alpha * img_overlay[y1o:y2o, x1o:x2o, c] +\n alpha_inv * img[y1:y2, x1:x2, c]\n )", "title": "" }, { "docid": "ade53398aeb1275ccc04f6dc0b15bc96", "score": "0.53083014", "text": "def visualizeDisparityImage(self):\n img = self.getDisparityImage()\n max = np.max(img)\n min = np.min(img)\n img = img * float((255./float((max-min))))\n img = np.uint8(img)\n\n img = cv2.applyColorMap(img, cv2.COLORMAP_JET)\n return img", "title": "" }, { "docid": "f30f3ad743507149c35eb5c029a87928", "score": "0.52898675", "text": "def _posterize_impl(pil_img, level, _):\n level = int_parameter(level, 4)\n return ImageOps.posterize(pil_img.convert('RGB'),\n 4 - level).convert('RGBA')", "title": "" }, { "docid": "c3b41400183991cd7750829682d4f295", "score": "0.5278418", 
"text": "def darken(self, amount):\n shade = pygame.Surface((self.frame.get_width(), self.frame.get_height()))\n shade.fill((amount, amount, amount))\n self.frame.blit(shade, (0, 0), special_flags=pygame.BLEND_SUB)", "title": "" }, { "docid": "902d0d5e30e6000031cbf4790cf79e3e", "score": "0.52764565", "text": "def set_opacity(self, value):\n self.set_opacity_unit(value)", "title": "" }, { "docid": "853c10111dd19031a8e187c633c6087e", "score": "0.5253273", "text": "def semitone(img):\n if str(img.mode) == \"L\":\n return img\n\n width = img.size[0]\n height = img.size[1]\n new_image = Image.new(\"L\", (width, height))\n\n for x in range(width):\n for y in range(height):\n pix = img.getpixel((x, y))\n sum_ = 0.3 * pix[0] + 0.59 * pix[1] + 0.11 * pix[2]\n # sum_ = sum(pix) // 3\n new_image.putpixel((x, y), int(sum_))\n return new_image", "title": "" }, { "docid": "19421fb054ad6f2065577b57f7419b1e", "score": "0.52461505", "text": "def grey_image():\n return np.ones((1, 3, img_width, img_height)) * 128.0", "title": "" }, { "docid": "76dc7ba03bb5f41810e5d983ac1ce621", "score": "0.5242742", "text": "def set_slices_opacity(self, value):\n for slicer_id in self.slicers:\n slicer = self.slicers[slicer_id]\n if slicer.actor is not None:\n slicer.actor.alpha(value)", "title": "" }, { "docid": "444abd4d56c4dede3fd3f88344ae1edc", "score": "0.5237606", "text": "def hue_image(surface):\n res = surface.copy()\n pix = pygame.PixelArray(surface)\n pix_res = pygame.PixelArray(res)\n \n for x in range(surface.get_width()):\n for y in range(surface.get_height()):\n if pix_res[x][y] != 0: \n pix_res[x][y] = level_to_color(\n grey_level((pix_res[x][y])))\n \n return res", "title": "" }, { "docid": "99b104fcad7f27ad35c7a9cff21c4e33", "score": "0.52333075", "text": "def overlay_image(overlay, alpha, background):\n output = np.asarray(list(background))\n cv2.addWeighted(overlay, alpha, output, 1 - alpha, 0, output)\n\n return output", "title": "" }, { "docid": "a6bad61873ef506ab43b58565c6749de", "score": "0.52319425", "text": "def adjust_image(image):", "title": "" }, { "docid": "9614432d095c20158c884256f79eef54", "score": "0.52269083", "text": "def _ext(self, img, N=16):\n w, h = img.size\n we, he = w+(N-w%N), h+(N-h%N)\n nimg = Image.new(\"RGB\", (we,he,))\n nimg.paste(img, (0,0,))\n return nimg", "title": "" }, { "docid": "f9cc6e9e3819fe60cc01a0a0ef1b3836", "score": "0.5224664", "text": "def bgopacity(self, bgopacity):\n self._bgopacity = bgopacity", "title": "" }, { "docid": "fb092c3486f5ee53f31679791e506bf0", "score": "0.5215097", "text": "def posterize(input_image: Image) -> Image:\n new_image = copy(input_image)\n for x, y, (r, g, b) in input_image:\n color = _adjust_component(r, g, b)\n set_color(new_image, x, y, color)\n return new_image", "title": "" }, { "docid": "f93789f79e482829d8ee08af2fa54b1e", "score": "0.52137136", "text": "def find_flames(img):\n image = SimpleImage(img)\n # TODO: your code here\n for px in image:\n avg = (px.red + px.green + px.blue)/3\n #print(\"1-->\",px.red)\n if px.red > avg:\n px.red = px.red *225\n px.green = 1\n px.blue = 1\n else:\n px.red =avg\n px.green =avg\n px.blue = avg\n return image", "title": "" }, { "docid": "250c48bca43d2dd0ac51a42eb7349e0a", "score": "0.52052075", "text": "def make_stretched(filename):\n image = SimpleImage(filename)\n # Create a stretched image and return it\n return image", "title": "" }, { "docid": "88f696678f7aa81f28969bbe66b13f94", "score": "0.52007693", "text": "def augment_brightness(image):\n image1 = 
cv2.cvtColor(image,cv2.COLOR_RGB2HSV)\n random_bright = np.random.uniform()\n image1[:,:,2] = image1[:,:,2]*random_bright\n image1 = cv2.cvtColor(image1,cv2.COLOR_HSV2RGB)\n return image1", "title": "" }, { "docid": "b810a02e30fe347a2e40e2b6059cf375", "score": "0.52001226", "text": "def get_transparency(self):\n return self.transparency", "title": "" }, { "docid": "4dab235e1ee8769ef31afa656f0c6a22", "score": "0.5195799", "text": "def erode(img, size, iterations=1):\n kernel = np.ones((size, size), np.uint8)\n bg = cv2.erode(img, kernel, iterations=iterations)\n return bg", "title": "" } ]
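Several of the negative passages above repeat the same overlay_image_alpha per-channel blend. A condensed NumPy-only sketch of that blend, with illustrative names:

    import numpy as np

    def blend_overlay(base, overlay, alpha):
        # Per-channel alpha blend: both images are HxWxC float arrays,
        # alpha is an HxW mask in [0, 1].
        a = alpha[..., None]          # add a channel axis for broadcasting
        return a * overlay + (1.0 - a) * base

    base = np.zeros((2, 2, 3))
    overlay = np.ones((2, 2, 3))
    alpha = np.full((2, 2), 0.25)
    assert np.allclose(blend_overlay(base, overlay, alpha), 0.25)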
2cbe506a10e6bea626cbfd3812ef22cf
Checks if a table position is of the expected color.
[ { "docid": "85e4fb4ef5c56c335a3a8b96f450ee88", "score": "0.7153457", "text": "def is_expected_cell_color(self, position, expected_cell_color):\n obj = self.deck[position[0]][position[1]]\n\n if isinstance(obj, Checker) and obj.color.lower() == expected_cell_color:\n return True\n elif obj == expected_cell_color:\n return True\n else:\n return False", "title": "" } ]
[ { "docid": "dbac32eb9693bda25524684035fcb65b", "score": "0.6258472", "text": "def check_color(board):\n columns_empt = empty_columns(board)\n columns = fill_columns(columns_empt,board)\n colors = []\n for i in range(len(columns)):\n color = columns[i][:-(i+1)] + board[-(i+1)][i:]\n colors.append(color)\n uniqueness = check_rows(colors)\n return uniqueness", "title": "" }, { "docid": "5dc11365160b36a4dd17f6f527c0400e", "score": "0.61982256", "text": "def hasCol(self):\n return bool(self.colors)", "title": "" }, { "docid": "8e5f9310c566446e5aa63ba7e9317071", "score": "0.61860186", "text": "def has_color_table(tile):\n log_info_mssg(\"Checking for color table in \" + tile)\n has_color_table = False\n\n gdalinfo_command_list = ['gdalinfo', '-json', tile]\n log_the_command(gdalinfo_command_list)\n gdalinfo = subprocess.Popen(gdalinfo_command_list, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n \"\"\"\n returncode = gdalinfo.wait()\n if returncode != 0:\n log_sig_err('gdalinfo return code {0}'.format(returncode), sigevent_url)\n return False\n tileInfo = json.loads(gdalinfo.stdout.read())\n \"\"\"\n try:\n outs, errs = gdalinfo.communicate(timeout=90)\n if len(errs) > 0:\n log_sig_err('gdalinfo errors: {0}'.format(errs), sigevent_url)\n tileInfo = json.loads(outs)\n\n for band in tileInfo[\"bands\"]:\n has_color_table |= \"colorTable\" in band\n\n except subprocess.TimeoutExpired:\n gdalinfo.kill()\n log_sig_err('gdalinfo timed out', sigevent_url)\n\n log_info_mssg((\"No color table found\", \"Color table found in image\")[has_color_table])\n return has_color_table", "title": "" }, { "docid": "d80767e8caf5a51d397c8f43aec7cc63", "score": "0.6181917", "text": "def is_colorized(self):", "title": "" }, { "docid": "0320316c31d1004fc47d9217ae42c672", "score": "0.6146896", "text": "def table_check(self, ansi, char, code):\n self.assertEqual(ansi._char_indexes, char)\n self.assertEqual(ansi._code_indexes, code)", "title": "" }, { "docid": "5f0ec583a26898747ea24f7fa9478cce", "score": "0.61367565", "text": "def checkIfSolved(self):\n for i in range(self.rows):\n colorCount = [0 for j in range(self.colorNumber)]\n for j in range(self.cols):\n colorCount[self.colors[i][j]] += 1\n if j >= 2 \\\n and self.colors[i][j] == self.colors[i][j - 1] \\\n and self.colors[i][j] == self.colors[i][j - 2]:\n return False\n for j in range(self.colorNumber):\n if colorCount[j] != self.cols / self.colorNumber:\n return False\n\n for j in range(self.cols):\n colorCount = [0 for j in range(self.colorNumber)]\n for i in range(self.rows):\n colorCount[self.colors[i][j]] += 1\n if i >= 2 \\\n and self.colors[i][j] == self.colors[i - 1][j] \\\n and self.colors[i][j] == self.colors[i - 2][j]:\n return False\n for i in range(self.colorNumber):\n if colorCount[i] != self.rows / self.colorNumber:\n return False\n return True", "title": "" }, { "docid": "87144b0711dd5e96cdaa09e4bdaae8b8", "score": "0.6083386", "text": "def is_inside_table(self, position):\n if (position[0] < 0 or\n position[1] < 0 or\n position[0] >= self.SIZE or\n position[1] >= self.SIZE):\n return ()\n\n return position", "title": "" }, { "docid": "a446a44de282e46c73d7440867767662", "score": "0.60559076", "text": "def bug(x, y, t, st):\n\n current_x = int(st[\"x\"])\n current_y = int(st[\"y\"])\n if x == current_x and y == current_y:\n return black\n else:\n return white", "title": "" }, { "docid": "fa39d034c2c4afc5009c6582236ecb52", "score": "0.6021886", "text": "def checkIfThereAreColorInRangeOfPixel(color,i,o):\n for y in range(0,10):\n if(i+y < height):\n 
if(isEgal(image[i+y,o],color)):\n return False\n if(isEgal(image[i-y,o],color)):\n return False\n if(o+y < width):\n if(isEgal(image[i,o+y],color)):\n return False \n if(isEgal(image[i,o-y],color)):\n return False \n return True", "title": "" }, { "docid": "4b3dbf8cf1286f2c88cb37436379a46c", "score": "0.6000572", "text": "def screenshot_is_not_a_solid_color(self):\n print(\"Checking that image is not a single solid color...\")\n extrema = self.opened_image.convert(\"L\").getextrema()\n print(\"Extrema: {}\".format(extrema))\n if extrema[0] == extrema[1]: # solid color - elements in tuple are the same\n return False\n return True", "title": "" }, { "docid": "35ab356b3ba9820444db238799988e40", "score": "0.59890753", "text": "def test_color_difference(self):\n\t\tred = Color(255,0,0)\n\t\tself.assertEqual(red.difference(red), 0.0)", "title": "" }, { "docid": "4eba995688f9c606a579ddf51f91e45d", "score": "0.5985757", "text": "def test_different_crow_not_equal(self):\n self.assertNotEqual(\n Position(column=0, row=0),\n Position(column=0, row=1),\n )", "title": "" }, { "docid": "88c5af87944f6eacaa958a70110b12f6", "score": "0.5903905", "text": "def GetCellColor(self):\n ...", "title": "" }, { "docid": "32ef284673825f19845da6112ee5b2bb", "score": "0.5852382", "text": "def colored(self, color):\n colored = []\n for i in range(5):\n for j in range(9):\n if self[i][j] == color:\n colored.append((i,j))\n return colored", "title": "" }, { "docid": "cbd49ef46d5575497a0067e5591dde36", "score": "0.5847392", "text": "def is_coloring(graph, coloring):\n for u, row in enumerate(graph):\n for v in row:\n if coloring[u] == coloring[v]:\n return False\n return True", "title": "" }, { "docid": "89ccd4ec7bdaec1855e920dea670a49a", "score": "0.58470505", "text": "def _has_query_color(self, row, *colors: str) -> bool:\n raise NotImplementedError", "title": "" }, { "docid": "89ccd4ec7bdaec1855e920dea670a49a", "score": "0.58470505", "text": "def _has_query_color(self, row, *colors: str) -> bool:\n raise NotImplementedError", "title": "" }, { "docid": "89ccd4ec7bdaec1855e920dea670a49a", "score": "0.58470505", "text": "def _has_query_color(self, row, *colors: str) -> bool:\n raise NotImplementedError", "title": "" }, { "docid": "89ccd4ec7bdaec1855e920dea670a49a", "score": "0.58470505", "text": "def _has_query_color(self, row, *colors: str) -> bool:\n raise NotImplementedError", "title": "" }, { "docid": "387bcfcba7e06fbc281399e9ff85bf8f", "score": "0.5827238", "text": "def checkifValidTent(self, pos: tuple, curRowValues, curColValues):\n \n if self.checkWithinGrid(pos) == False:\n return False\n\n if curRowValues[pos[0]] == 0:\n return False\n if curColValues[pos[1]] == 0:\n return False\n\n \n neighborsPos = self.getNeighborCells(pos)\n test = self.map[neighborsPos[0][0]][neighborsPos[0][1]]\n neighborsVal = [list(self.map[neighbor[0]][neighbor[1]] for neighbor in neighborsPos)]\n \n for neighbor in neighborsVal:\n if neighbor == '#':\n return False\n return True", "title": "" }, { "docid": "1ddd3b36d6b9b297f9f879b1f13264a7", "score": "0.58185077", "text": "def test_get_color():\n t = Tile(100, 200, 0)\n assert t.get_color() == 0\n t = Tile(100, 200, 255)\n assert t.get_color() == 255", "title": "" }, { "docid": "c40ec832922bcf584aae9061d488d340", "score": "0.5790112", "text": "def weWantThisPixel( col, row ):\n if col%10 == 0 and row%10 == 0:\n return True\n else:\n return False", "title": "" }, { "docid": "7b9efbcce252f0f90138e5eccffebc7e", "score": "0.57767403", "text": "def test_all_visited(table):\n for row in 
table:\n for column in row:\n if column[4] == 0:\n return False\n return True", "title": "" }, { "docid": "7eec089c66f77bdc58a061ca8606827f", "score": "0.5766763", "text": "def all_equal(coord, pos, color):\n x_min, x_max, y_min, y_max = boundaries(coord)\n if not (x_min >= 0 and x_max <= len(pos) - 1 and\n y_min >= 0 and y_max <= len(pos) - 1):\n return False\n colors = [pos[p[0]][p[1]] for p in coord]\n return (colors[1:] == colors[:-1] and colors[0] == color)", "title": "" }, { "docid": "2aed2df925d9b93144112ddee7817ba7", "score": "0.57518625", "text": "def test_modify_color(self):\n pass", "title": "" }, { "docid": "d42e488d4c3b3b5cd6cd7e325df2acc6", "score": "0.5742253", "text": "def valid_pos(self, row, col):\n return 0 <= row < self.row_size and 0 <= col < self.col_size", "title": "" }, { "docid": "eaca0780a26ea1af615288852bc77669", "score": "0.5739586", "text": "def check_muehle(row):\n #if not row[0].color=='¤':\n if row[0].color==row[1].color==row[2].color:\n if row[0].color=='$' or row[0].color=='£':\n return(True)\n return(False)", "title": "" }, { "docid": "26e563854c95a894df8382f2a7adc23f", "score": "0.5736556", "text": "def is_valid_colour(colour: Tuple[int, int, int]) -> bool:\n for i in range(3):\n if not 0 <= colour[i] <= 255:\n return False\n return True", "title": "" }, { "docid": "ff21f0c4fae8eaa7c42f4833a3400e63", "score": "0.5723621", "text": "def compare_color(color1, color2):\n if compare_floats(color1[0], color2[0]) and compare_floats(color1[1], color2[1]) and compare_floats(color1[2], color2[2]):\n return True\n return False", "title": "" }, { "docid": "3327eb19412d2eda2549eac48c1f178b", "score": "0.56870097", "text": "def is_red(self):\r\n return self.color == Color.RED", "title": "" }, { "docid": "8930d22abd78acbbad3ec7d054c0aa0c", "score": "0.5685126", "text": "def advantage(board, color) -> bool:\n pass", "title": "" }, { "docid": "41a8ac1ac10faca23f5e0ff7e02b614a", "score": "0.56685513", "text": "def check_red_hsv(h,s,v):\n if -1< h < 20 and 40<s<100 and 40<v<100:\n return True\n return False", "title": "" }, { "docid": "7c93e3001aac82865d5ad3e9e014bf4f", "score": "0.5655624", "text": "def TableCell(self) -> bool:", "title": "" }, { "docid": "31fd6888e21a147b1a682481b029587b", "score": "0.56421965", "text": "def _is_surrounded(self, point, color):\n for nb in self._neighbors(point):\n nb_color = self.board[nb]\n if nb_color != BORDER and nb_color != color:\n return False\n return True", "title": "" }, { "docid": "54fd3db74f668fdca687780bd8b01e19", "score": "0.56314737", "text": "def _requires_matrix_coloring(self):\n return True", "title": "" }, { "docid": "a2c7dcb97647f116a25ca1679ea79418", "score": "0.56280345", "text": "def is_at_round_table(self):\n return self.__is_at_round_table", "title": "" }, { "docid": "a074df2f42fc2065cc9c37839478bf62", "score": "0.5617327", "text": "def control_of_center(board, color) -> bool:\n pass", "title": "" }, { "docid": "aeb01322dd0237ea52abca5eb0e00de7", "score": "0.5615042", "text": "def check(board,screen):\n b2 = deepcopy(board)\n effect = False\n rows = []\n for i in range(20):\n row = [b2[x][i] for x in range(10)]\n if all(i!='' for i in row):\n rows.append(i)\n for t in row:\n t.color=\"#FFFFFF\"\n effect=True\n lrows = len(rows)\n if effect:\n # flashing effect when line clears\n blitboard(b2,screen)\n pygame.display.flip()\n time.sleep(0.02)\n blitboard(board,screen)\n pygame.display.flip()\n time.sleep(0.02)\n blitboard(b2,screen)\n pygame.display.flip()\n while rows:\n # for each of the cleared rows: remove it 
from the board, \n # and add a new empty row at the top. also, advance the y-coord\n # of any blocks above.\n current = max(rows)\n for i in range(10):\n for j in range(current):\n if board[i][j]!='':\n board[i][j].y += 1\n del board[i][current]\n board[i] = [''] + board[i]\n rows.remove(current)\n rows = map(lambda y: y+1,rows)\n return lrows", "title": "" }, { "docid": "b9426708531b6a4e4017d587b29a1f98", "score": "0.5613054", "text": "def getCountScore(self,color):\n count = 0\n opposite = (bool(color - 1) ^ bool(1)) + 1 \n for row in range(self.rows):\n for col in range(self.rows):\n test = self.board[row][col]\n if test == color:\n count += 1\n elif test == opposite:\n count -= 1\n return count", "title": "" }, { "docid": "e5eca0c3f7b7c017af7f2b4ad749be91", "score": "0.55899715", "text": "def who_wins(color_of_list, new_position):\n global black_1\n global white_1\n global display_1\n hori=0\n vert=0\n uldr=0\n urdl=0\n black_1=False\n white_1=False\n #display_1=a\n \n for i in range(1,5):\n # horizontal\n if [int(new_position[0])+25*i,int(new_position[1])] in color_of_list:\n hori+=1\n elif [int(new_position[0]) -25*i,int(new_position[1])] in color_of_list:\n hori+=1\n else:\n break\n for i in range(1,5):\n # vertical\n if [int(new_position[0]), int(new_position[1])+25*i] in color_of_list:\n vert+=1\n elif [int(new_position[0]), int(new_position[1])-25*i] in color_of_list:\n vert+=1\n else:\n break\n for i in range(1,5):\n # up left, down right\n if [int(new_position[0])+25*i, int(new_position[1])-25*i] in color_of_list:\n uldr+=1\n elif [int(new_position[0])-25*i, int(new_position[1])+25*i] in color_of_list:\n uldr+=1\n else:\n break\n for i in range(1,5):\n # up right, down left\n if [int(new_position[0])+25*i, int(new_position[1])+25*i] in color_of_list:\n urdl+=1\n elif [int(new_position[0])-25*i, int(new_position[1])-25*i] in color_of_list:\n urdl+=1\n else:\n #print(\"value i:\",i)\n break\n if hori >= 4 or vert >= 4 or urdl >= 4 or uldr >= 4:\n if (n-1)%2 == 1:\n black_1=True\n return()\n if (n-1)%2 == 0:\n white_1=True\n return()\n #print(a)", "title": "" }, { "docid": "07247bc619ca82ed55bcd7584021e76b", "score": "0.5586632", "text": "def get_color(self, alignment, column_i, seq_i):\n pass", "title": "" }, { "docid": "a45cd32bd304e40736c5b2ee18b1d6ca", "score": "0.55775666", "text": "def valid(self, col: int):\n for i in range(col):\n if abs(self.tracker[i] - self.tracker[col]) in {0, col - i}:\n return False\n return True", "title": "" }, { "docid": "a7a5e3bf460e7d2cf029a56faf9674ab", "score": "0.5575241", "text": "def _is_red(self, node):\n if node is None:\n return False\n return node.color is RED", "title": "" }, { "docid": "82a8cf4facf77908f528e64772b63a00", "score": "0.55657977", "text": "def is_draw(self):\n\n return all([self.col_counters[col] == self.Z for col in self.col_counters])", "title": "" }, { "docid": "9e23e801b444ca6190f14a5be80a0f98", "score": "0.55599886", "text": "def check(self):\r\n\t\t\r\n\t\t#color conversion to DisplaySpace\r\n\t\tself.rgb = mc.colorManagementConvert(toDisplaySpace=[self.rgb[0], self.rgb[1], self.rgb[2]])\r\n\t\t\r\n\t\t#the value cannot be greater than 1.0\r\n\t\tfor i in range(len(self.rgb)):\r\n\t\t\t\r\n\t\t\tif self.rgb[i] > 1.0:\r\n\t\t\t\tself.rgb[i] = 1.0", "title": "" }, { "docid": "a3713b0a9d98feec939adb8d192377f3", "score": "0.5545412", "text": "def get_color(self, row, col):\n for color in range(len(utils.COLORS)):\n if self._board[color][row][col] == 1:\n return color\n return -1", "title": "" }, { "docid": 
"ded3f5bca237f3c0b75b835cdb7eacb4", "score": "0.55349505", "text": "def mark_inside_cells(rgb, table, shift_x=0, shift_y=0):\n new_rgb = rgb.copy()\n table.apply(lambda row: mark(new_rgb, row, shift_x=shift_x, shift_y=shift_y), axis=1)\n return new_rgb", "title": "" }, { "docid": "e70b4df7ee19930ec36f701c03c3cad9", "score": "0.55313253", "text": "def test_favorite_colors(self):\n self.assertIn('tan', colors)\n self.assertNotIn('auburn', colors)", "title": "" }, { "docid": "c581dc6c805f5df50a49559a00a7984e", "score": "0.5519087", "text": "def eq_color(self, other):\r\n\t\treturn self.color == other", "title": "" }, { "docid": "f6afa212c7f1c0d5374541c1cdb1f49c", "score": "0.55060774", "text": "def test_change_color():\n t = Tile(100, 200, 0)\n t.change_color(255)\n assert t.color == 255\n t = Tile(100, 200, 255)\n t.change_color(0)\n assert t.color == 0", "title": "" }, { "docid": "21a93e13592c8112bd230c8e1d6a3a49", "score": "0.5488811", "text": "def is_danger(self, pos):\n if(self.check_matrix(pos) == 1 or self.check_matrix(pos) == 2):\n return 1\n return 0", "title": "" }, { "docid": "c311749ba99271e8988b7f1ac2f0633d", "score": "0.5484915", "text": "def pixel_matches_color(self, coords, rgb, threshold=0):\n wx, wy = self.get_window_rect()[:2]\n x, y = coords\n # self.move_mouse(x, y)\n return pyautogui.pixelMatchesColor(x + wx, y + wy, rgb, tolerance=threshold)", "title": "" }, { "docid": "2200a7b409eb7e2de9d6d375d08281c8", "score": "0.54759127", "text": "def test_label_color():\n np.random.seed(0)\n data = np.random.randint(20, size=(10, 15))\n layer = Labels(data)\n col = layer.get_color(0)\n assert col is None\n\n col = layer.get_color(1)\n assert len(col) == 4", "title": "" }, { "docid": "2394a1c01a06e961862214b7c17f7c4c", "score": "0.54751223", "text": "def has_colorized_style(node):\n for it_node in node.getiterator():\n if \"style\" in it_node.attrib:\n node_style_dict = ss.parseStyle(it_node.attrib[\"style\"])\n for style_attrib in [\"stroke\", \"fill\"]:\n if style_attrib in node_style_dict and \\\n node_style_dict[style_attrib].lower().replace(\" \", \"\") not in [\"rgb(0%,0%,0%)\",\n \"black\",\n \"none\",\n \"#000000\"]:\n return True\n return False", "title": "" }, { "docid": "b068f19244f0d7196eaa23c321d74ce2", "score": "0.54723424", "text": "def check_col_validity(self, target_cell, test_value):\n for row in self.grid:\n cell = row[target_cell.y]\n if cell.x == target_cell.x and cell.y == target_cell.y:\n pass\n elif row[target_cell.y].value == test_value:\n return False\n return True", "title": "" }, { "docid": "361dd154ba3933d32f9fc38da514f05b", "score": "0.5471451", "text": "def is_on_board(self, pos):\r\n return (0 <= pos[0] <= 6) and (0 <= pos[1] <= 6)", "title": "" }, { "docid": "c1348097424ec9f212e6fc9cf45211ef", "score": "0.5451223", "text": "def validate(self, move, color):\n if move[0] != '[' or move[4] != ']' or move[2] != ',':\n print \"Use the form [x,y] to enter moves!\"\n return False\n\n col = int(move[3])\n row = int(move[1])\n\n if row > 3 or row < 1:\n print \"Bad row value (must be between 1 and 3)\"\n return False\n\n if col > 3 or col < 1:\n print \"Bad column value (must be between 1 and 3)\"\n return False\n\n if self.get(row - 1, col - 1) != self.empty():\n print \"Position occupied by color\", self.get(row-1, col-1)\n return False\n\n return True", "title": "" }, { "docid": "97aba65f332d01dcae2fd94d1932f712", "score": "0.544917", "text": "def is_color_style(self):\r\n\t\ttry:\r\n\t\t\tstyle = self.styles[0]\r\n\t\texcept:\r\n\t\t\t## 
ThematicStyleColormap\r\n\t\t\treturn True\r\n\t\telse:\r\n\t\t\tif isinstance(style, (int, float, np.integer, np.floating)):\r\n\t\t\t\treturn False\r\n\t\t\telse:\r\n\t\t\t\tcc = matplotlib.colors.ColorConverter()\r\n\t\t\t\ttry:\r\n\t\t\t\t\tcc.to_rgb(style)\r\n\t\t\t\texcept:\r\n\t\t\t\t\treturn False\r\n\t\t\t\telse:\r\n\t\t\t\t\treturn True", "title": "" }, { "docid": "d11b7405cd5ed6e8cf9323ecc23ae2ab", "score": "0.5446401", "text": "def is_valid_location(board, col):\n return board[NUMBER_ROWS - 1][col] == 0", "title": "" }, { "docid": "4b2e679e1985573a8d68a09b1de14859", "score": "0.5443651", "text": "def test_different_column_not_equal(self):\n self.assertNotEqual(\n Position(column=0, row=0),\n Position(column=1, row=0),\n )", "title": "" }, { "docid": "816bf39911b0fde7376c28fbcdef0449", "score": "0.5439235", "text": "def plottable(self):\n return False # override in subclass with specific tests", "title": "" }, { "docid": "82953c3f5726a3b772076ec472878bd8", "score": "0.5434131", "text": "def is_colorized(self):\n # pstoedit stores color information as attributes, not as css styles, so checking for attributes should\n # be enough. But to be on the save side...\n return self.has_colorized_attribute(self._node) or self.has_colorized_style(self._node)", "title": "" }, { "docid": "f56449dfc8c64102824fa8fa5f00031e", "score": "0.5429223", "text": "def collisionPlatformColor(self):\r\n for colorPlat in self.map.rects[\"colorPlateforme\"]:\r\n rect = colorPlat[0]\r\n z = colorPlat[1]\r\n if (self.z <= z + 200 and self.z >= z - 200) or ((self.z <= (z + 200) % 1530 + 1530) and (self.z >= (z - 200) % 1530)):\r\n self.collisionRect(rect)", "title": "" }, { "docid": "322bcb8703ea17dc6e9458863d2ba631", "score": "0.5426759", "text": "def set_color_row(self, table):\n\n for i in range(table.rowCount()):\n if i % 2 == 0:\n for j in range(table.columnCount()):\n table.item(i, j).setBackground(QtGui.QColor(252, 252, 252))\n else:\n for j in range(table.columnCount()):\n table.item(i, j).setBackground(QtGui.QColor(232, 225, 225))", "title": "" }, { "docid": "36f35c920260558a29df43ef1930ec50", "score": "0.54201204", "text": "def hypermodern_position(board) -> bool:\n pass", "title": "" }, { "docid": "5ef563ae6d4787b0a01fb5b737886429", "score": "0.5419084", "text": "def _has_border(self):\r\n\r\n return self._deco & TextTable.BORDER > 0", "title": "" }, { "docid": "2fd7a0472cc90f9a9bb3f5d7e9f8317d", "score": "0.5415262", "text": "def find_pieces(self, color):\n ret = []\n for row in range(self.rows):\n for col in range(self.cols):\n if self.data[row][col] == color:\n ret.append((row,col))\n return ret", "title": "" }, { "docid": "bae8ecf8a6068a6cbe35920546a53eab", "score": "0.54032797", "text": "def wrongColor(self):\n SimonTurtle.writeResult(self, False)", "title": "" }, { "docid": "77e8c15f33aa14fc25c16aaab3f70153", "score": "0.53991616", "text": "def validate(table):\r\n # test each column\r\n for i in range(9):\r\n test_cell = Cell()\r\n test_cell.minus(*table[i])\r\n if len(test_cell) > 0:\r\n return False\r\n # test each row\r\n for j in range(9):\r\n test_cell = Cell()\r\n test_cell.minus(*(table[i][j] for i in range(9)))\r\n if len(test_cell) > 0:\r\n return False\r\n # test each box\r\n for ki,kj in (itertools.product([0,3,6], repeat=2)):\r\n test_cell = Cell()\r\n for i, j in ((x, y) for x in range(ki, ki+3) for y in range(kj, kj+3)):\r\n test_cell.minus(table[i][j])\r\n if len(test_cell) > 0:\r\n return False\r\n return True", "title": "" }, { "docid": "d2b5ebba6fe9019105099d481c670204", 
"score": "0.53982556", "text": "def requires_color_esc(c):\n t = ord(c[0])\n return c >= COLOR_ON and c <= COLOR_INV", "title": "" }, { "docid": "110ea64af249c45dad01f091ae923220", "score": "0.5395469", "text": "def valid_color(c):\r\n return (type(c) == RGB or\r\n type(c) == HSV or\r\n type(c) == tuple or\r\n type(c) == str)", "title": "" }, { "docid": "894f92253c85a7782019251d521c9036", "score": "0.53953326", "text": "def _is_valid(self, coloring):\n e = self.graph.edges\n c = coloring\n return all(self._color(w1, c) != self._color(w2, c) for w1, w2 in e)", "title": "" }, { "docid": "f7de1037869aa3b7d881c262a619a079", "score": "0.53915805", "text": "def check_win(self):\n red_disk = 1\n yellow_disk = 2\n red_row = 0\n red_col = 0\n yellow_row = 0\n yellow_col = 0\n\n # Check horizontal direction\n for i in self.board:\n for j in i:\n if j == red_disk:\n red_row += 1\n yellow_row = 0\n elif j == yellow_disk:\n yellow_row += 1\n red_row = 0\n elif j == 0:\n red_row = 0\n yellow_row = 0\n if red_row == self.WIN_MOVES:\n self.red_win = True\n elif yellow_row == self.WIN_MOVES:\n self.yellow_win = True\n red_row = 0\n yellow_row = 0\n\n # Check vertical direction\n for i in range(self.COL):\n for j in self.board:\n if j[i] == red_disk:\n red_col += 1\n yellow_col = 0\n elif j[i] == yellow_disk:\n yellow_col += 1\n red_col = 0\n elif j == 0:\n red_col = 0\n yellow_col = 0\n if red_col == self.WIN_MOVES:\n self.red_win = True\n elif yellow_col == self.WIN_MOVES:\n self.yellow_win = True\n red_col = 0\n yellow_col = 0\n\n # Check bottom-left to up-right direction\n for i in range(self.ROW - 3):\n for j in range(self.COL - 3):\n if self.board[i + 1][j + 1] == self.board[i + 2][j + 2] == self.board[i + 3][j + 3] == \\\n self.board[i][j] == red_disk:\n self.red_win = True\n elif self.board[i + 1][j + 1] == self.board[i + 2][j + 2] == self.board[i + 3][j + 3] == \\\n self.board[i][j] == yellow_disk:\n self.yellow_win = True\n\n # Check up-left to bottom right direction\n for i in range(3, self.ROW):\n for j in range(self.COL - 3):\n if self.board[i - 1][j + 1] == self.board[i - 2][j + 2] == self.board[i - 3][j + 3] == \\\n self.board[i][j] == red_disk:\n self.red_win = True\n elif self.board[i - 1][j + 1] == self.board[i - 2][j + 2] == self.board[i - 3][j + 3] == \\\n self.board[i][j] == yellow_disk:\n self.yellow_win = True", "title": "" }, { "docid": "8e617a64cc49aa4001e36230798b4e61", "score": "0.5388823", "text": "def getPosScore(self,color):\n score = 0\n rest = 0\n opposite = (bool(color - 1) ^ bool(1)) + 1 \n for row in range(self.rows):\n for col in range(self.rows):\n test = self.board[row][col]\n if test == color:\n score += self.posScore[row][col]\n elif test == opposite:\n score -= self.posScore[row][col]\n else:\n rest += 1\n \n # turn bonus\n if score >= 0:\n score += (128-rest*2)\n else:\n score -= (128-rest*2)\n return score", "title": "" }, { "docid": "3ce1a302aa554d1fef8156d679009d53", "score": "0.53823966", "text": "def check_red_rgb(r,g,b):\n if r>80 and g <25 and b <25:\n return True\n return False", "title": "" }, { "docid": "0d59377a75873473491209cf95239b46", "score": "0.53737724", "text": "def color_at(self, position):\n return self.color_a if math.floor(position.x) % 2 == 0 else self.color_b", "title": "" }, { "docid": "588ff91d1f5ace7db700f787f38628d1", "score": "0.5369117", "text": "def test_number_colors(self):\n self.assertEqual(len(colors), 5)", "title": "" }, { "docid": "3a3f6277f81469799898b9f4ee6e9aeb", "score": "0.53629094", "text": "def get_player_at(self, row, 
col):\n if self.board[col][row]=='blue':\n return 1\n elif self.board[col][row]=='red':\n return 2", "title": "" }, { "docid": "7db7288034ce8a704d7d63998a1dab5e", "score": "0.53572065", "text": "def is_end(self):\r\n return self.color==RED", "title": "" }, { "docid": "c4f9a3df238aaee047414c8a9cca5446", "score": "0.5357127", "text": "def _cell_color(status):\n if status == 2:\n return QtGui.QColor(255, 0, 0, 127)\n elif status == 1:\n return QtGui.QColor(255, 255, 0, 127)\n elif status == 0:\n return QtGui.QColor(255, 255, 255, 127)\n else:\n return QtGui.QColor(255, 127, 0, 200)", "title": "" }, { "docid": "b38267c4f03749451b2c740ed1f6a6f8", "score": "0.53536963", "text": "def test_green_valid(self):\n\n expected = PyFunceble.Fore.GREEN + PyFunceble.Style.BRIGHT\n actual = FileCore.get_simple_coloration(PyFunceble.STATUS[\"official\"][\"up\"])\n\n self.assertEqual(expected, actual)", "title": "" }, { "docid": "50f9e4c6940cb728d03b18f8abdfe304", "score": "0.5352768", "text": "async def test_outline_importance():\n async with StyleApp().run_test() as pilot:\n outline = pilot.app.query_one(Container).styles.outline\n desired = (\"round\", Color.parse(\"green\"))\n assert outline.top == desired\n assert outline.left == desired\n assert outline.bottom == desired\n assert outline.right == desired", "title": "" }, { "docid": "1054736cde634a8095c5c2043f234ddc", "score": "0.5350244", "text": "def is_in_check(self, color):\r\n if self._is_in_check == color:\r\n return True\r\n return False", "title": "" }, { "docid": "f368785b2fb74d90620b19f1b79814b8", "score": "0.53461266", "text": "def change_color(surface, grid, rect, x, y):\n if grid[x][y] == 0:\n pygame.draw.rect(surface, BLACK, rect)\n grid[x][y] = 2\n return 0 # Return 0 if player clicks the wrong square\n\n elif grid[x][y] == 1:\n pygame.draw.rect(surface, WHITE, rect)\n grid[x][y] = 2\n return 1 # Return 1 if the player clicks the right square", "title": "" }, { "docid": "817ea04a2105f31d44dd1100fc314e16", "score": "0.534176", "text": "def address_valid(row, col, width, height):\n if row >= 0 and row < height and col >= 0 and col < width:\n return True\n else:\n return False", "title": "" }, { "docid": "5a91de62da4700ed14f3ecd95dc95123", "score": "0.5337289", "text": "def getRGB(self, pos):\n self.__checkIndices__(pos)\n if self.__isdisabled__[ pos[0] ][ pos[1] ]: return cell.RGB_BLACK\n else: return RGB_DICT[ self.__colors__[ pos[0] ][ pos[1] ] ]", "title": "" }, { "docid": "5421e68d431cac54ef0088dfbb80bdf6", "score": "0.5331853", "text": "def check_line(self, x, y, speed_x, speed_y):\n result = []\n current_color = self.table[y][x]\n # reverse move\n current_speed_x = -speed_x\n current_speed_y = -speed_y\n new_x = x + current_speed_x\n new_y = y + current_speed_y\n while (new_x > -1 and new_x < FIELD_COLUMNS and\n new_y > -1 and new_y < FIELD_ROWS and\n self.table[new_y][new_x] == current_color): \n x = new_x\n y = new_y\n new_x = x + current_speed_x\n new_y = y + current_speed_y\n\n # count\n current_speed_x = speed_x\n current_speed_y = speed_y\n new_x = x + current_speed_x\n new_y = y + current_speed_y\n result.append((x, y))\n while True:\n if new_x == -1 or new_x == FIELD_COLUMNS: break\n if new_y == -1 or new_y == FIELD_ROWS: break\n if self.table[new_y][new_x] != current_color: break\n x = new_x\n y = new_y\n result.append((x, y))\n new_x = x + current_speed_x\n new_y = y + current_speed_y\n return result", "title": "" }, { "docid": "02c8035bf0dc69c05e768f812d8cd692", "score": "0.5330657", "text": "def checkNeighbours(table, 
index):", "title": "" }, { "docid": "b6f7707a3cb8dbf762cc4263c3b50f72", "score": "0.53280354", "text": "def _hasColourBar(self):\n return False", "title": "" }, { "docid": "eb19675b3f4c65653a1b884eacd65483", "score": "0.53266567", "text": "def is_colorized(self):\n # pdf2svg consequently uses the style css properties for colorization, so checking for style should\n # be enough. But to be on the save side...\n return self.has_colorized_style(self._node) or self.has_colorized_attribute(self._node)", "title": "" }, { "docid": "b7b74dfea6d75208a4301fcbe4629ddf", "score": "0.53201056", "text": "def check_the_cell(self, position: int) -> bool:\n return self.board[position] == \" \"", "title": "" }, { "docid": "febe803f1c3fd84780a9b57b8f5108b8", "score": "0.53184384", "text": "def CheckSolution(board, rows, columns, colors):\n for i in range(rows):\n colorCount = [0 for c in range(colors)]\n for j in range(columns):\n colorCount[board[i][j]] += 1\n if j >= 2 \\\n and board[i][j] == board[i][j - 1] \\\n and board[i][j] == board[i][j - 2]:\n print(1)\n return False\n for j in range(colors):\n if colorCount[j] != columns / colors:\n print(2)\n return False\n\n for j in range(columns):\n colorCount = [0 for c in range(colors)]\n for i in range(rows):\n colorCount[board[i][j]] += 1\n if i >= 2 \\\n and board[i][j] == board[i - 1][j] \\\n and board[i][j] == board[i - 2][j]:\n return False\n for i in range(colors):\n if colorCount[i] != rows / colors:\n return False\n return True", "title": "" }, { "docid": "ccc030ad04fa73087e4e43fb6371cdd9", "score": "0.5312588", "text": "def __is_attacked_no_king(self, position: Tuple[int, int], color: str) -> bool:\n if color == \"white\":\n return Statuses.ATTACKED_BLACK in self.board[position[1]][position[0]].status \\\n or Statuses.DEFENDED_BLACK in self.board[position[1]][position[0]].status\n else:\n return Statuses.ATTACKED_WHITE in self.board[position[1]][position[0]].status \\\n or Statuses.DEFENDED_WHITE in self.board[position[1]][position[0]].status", "title": "" }, { "docid": "2a852c7f89a2498de7d2b0cc0f86899e", "score": "0.5312513", "text": "def check_rect(matrix):\n len_last = len(matrix[0])\n for rows in range(1, len(matrix)):\n if len_last != len(matrix[rows]):\n return False\n len_last == len(matrix[rows])\n return True", "title": "" }, { "docid": "9a765973e87ba28931e760c7a06363d6", "score": "0.5309467", "text": "def detect(frame: numpy.ndarray) -> bool:\n color = frame[:20, 1100:1150].mean(axis=(0, 1))\n return numpy.linalg.norm(color - BG_COLOR) < 5", "title": "" }, { "docid": "ee8852cf58e6789b9268c55ec9440ea3", "score": "0.5296303", "text": "def color_table(value, table='hsv.rgb', normalized=False, show=False):\n import threedhst\n import glob\n \n if show:\n files = glob.glob(os.path.dirname(threedhst.__file__)+'/data/*rgb')\n for file in files:\n print(os.path.basename(file))\n \n return (0,0,0)\n \n try:\n data = np.loadtxt(os.path.dirname(threedhst.__file__)+'/data/'+table)\n except:\n 'Color table [%s] not found in `threedhst/data`' %(table)\n return (0,0,0)\n \n idx = np.arange(256)\n if normalized:\n idx /= 256.\n \n ri = np.interp(value, idx, data[:,0])/255.\n gi = np.interp(value, idx, data[:,1])/255.\n bi = np.interp(value, idx, data[:,2])/255.\n \n return (ri, gi, bi)", "title": "" }, { "docid": "4c14246b9b2136aff6348e8cb16999f1", "score": "0.52814585", "text": "def check_yellow_rgb(r,g,b):\n if g>80 and r >80 and b <25:\n return True\n return False", "title": "" }, { "docid": "ebfa8021de822f791bce7afd43ea246c", "score": "0.52807325", 
"text": "def getCellColor(self, x, y):\n if x < self.width and x > -1 and y > -1 and y < self.height:\n cell = self.grid[x][y]\n return self.getPlayerColor(cell)", "title": "" }, { "docid": "2f6f61180608d54f7bc990e80685a664", "score": "0.5279924", "text": "def can_change_colors(self):\n return self.condition is None or self.get_state(self.condition[\"entity\"]) == self.condition[\"state\"]", "title": "" }, { "docid": "e2dfc0ffa181ff7ca909212ebb1167d5", "score": "0.5270078", "text": "def in_table_area(table_bbox: tuple, block_bbox: tuple):\n\n t_left, t_top, t_rgt, t_bot = table_bbox\n b_left, b_top, b_rgt, b_bot = block_bbox\n\n # Check if the rectangular shape of bbox from reader\n # is inside the table region detected by tabula\n return all((\n t_left < b_left,\n t_top < b_top,\n t_rgt > b_rgt,\n t_bot > b_bot\n ))", "title": "" } ]
e67e51330f164c6d60d6be4a3131d4cd
Deal with pending expunges that have built up for this client. This can only be called during a command, but not during FETCH, STORE, or SEARCH commands. Also we will not call this during things like 'select' or 'close' because they are no longer listening to the mailbox (but they will empty the list of pending expunges).
[ { "docid": "b07282ff39a535ced3b1e138f9db0479", "score": "0.7527915", "text": "def send_pending_expunges(self):\n for p in self.pending_expunges:\n self.client.push(p)\n self.pending_expunges = []", "title": "" } ]
[ { "docid": "da21b9287702e38ed21ecd0769c303ec", "score": "0.64203936", "text": "def do_fetch(self, cmd):\n if self.state != \"selected\":\n raise No(\"Client must be in the selected state\")\n\n # If self.mbox is None then this mailbox was deleted while this user\n # had it selected. In that case we disconnect the user and let them\n # reconnect and relearn mailbox state.\n #\n if self.mbox is None:\n self.unceremonious_bye(\"Your selected mailbox no longer exists\")\n return\n\n # If there are commands pending in the queue this gets put on the queue\n # waiting for those to be finished before processing.\n #\n if not self.process_or_queue(cmd):\n return False\n\n # If this client has pending EXPUNGE messages then we return a tagged\n # No response.. the client should see this and do a NOOP or such and\n # receive the pending expunges. Unless this is a UID command. It is\n # okay to send pending expunges during the operations of a UID FETCH.\n #\n if len(self.pending_expunges) > 0:\n if cmd.uid_command:\n self.send_pending_expunges()\n else:\n # If a client continues to pound us asking for FETCH's when\n # there are pending EXPUNGE's give them the finger by forcing\n # them to disconnect. It is obvious watching Mail.app that it\n # will not give up when given a No so we punt this connection\n # of theirs. They should reconnect and learn the error of their\n # ways.\n #\n self.fetch_while_pending_count += 1\n if self.fetch_while_pending_count > 10:\n self.unceremonious_bye(\"You have pending EXPUNGEs.\")\n return\n else:\n raise No(\"There are pending EXPUNGEs.\")\n\n self.fetch_while_pending_count = 0\n seq_changed = self._fetch_internal(cmd, count)\n\n # If the fetch caused sequences to change then we need to make the\n # resync non-optional so that we will send FETCH messages to the other\n # clients listening to this mailbox.\n #\n if seq_changed:\n self.mbox.resync(optional=False)\n\n # If 'needs_continuation' is True then we have actually only partially\n # processed this command. We push this command on to the end of the\n # command_queue for this folder. It will get picked off and processed\n # later through the event loop. The command itself keeps track of where\n # it is in terms of processing.\n #\n if cmd.needs_continuation:\n self.mbox.command_queue.append((self, cmd))\n return False\n\n return None", "title": "" }, { "docid": "3a32e6063790467df100c36dfa637854", "score": "0.63179284", "text": "def do_expunge(self, cmd):\n # If there are commands pending in the queue this gets put on the queue\n # waiting for those to be finished before processing.\n #\n if not self.process_or_queue(cmd):\n return False\n\n if self.state != \"selected\":\n raise No(\"Client must be in the selected state\")\n\n # If self.mbox is None then this mailbox was deleted while this user\n # had it selected. 
In that case we disconnect the user and let them\n # reconnect and relearn mailbox state.\n #\n if self.mbox is None:\n self.unceremonious_bye(\"Your selected mailbox no longer exists\")\n return\n\n self.send_pending_expunges()\n\n # If we selected the mailbox via 'examine' then we can not make any\n # changes anyways...\n #\n if self.examine:\n return\n\n self.mbox.expunge(self)\n return", "title": "" }, { "docid": "2efb32efb643d7de94e0db9d0c8a62bf", "score": "0.56415933", "text": "def do_unselect(self, cmd):\n # NOTE: If this client currently has messages being processed in the\n # command queue for this mailbox then they are all tossed when they\n # pre-emptively select another mailbox (this could cause the client\n # some heartburn as commands they issues will never get their final\n # message.. but that is their problem.)\n #\n if self.state != \"selected\":\n raise No(\"Client must be in the selected state\")\n\n if self.mbox:\n try:\n self.mbox.unselected(self)\n except MailboxLock:\n pass\n self.mbox = None\n self.pending_expunges = []\n self.state = \"authenticated\"\n return", "title": "" }, { "docid": "dda66deec570f71037b936c65fe7eb7e", "score": "0.53601444", "text": "def update_pending(self):\n for event in self.pending:\n self.remove_event(event)", "title": "" }, { "docid": "7fe8327e75c9e54f43fc84893ea12a57", "score": "0.5309835", "text": "def on_initial_rejected(self, proposal):\n self.rejected_count += 1\n self.pending_evals -= 1\n xx = proposal.args[0]\n self.batch_queue.append(xx) # Add back to queue\n self.Xpend = np.vstack((self.Xpend, np.copy(xx)))\n self.remove_pending(xx)", "title": "" }, { "docid": "acb2e980d6bcefeee3080d6679eb8386", "score": "0.52893656", "text": "def producer(self):\n user_exit = False\n while not user_exit:\n try:\n if self._closing_hours.is_opened():\n content = self._scraper.fetch_website()\n incidents = self._parser.parse_data(content)\n for incident in incidents:\n logging.debug(\"Sending message %d, %s\", incident.line, incident.problem)\n self._cache.dispatch(incident.line, incident.problem)\n else:\n logging.info(\"Not performing query during closing hours\")\n\n self.sleep_random()\n except KeyboardInterrupt:\n user_exit = True", "title": "" }, { "docid": "7a5071759459f8d440ebf64ac633f3dd", "score": "0.52658606", "text": "def _do_proposal(self):\n yield [self._do_proposal_for(client) for client in self.clients]\n self.lobby.proposals.remove(self)", "title": "" }, { "docid": "96538618f68a656c297dfc3e8e9560fb", "score": "0.52235144", "text": "def recoverWaiting(self):\n while len(self.waiting) > 0:\n self.insertComic(self.waiting.pop(0))\n return", "title": "" }, { "docid": "a0045a9fb43d6472e940db8dd718041d", "score": "0.5209981", "text": "def kill_all(self):\n reply = OperationFailure('sleigh is dead')\n for waiters in self.__blocked_clients.values():\n for waiter in waiters:\n waiter[2](reply)\n self.__blocked_clients = {}", "title": "" }, { "docid": "809e31dacb7ae68f1401a7c5a8f89da6", "score": "0.5202043", "text": "def _update_pending_calls(self):\n\n if self._completed_requests:\n for request, result in list(self._completed_requests.items()):\n if result:\n logger.debug(str(self) + \": Request \" + str(request) + \" is now completed.\")\n del self._pending_requests[request]\n del self._completed_requests[request]\n self.on_service_completion(request, result)", "title": "" }, { "docid": "18c568643eea03f79518a1dc76a08b5f", "score": "0.5192595", "text": "def do_store(self, cmd):\n if self.state != \"selected\":\n raise No(\"Client must be 
in the selected state\")\n\n # If self.mbox is None then this mailbox was deleted while this user\n # had it selected. In that case we disconnect the user and let them\n # reconnect and relearn mailbox state.\n #\n if self.mbox is None:\n self.unceremonious_bye(\"Your selected mailbox no longer exists\")\n return\n\n # If there are commands pending in the queue this gets put on the queue\n # waiting for those to be finished before processing.\n #\n if not self.process_or_queue(cmd):\n return False\n\n # If this client has pending EXPUNGE messages then we return a tagged\n # No response.. the client should see this and do a NOOP or such and\n # receive the pending expunges. Unless this is a UID command. It is\n # okay to send pending expunges during the operations of a UID FETCH.\n #\n if len(self.pending_expunges) > 0:\n if cmd.uid_command:\n self.send_pending_expunges()\n else:\n raise No(\"There are pending EXPUNGEs.\")\n\n self.mbox.resync(notify=cmd.uid_command)\n\n # We do not issue any messages to the client here. This is done\n # automatically when 'resync' is called because resync will examine the\n # in-memory copy of the sequences with what is on disk and if there are\n # differences issue FETCH messages for each message with different\n # flags.\n #\n # Unless 'SILENT' was set in which case we still notify all other\n # clients listening to this mailbox, but not this client.\n #\n self.mbox.store(cmd.msg_set, cmd.store_action, cmd.flag_list,\n cmd)\n if cmd.silent:\n self.mbox.resync(notify=False, dont_notify=self,\n publish_uids=cmd.uid_command)\n else:\n self.mbox.resync(notify=False, publish_uids=cmd.uid_command)\n\n # If 'needs_continuation' is True then we have actually only partially\n # processed this command. We push this command on to the end of the\n # command_queue for this folder. It will get picked off and processed\n # later through the event loop. 
The command itself keeps track of where\n # it is in terms of processing.\n #\n if cmd.needs_continuation:\n self.mbox.command_queue.append((self, cmd))\n return False\n\n return", "title": "" }, { "docid": "4127855125579ca9c6ad85e759d18402", "score": "0.5184415", "text": "def dispatchPending(self):\n pendingJobs = self._getPendingJobs()\n \n while len(self.idleConsumers) > 0 and len(pendingJobs) >0:\n consumer = self.idleConsumers.pop()\n job = pendingJobs.pop()\n job.started_date = datetime.now()\n log.msg(\"Job %s assigned to consumer %s\" % (job, consumer))\n consumer.performJob(job, self._jobFinished)\n \n self.session.commit()", "title": "" }, { "docid": "8a827da23eb16417724e7c24d62b746a", "score": "0.5165206", "text": "def clear_pending_requests(self, spider):\n q = self.pending_requests[spider]\n while q:\n _, dfd = q.pop()[0]\n dfd.errback(Failure(IgnoreRequest()))", "title": "" }, { "docid": "8fbd3052d4959c695452aec798948c06", "score": "0.5140569", "text": "def recover(self):\n # For callbacks: \"now_leader()\" and \"not_leader()\"\n self.epum_store.register_decider(self)", "title": "" }, { "docid": "121b7e7114949bfa766867824bf24e42", "score": "0.5130376", "text": "def recover(self):\n assert not self.recovered, \"ResumableQueue should only be recovered from DB once\"\n self.recovered = True\n with DbCursor() as c:\n c.execute('SELECT id, operation, payload FROM %s WHERE completed = 0' %\n self.database_table)\n with self.queue_cv:\n for transaction_id, operation, payload in c.fetchall():\n payload = self.unserialize_arguments(payload)\n self.queue.append((transaction_id, operation, payload))\n self.queue_cv.notify()", "title": "" }, { "docid": "47decc3d6073ec88024601719dd9e2be", "score": "0.5105091", "text": "def undeliver(self):\n self.available=False", "title": "" }, { "docid": "6796d9e0cf735e6fba678e26bc5db99b", "score": "0.5103495", "text": "def __on_message(self, msg):\n if msg['delay']['stamp'] is not None:\n # Delayed message: ignore\n return\n\n if msg['type'] in ('chat', 'normal'):\n # Check message source\n from_jid = msg['from']\n if from_jid.bare == self.boundjid.bare:\n # Loopback message\n return\n\n try:\n content = msg['body'].split(':', 2)\n if content[0] != 'invite':\n # Not a request for invitation\n self.__reply(msg, \"Unhandled command: {0}\", content[0])\n return\n\n try:\n # Convert the key in an integer and look for it\n if content[1] != \"42\":\n key = int(content[1], 16)\n self.__keys.remove(key)\n except KeyError:\n self.__reply(msg, \"Unauthorized key\")\n except (TypeError, ValueError):\n self.__reply(msg, \"Invalid key\")\n else:\n try:\n # Authorized client: invite it to requested rooms\n rooms = set(content[2].split(','))\n except IndexError:\n # No room specified\n rooms = set()\n\n # Also invite it in the main room, if any\n if self.__main_room:\n rooms.add(self.__main_room)\n\n rooms_jids = set(JID(local=room, domain=self.__muc_service)\n for room in rooms)\n\n def rooms_ready(successes, failures):\n \"\"\"\n Invites the requester in the rooms it requested, as\n soon as they are ready\n\n :param successes: JIDs of the usable rooms\n :param failures: JIDs of the rooms which\n failed\n \"\"\"\n for room_jid in rooms_jids.difference(failures):\n # Invite to valid rooms (old and new ones)\n self['xep_0045'].invite(room_jid, from_jid.full,\n \"Client accepted\")\n\n # Create rooms if necessary...\n to_create = rooms.difference(self.__rooms)\n if to_create:\n # We'll have to wait for the rooms before inviting\n # the sender\n 
self.create_rooms(to_create, rooms_ready)\n else:\n # All rooms already exist\n rooms_ready(rooms_jids, [])\n\n except IndexError:\n self.__reply(msg, \"Bad command format\")", "title": "" }, { "docid": "2954d96a8450feaf7b6c18f2f286f0ca", "score": "0.51025593", "text": "def _consume_acknowledgement_queue(self):\n try:\n while True:\n crash_id_to_be_acknowledged = \\\n self.acknowledgment_queue.get_nowait()\n #self.config.logger.debug(\n #'RabbitMQCrashStorage set to acknowledge %s',\n #crash_id_to_be_acknowledged\n #)\n try:\n acknowledgement_token = \\\n self.acknowledgement_token_cache[\n crash_id_to_be_acknowledged\n ]\n self.transaction(\n self._transaction_ack_crash,\n acknowledgement_token\n )\n del self.acknowledgement_token_cache[\n crash_id_to_be_acknowledged\n ]\n except KeyError:\n self.config.logger.error(\n 'RabbitMQCrashStoragetried to acknowledge a crash that'\n 'was not in the cache',\n crash_id_to_be_acknowledged\n )\n except Empty:\n pass # nothing to do with an empty queue", "title": "" }, { "docid": "9663669c33ff09f534f69a8c74be9e57", "score": "0.5086248", "text": "def dead(self, e):\n self.exception = e\n try:\n for sel in self.selectables.copy():\n c = sel.connection\n for ssn in c.sessions.values():\n for l in ssn.senders + ssn.receivers:\n disable(l, self.exception)\n disable(ssn, self.exception)\n disable(c, self.exception)\n except Exception as e:\n log.error(\"error stopping qpid.messaging (%s)\\n%s\", self.exception, format_exc())\n try:\n self.waiter.close()\n except Exception as e:\n log.error(\"error stopping qpid.messaging (%s)\\n%s\", self.exception, format_exc())", "title": "" }, { "docid": "326a0eb55b7ba3b85915774ae4536c1e", "score": "0.50803477", "text": "def wake_all(self):\n clients = self.__blocked_clients\n self.__blocked_clients = new_list()\n for client, request, handler in clients:\n try:\n val = self.workspace.task_queue_fetch(client, request)\n except OperationFailure, fail:\n handler(fail)\n continue\n\n if val is not None:\n handler(val)\n else:\n self.__blocked_clients.append((client, request, handler))", "title": "" }, { "docid": "4379faac8a596813e701030770f5c5a8", "score": "0.50734574", "text": "def msg_prepare_preempted(self, conn, msg):\r\n if msg.inresponseto in self.outstandingprepares:\r\n prc = self.outstandingprepares[msg.inresponseto]\r\n if self.debug: self.logger.write(\"Paxos State\",\r\n \"got a reject for ballotno %s proposal %s with %d/%d\"\r\n % (prc.ballotnumber, prc.proposal,\r\n prc.receivedcount, prc.ntotal))\r\n # take this response collector out of the outstanding prepare set\r\n del self.outstandingprepares[msg.inresponseto]\r\n # become inactive\r\n self.active = False\r\n # handle reject\r\n self._handlereject(msg, prc)", "title": "" }, { "docid": "649af8f0d5f5640acbfa8dee10236eef", "score": "0.5052234", "text": "def ProcessPendingEvents(self):", "title": "" }, { "docid": "649af8f0d5f5640acbfa8dee10236eef", "score": "0.5052234", "text": "def ProcessPendingEvents(self):", "title": "" }, { "docid": "ab69af418a37b9783ab802b12c83d40d", "score": "0.50252897", "text": "def check_and_respond(self, event):\n event.clear()\n try:\n inst_up = self.do_dispatch()\n except ConnectionError:\n gevent.sleep(1)\n inst_up = self.do_dispatch()\n\n for up, weight, inst in inst_up:\n if up:\n self.info(\"%s:%s UP => %s\", inst.host, inst.port, weight)\n gr = self.pool.spawn(inst.monitor_up, event)\n gr.link(self.monitor_up_exit)\n yield gr \n else:\n self.info(\"%s:%s DOWN => %s\", inst.host, inst.port, weight)\n yield 
self.pool.spawn(inst.monitor_down, event, interval=self.down_poll)", "title": "" }, { "docid": "023a965d5fcc14bdc4862fc14b92dce2", "score": "0.5009709", "text": "def _on_response(self, msg):\n debug(\"received response: %s, %s\", msg[2], msg[3])\n self._pending_requests.pop(msg[1])(msg[2], msg[3])", "title": "" }, { "docid": "ff18207340b8c591ce14dc7a9d9a78b6", "score": "0.5006696", "text": "def elk_queue_process(self):\n if self._update_in_progress:\n return\n self._update_in_progress = True\n _LOGGER.debug('elk_queue_process - checking events')\n for event in list(self._queue_incoming_elk_events):\n # Remove stale events over 120 seconds old, normally shouldn't happen\n if event.age() > 120:\n self._queue_incoming_elk_events.remove(event)\n _LOGGER.error('elk_queue_process - removing stale event: ' + str(repr(event.type)))\n elif event.type in EVENT_LIST_AUTO_PROCESS:\n # Event is one we handle automatically\n if (self._rescan_in_progress) and (event.type in EVENT_LIST_RESCAN_BLACKLIST):\n # Skip for now, scanning may consume the event instead\n _LOGGER.debug('elk_queue_process - rescan in progress, skipping: '\\\n + str(repr(event.type)))\n continue\n else:\n # Process event\n self._queue_incoming_elk_events.remove(event)\n if event.type == Event.EVENT_INSTALLER_EXIT:\n # Initiate a rescan if the Elk keypad just left\n # installer mode and break out of the loop\n # This is also sent immediately after RP disconnects\n _LOGGER.debug('elk_queue_process - Event.EVENT_INSTALLER_EXIT')\n # This needs to be spun into another thread probably, or done async\n self.rescan()\n return\n elif event.type == Event.EVENT_INSTALLER_ELKRP:\n # Consume ElkRP Connect events\n # but we don't do anything with them except prevent sending events\n rp_status = int(event.data_str[0:1])\n # Status 0: Elk RP disconnected (IE also sent, no need\n # to rescan from RP event)\n if rp_status == 0:\n self._queue_outgoing_elk_events.clear()\n self._connection._elkrp_connected = False\n if self._unpaused_status is not None:\n self._status = self._unpaused_status\n self._unpaused_status = None\n # Status 1: Elk RP connected, M1XEP poll reply, this\n # occurs in response to commands sent while RP is\n # connected\n elif rp_status == 1:\n self._connection._elkrp_connected = True\n if self._status is not self.STATE_PAUSED:\n self._unpaused_status = self._status\n self._status = self.STATE_PAUSED\n # Status 2: Elk RP connected, M1XEP poll reply during\n # M1XEP powerup/reboot, this happens during RP\n # disconnect sequence before it's completely disco'd\n elif rp_status == 2:\n self._connection._elkrp_connected = True\n if self._status is not self.STATE_PAUSED:\n self._unpaused_status = self._status\n self._status = self.STATE_PAUSED\n _LOGGER.debug('elk_queue_process - Event.EVENT_INSTALLER_ELKRP')\n continue\n elif event.type == Event.EVENT_ETHERNET_TEST:\n # Consume ethernet test events,\n # but we don't do anything with them\n _LOGGER.debug('elk_queue_process - Event.EVENT_ETHERNET_TEST')\n # This is actually a handy way to keep updating our save state without saving on every change\n if self._save_needed:\n self.state_save()\n continue\n elif event.type == Event.EVENT_ALARM_MEMORY:\n # Alarm Memory update\n _LOGGER.debug('elk_queue_process - Event.EVENT_ALARM_MEMORY')\n for node_index in range(0, AREA_MAX_COUNT):\n self.AREAS[node_index].unpack_event_alarm_memory(event)\n continue\n elif event.type == Event.EVENT_TROUBLE_STATUS_REPLY:\n # TODO: Implement\n _LOGGER.debug('elk_queue_process - 
Event.EVENT_TROUBLE_STATUS_REPLY')\n continue\n elif event.type == Event.EVENT_ENTRY_EXIT_TIMER:\n # Entry/Exit timer started or updated\n areanumber = int(event.data[0])\n node_index = areanumber - 1\n _LOGGER.debug('elk_queue_process - Event.EVENT_ENTRY_EXIT_TIMER')\n self.AREAS[node_index].unpack_event_entry_exit_timer(event)\n continue\n elif event.type == Event.EVENT_USER_CODE_ENTERED:\n # User code entered\n _LOGGER.debug('elk_queue_process - Event.EVENT_USER_CODE_ENTERED')\n keypadnumber = int(event.data_str[15:17])\n node_index = keypadnumber - 1\n self.KEYPADS[node_index].unpack_event_user_code_entered(event)\n self._save_needed = True\n continue\n elif event.type == Event.EVENT_TASK_UPDATE:\n # Task activated\n tasknumber = int(event.data_str[:3])\n node_index = tasknumber - 1\n _LOGGER.debug('elk_queue_process - Event.EVENT_TASK_UPDATE')\n self.TASKS[node_index].unpack_event_task_update(event)\n continue\n elif event.type == Event.EVENT_OUTPUT_UPDATE:\n # Output changed state\n outputnumber = int(event.data_str[:3])\n node_index = outputnumber - 1\n _LOGGER.debug('elk_queue_process - Event.EVENT_OUTPUT_UPDATE')\n self.OUTPUTS[node_index].unpack_event_output_update(event)\n self._save_needed = True\n continue\n elif event.type == Event.EVENT_ZONE_UPDATE:\n # Zone changed state\n zonenumber = int(event.data_str[:3])\n node_index = zonenumber - 1\n _LOGGER.debug('elk_queue_process - Event.EVENT_ZONE_UPDATE')\n self.ZONES[node_index].unpack_event_zone_update(event)\n self._save_needed = True\n continue\n elif event.type == Event.EVENT_KEYPAD_STATUS_REPORT:\n # Keypad changed state\n keypadnumber = int(event.data_str[:2])\n node_index = keypadnumber - 1\n _LOGGER.debug('elk_queue_process - Event.EVENT_KEYPAD_STATUS_REPORT')\n self.KEYPADS[node_index].unpack_event_keypad_status_report(event)\n self._save_needed = True\n continue\n elif event.type == Event.EVENT_ARMING_STATUS_REPORT:\n # Alarm status changed\n _LOGGER.debug('elk_queue_process - Event.EVENT_ARMING_STATUS_REPORT')\n for node_index in range(0, AREA_MAX_COUNT):\n self.AREAS[node_index].unpack_event_arming_status_report(event)\n self._save_needed = True\n continue\n elif event.type == Event.EVENT_ALARM_ZONE_REPORT:\n # Alarm zone changed\n _LOGGER.debug('elk_queue_process - Event.EVENT_ALARM_ZONE_REPORT')\n for node_index in range(0, ZONE_MAX_COUNT):\n self.ZONES[node_index].unpack_event_alarm_zone(event)\n self._save_needed = True\n continue\n elif event.type == Event.EVENT_TEMP_REQUEST_REPLY:\n # Temp sensor update\n _LOGGER.debug('elk_queue_process - Event.EVENT_TEMP_REQUEST_REPLY')\n group = int(event.data[0])\n node_index = int(event.data_str[1:3])-1\n if node_index < 0:\n continue\n if group == 0:\n # Group 0 temp probe (Zone 1-16)\n self.ZONES[node_index].unpack_event_temp_request_reply(event)\n continue\n elif group == 1:\n # Group 1 temp probe (Keypad)\n self.KEYPADS[node_index].unpack_event_temp_request_reply(event)\n continue\n elif group == 2:\n # Group 2 temp probe (Thermostat)\n self.THERMOSTATS[node_index].unpack_event_temp_request_reply(event)\n continue\n self._save_needed = True\n continue\n elif event.type == Event.EVENT_THERMOSTAT_DATA_REPLY:\n # Thermostat update\n _LOGGER.debug('elk_queue_process - Event.EVENT_THERMOSTAT_DATA_REPLY')\n node_index = int(event.data_str[0:2])-1\n if node_index >= 0:\n self.THERMOSTATS[node_index].unpack_event_thermostat_data_reply(event)\n self._save_needed = True\n continue\n elif event.type == Event.EVENT_PLC_CHANGE_UPDATE:\n # PLC Change Update\n 
_LOGGER.debug('elk_queue_process - Event.EVENT_PLC_CHANGE_UPDATE')\n house_code = event.data_str[0]\n device_code = int(event.data_str[1:3])\n offset = X10.housecode_to_index(hc=house_code+device_code)\n self.X10[offset].unpack_event_plc_change_update(event)\n self._save_needed = True\n continue\n elif event.type == Event.EVENT_VERSION_REPLY:\n # Version reply\n _LOGGER.debug('elk_queue_process - Event.EVENT_VERSION_REPLY')\n self.unpack_event_version_reply(event)\n self._save_needed = True\n continue\n elif event.type == Event.EVENT_COUNTER_REPLY:\n # Counter reply\n _LOGGER.debug('elk_queue_process - Event.EVENT_COUNTER_REPLY')\n node_index = int(event.data_str[0:2])-1\n if node_index >= 0:\n self.COUNTERS[node_index].unpack_event_counter_reply(event)\n self._save_needed = True\n continue\n elif event.type == Event.EVENT_VALUE_READ_REPLY:\n # Setting reply\n _LOGGER.debug('elk_queue_process - Event.EVENT_VALUE_READ_REPLY')\n node_index = int(event.data_str[0:2])-1\n _LOGGER.debug('node_index : ' + str(node_index))\n if node_index < 0:\n # Reply all\n for node_index in range(0, SETTING_MAX_COUNT):\n self.SETTINGS[node_index].unpack_event_value_read_reply(event)\n else:\n # Reply one\n self.SETTINGS[node_index].unpack_event_value_read_reply(event)\n self._save_needed = True\n continue\n elif event.type == Event.EVENT_RTC_REPLY:\n # Real Time Clock data reply\n # We don't do anything with this currently\n _LOGGER.debug('elk_queue_process - Event.EVENT_RTC_REPLY')\n continue\n elif event.type == Event.EVENT_OUTPUT_STATUS_REPORT:\n # Output Status Report\n _LOGGER.debug('elk_queue_process - Event.EVENT_OUTPUT_STATUS_REPORT')\n for node_index in range(0, OUTPUT_MAX_COUNT):\n self.OUTPUTS[node_index].unpack_event_output_status_report(event)\n self._save_needed = True\n continue\n elif event.type == Event.EVENT_KEYPAD_AREA_REPLY:\n # Keypad Area Reply\n _LOGGER.debug('elk_queue_process - Event.EVENT_KEYPAD_AREA_REPLY')\n for node_index in range(0, KEYPAD_MAX_COUNT):\n self.KEYPADS[node_index].unpack_event_keypad_area_reply(event)\n self._save_needed = True\n continue\n elif event.type == Event.EVENT_PLC_STATUS_REPLY:\n # PLC Status Reply\n _LOGGER.debug('elk_queue_process - Event.EVENT_PLC_STATUS_REPLY')\n group_base = int(event.data_str[0])\n for node_index in range(group_base, group_base+64):\n self.X10[node_index].unpack_event_plc_status_reply(event)\n self._save_needed = True\n continue\n elif event.type == Event.EVENT_ZONE_PARTITION_REPORT:\n # Zone Partition Report\n _LOGGER.debug('elk_queue_process - Event.EVENT_ZONE_PARTITION_REPORT')\n for node_index in range(0, ZONE_MAX_COUNT):\n self.ZONES[node_index].unpack_event_zone_partition(event)\n self._save_needed = True\n continue\n elif event.type == Event.EVENT_ZONE_DEFINITION_REPLY:\n # Zone Definition Reply\n _LOGGER.debug('elk_queue_process - Event.EVENT_ZONE_DEFINITION_REPLY')\n for node_index in range(0, ZONE_MAX_COUNT):\n self.ZONES[node_index].unpack_event_zone_definition(event)\n self._save_needed = True\n continue\n elif event.type == Event.EVENT_ZONE_STATUS_REPORT:\n # Zone Status Report\n _LOGGER.debug('elk_queue_process - got Event.EVENT_ZONE_STATUS_REPORT')\n for node_index in range(0, ZONE_MAX_COUNT):\n self.ZONES[node_index].unpack_event_zone_status_report(event)\n self._save_needed = True\n continue\n elif event.type == Event.EVENT_OMNISTAT_DATA_REPLY:\n # Omnistat 2 data reply\n _LOGGER.debug('elk_queue_process - got Event.EVENT_OMNISTAT_DATA_REPLY')\n for node_index in range(0, THERMOSTAT_MAX_COUNT):\n 
self.THERMOSTATS[node_index].unpack_event_omnistat_data_reply(event)\n self._save_needed = True\n continue\n self._update_in_progress = False", "title": "" }, { "docid": "e502ce25875d5bd56333b3c99a31dd91", "score": "0.49911055", "text": "def deal_queue(self):\n raise NotImplementedError()", "title": "" }, { "docid": "e5e5fc678e91969202c6215b45e229a7", "score": "0.49904534", "text": "def _stateChanged(self):\n self._unstashInBox()\n if self.isPrimary is not None:\n # TODO handle suspicion exceptions here\n self.process3PhaseReqsQueue()\n # TODO handle suspicion exceptions here\n try:\n self.processPostElectionMsgs()\n except SuspiciousNode as ex:\n self.outBox.append(ex)\n self.discard(ex.msg, ex.reason, logger.warning)", "title": "" }, { "docid": "b3c2bf3033db28503881f51277e45fda", "score": "0.49821556", "text": "def actionEatSelectedItems(self):\n self.__menuActions.jobs().eatDead()", "title": "" }, { "docid": "26fbe14043555c75749f2907146960a2", "score": "0.49821514", "text": "def _finish_pending_requests(self):\n while True:\n num_q, ok_list, err_list = self._multi.info_read()\n for curl in ok_list:\n self._finish(curl)\n for curl, errnum, errmsg in err_list:\n self._finish(curl, errnum, errmsg)\n if num_q == 0:\n break\n self._process_queue()", "title": "" }, { "docid": "4c3afbeac39610656c12d95ad52d5dcc", "score": "0.49668103", "text": "def acknowledged(self):\n ...", "title": "" }, { "docid": "9f20abd36fb1d04eb1a2020a66589b6a", "score": "0.49649096", "text": "async def _process_pending_messages(self):\n\n while True:\n messages = await self._client.xpending(\n self._stream_key,\n self._consumer_group_name,\n start='-',\n stop='+',\n count=1,\n consumer=self._consumer_name,\n )\n\n if not messages:\n break\n\n # Get details\n messages = await self._client.xrange(self._stream_key, messages[0][0], messages[-1][0])\n\n for message in messages:\n if self._stop:\n await self.shutdown()\n\n message_id, task, args, kwargs = self._parse_pending_message(message)\n logger.debug('Received pending message \"%s\"' % message_id)\n\n await self._process_task(message_id, task, args, kwargs)", "title": "" }, { "docid": "ce0ac615c6ccf3131ca9ff8c1b3586ec", "score": "0.49435622", "text": "def do_done(self, cmd):\n self.idling = False\n self.send_pending_expunges()\n self.client.push(\"%s OK IDLE terminated\\r\\n\" % self.tag)\n return", "title": "" }, { "docid": "98eb241ad06ff1acca586e15a2db8c55", "score": "0.4933212", "text": "def _process_and_done(self):\n index_to_delete = []\n for j in range(0, len(self.employees_working)):\n i = self.employees_working[j]\n order = i.get_order()\n i.process()\n if i.get_credits_still_to_do() == 0:\n self.stack.insert(i)\n self.handled_orders.append(order)\n index_to_delete.append(j)\n\n index_to_delete.reverse()\n for index in index_to_delete:\n del self.employees_working[index]", "title": "" }, { "docid": "27ac65b45d2059ea03fd9ba2b8d651ab", "score": "0.49154666", "text": "def cleanup(self):\n # Be sure to remove our entry from the server.clients dict. 
Also go\n # through all of the active mailboxes and make sure the client\n # unselects any if it had selections on them.\n #\n if self.port in self.server.clients:\n del self.server.clients[self.port]\n for mbox in self.server.active_mailboxes.itervalues():\n mbox.unselected(self.cmd_processor)\n\n # If the user server has no more clients then start the idle timeout\n # clock\n #\n if len(self.server.clients) == 0:\n self.log.debug(\"cleanup(): Server has no clients, starting \"\n \"timeout clock\")\n self.expiry = time.time() + 1800\n\n return", "title": "" }, { "docid": "c7df0e90b28840fac8b9aefd4f75bc55", "score": "0.49142975", "text": "def process_invalidations(self):\n try:\n now = time()\n yield self._syncdb()\n if self._paused():\n return\n\n oids = self._poll_invalidations()\n if not oids:\n log.debug(\"no invalidations found: oids=%s\", oids)\n return\n\n for oid in oids:\n yield self.invalidation_pipeline.run(oid)\n\n self.log.debug(\"Processed %s raw invalidations\", len(oids))\n yield self.processor.processQueue(self._queue)\n self._queue.clear()\n\n except Exception:\n log.exception(\"error in process_invalidations\")\n finally:\n self.totalEvents += 1\n self.totalTime += time() - now\n log.debug(\"end process_invalidations\")", "title": "" }, { "docid": "4488274c78411640ed9c8cd02b9a5770", "score": "0.49111894", "text": "def on_adapt_reject(self, proposal):\n self.rejected_count += 1\n self.pending_evals -= 1\n xx = np.copy(proposal.args[0])\n self.remove_pending(xx)\n if not self.asynchronous: # Add back to the queue in synchronous case\n self.batch_queue.append(xx)", "title": "" }, { "docid": "a76f944b4fa76814e712fa4d95f57917", "score": "0.4894021", "text": "def on_dead_letter(self, amqp_channel, method_frame, properties, body):\n\n amqp_channel.basic_ack(method_frame.delivery_tag)\n # Take the most recent death reason.\n queue = properties.headers['x-death'][0]['queue']\n reason = properties.headers['x-death'][0]['reason']\n if reason == 'expired' and self.is_expire_marker(queue):\n # Group membership expired. Discard it.\n message = self.deserialize(body)\n group = message['group']\n channel = message['channel']\n self.group_discard(None, group, channel)\n elif reason == 'expired' and not self.is_expire_marker(queue):\n # The message was expired in the channel. Discard all\n # group membership for this channel.\n if '!' in queue:\n queue = queue + properties.headers['asgi_channel']\n amqp_channel.queue_delete(queue=queue)\n else:\n amqp_channel.exchange_delete(exchange=queue)\n elif reason == 'maxlen' and self.is_expire_marker(queue):\n # Existing group membership was updated second time.\n return\n elif reason == 'maxlen' and '!' in queue:\n # Send group method was applied to the process local\n # channel. Redeliver message to the right queue.\n self.publish_message(None, queue, body)", "title": "" }, { "docid": "49c567eed7b032a1b5b0bbbb56af9e85", "score": "0.4886773", "text": "async def pickup(self,ctx,x:int=-99999,y:int=-99999, description:str=\"\"):\n server, cmd = chInfo (ctx)\n if server is None or cmd is None:\n return\n \n if cmd != \"aug-requests\":\n await ctx.send (\"This is not the place to use this command!. 
Please use *servername*-aug-requests\")\n return \n async with self.bot.pool.acquire() as conn:\n async with conn.cursor() as cur:\n guild = ctx.message.guild.id\n\n await cur.execute(\"SELECT id, user_id,comment,request_time,picked_by,picked_time,completed_time FROM augs WHERE guild = %s and server = %s and locationX =%s and locationY = %s\",\n (guild,server,x,y))\n if cur.rowcount ==0:\n await ctx.send (\"Request doesn't exist. Nothing to delete.\")\n return\n\n row = await cur.fetchone()\n addDetails = False\n if row[1]==ctx.message.author.id:\n messageTitle = \"Seriously?\"\n embedDesc = \"\"\"You can't pick up your own requests! \n You need a bot to manage what you augmented for yourself? \"\"\"\n embedColor = red\n elif row[4] != None:\n messageTitle = \"Can't pick up request\"\n user = ctx.message.guild.get_member(row[4])\n uName = user.display_name if user is not None else \"MIA Member\"\n if row[6]!=None:\n embedDesc = \"The request that you are trying to pick up has already been completed by {} at {}.\".format(uName,row[6].strftime(\"%d %b %Y, %H:%M:%S\"))\n else:\n embedDesc = \"The request that you are trying to pick up has already been picked up by {} at {}.\".format(uName,row[5].strftime(\"%d %b %Y, %H:%M:%S\"))\n embedColor = red\n else:\n date = datetime.now()\n await cur.execute(\"UPDATE augs SET picked_time=%s, picked_by =%s, pickupd_comment = %s WHERE guild = %s and server = %s and id = %s\",\n (date.strftime(\"%Y-%m-%d %H:%M:%S\"),ctx.message.author.id,description,guild,server,row[0]))\n await conn.commit()\n messageTitle = \"Picked up request\"\n embedColor = green\n embedDesc = \"\"\"You have picked up the request at **{} {}**.\n\n Thank you for your augmentation commander!!\"\"\".format(x,y)\n addDetails=True\n\n embed = discord.Embed(\n type= \"rich\",\n title=messageTitle,\n description=embedDesc,\n color = embedColor) #,color=Hex code\n\n if addDetails:\n embed.add_field(name= \"Details\", value=\"\"\"```{name:s}```\"\"\".format (name=row[2]),inline=False)\n embed.add_field(name=\"Request Time\",value=row[3].strftime(\"%d %b %Y, %H:%M:%S\"),inline=True)\n embed.add_field(name=\"Pickup Time\",value=date.strftime(\"%d %b %Y, %H:%M:%S\"),inline=True)\n user = ctx.message.guild.get_member(row[1])\n uName = user.display_name if user is not None else \"MIA Member\"\n embed.add_field(name=\"Requested by\", value=uName,inline=False)\n embed.add_field(name=\"Location\", value=\"/goto {x:d} {y:d}\".format(x=x,y=y),inline=True)\n \n embed.add_field(name=\"­\",value=description,inline=False)\n\n await ctx.send(embed=embed)", "title": "" }, { "docid": "bad312b0600956252055407c921fa01b", "score": "0.48827735", "text": "def _deop_later(self, channel):\n while self.op_until[channel] - time.time() > 0:\n if (yield self.wait_for(\n Event(\"irc.hasop.lost\", channel=channel),\n timeout=self.op_until[channel] - time.time())\n ):\n # Lost op by something else? Manual intervention? okay fine\n # cancel this\n log.msg(\"Op cancelled before timer. 
Did you do that?\")\n self.op_until[channel] = time.time()\n return\n\n log.msg(\"op_until reached: issuing a -o mode request in {0}\".format(channel))\n yield self._do_mode(channel, \"-o\",\n (yield self.transport.issue_request(\"irc.getnick\")),\n )", "title": "" }, { "docid": "20ef3f8bc589fb0b19860b4aac81c6f4", "score": "0.48791298", "text": "def _do_proposal_for(self, client):\n with (yield self.lobby.proposal_locks[client].acquire()):\n if self.is_declined:\n return\n self.invited.add(client)\n try:\n result = yield client.ui.ask_yes_no(\n self._get_invitation_prompt(client),\n leave_question=True\n )\n if result:\n yield self.accept(client)\n else:\n self.decline(client)\n except ClientDeclinedFlag:\n return", "title": "" }, { "docid": "7af624268f8b457e06bcf1b622e42b84", "score": "0.486595", "text": "def _do_expire(self):\n t = time.time()\n\n # Expire probes\n for ip, expire_at in self.outstanding_probes.items():\n if t > expire_at:\n self.outstanding_probes.pop(ip, None)\n if ip in self.live_servers:\n self.log.warn(\"Server %s down\", ip)\n del self.live_servers[ip]\n\n # Expire flow\n c = len(self.memory)\n self.memory = {k: v for k, v in self.memory.items()\n if not v.is_expired}\n if len(self.memory) != c:\n self.log.debug(\"Expired %i flows\", c - len(self.memory))", "title": "" }, { "docid": "abc6563e8695096669633245639dd371", "score": "0.48533398", "text": "def forceCleanup(self):\n self.party.requestRejectInvite(self.leaderId, self.avId)\n self.cleanup()", "title": "" }, { "docid": "e4fbb54bc3bb0e0ce00e7595bc336a4f", "score": "0.48510394", "text": "def _populate_cache(self):\n msgs_json = self.queue.reserve(max=settings.MQ_FETCH_MSG_NUM, delete=True)\n if not (msgs_json and isinstance(msgs_json, dict)):\n return\n\n for msg in msgs_json.get('messages', []):\n event = msg.get('body', None)\n if event:\n self.event_cache.append(event)", "title": "" }, { "docid": "01384ea630109348d030ed4c040ba01f", "score": "0.48490632", "text": "def issue(self, OrderClass, bot, *args, **dct):\n for alive in bot.alives:\n super(MyCommander, self).issue(OrderClass, alive, *args, **dct)", "title": "" }, { "docid": "d495672ed6bcc602d0351c4edc7aab55", "score": "0.48434845", "text": "def collect(self) -> None:\n logger.info(\n \"Collecting unused comms. 
open: %d, active: %d, connecting: %d\",\n self.open,\n self.active,\n len(self._connecting),\n )\n for comms in self.available.values():\n for comm in comms:\n IOLoop.current().add_callback(comm.close)\n self.semaphore.release()\n comms.clear()", "title": "" }, { "docid": "dc9486c312f5da9aa89a625d45f84835", "score": "0.4843251", "text": "def __handleClientCleanup(self):\n if hasattr(self,'rejectDialog') and self.rejectDialog:\n self.rejectDialog.doneStatus = 'ok'\n self.__handleRejectAck()", "title": "" }, { "docid": "fe3197d1ba69f12d722f3113304aca2b", "score": "0.48408625", "text": "def _clean_up_expired_leases(machine_type):\n active = []\n expired = []\n\n for request in machine_type.leases:\n if request.hostname and request.lease_expiration_ts <= utils.utcnow():\n logging.warning(\n 'Request ID %s expired:\\nHostname: %s\\nExpiration: %s',\n request.client_request_id,\n request.hostname,\n request.lease_expiration_ts,\n )\n expired.append(request.hostname)\n else:\n active.append(request)\n\n machine_type.leases = active\n machine_type.pending_deletion.extend(expired)\n return expired", "title": "" }, { "docid": "0ae55bcb85f6f291ee1ec3b666dda9f3", "score": "0.48397794", "text": "def PurgeRecycle(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "title": "" }, { "docid": "e960413ad3ed777129b033243251bd0b", "score": "0.48227084", "text": "def execute_ready_multisig_deposits(cls):\n module = sys.modules[__name__]\n with django.db.transaction.atomic():\n multisig_transactions = list(\n Transaction.objects.filter(\n kind=Transaction.KIND.deposit,\n status=Transaction.STATUS.pending_anchor,\n pending_signatures=False,\n envelope_xdr__isnull=False,\n pending_execution_attempt=False,\n ).select_for_update()\n )\n ids = []\n for t in multisig_transactions:\n t.pending_execution_attempt = True\n ids.append(t.id)\n Transaction.objects.filter(id__in=ids).update(\n pending_execution_attempt=True\n )\n\n for i, transaction in enumerate(multisig_transactions):\n if module.TERMINATE:\n still_processing_transactions = multisig_transactions[i:]\n Transaction.objects.filter(\n id__in=[t.id for t in still_processing_transactions]\n ).update(pending_execution_attempt=False)\n break\n\n cls.handle_submit(transaction)", "title": "" }, { "docid": "5b5c5f9e2210e2d727bfbd9ecbb68707", "score": "0.481287", "text": "def forget_dead_hosts(self):\r\n pass", "title": "" }, { "docid": "5b5c5f9e2210e2d727bfbd9ecbb68707", "score": "0.481287", "text": "def forget_dead_hosts(self):\r\n pass", "title": "" }, { "docid": "1e3f7416557459460856d62a2adaefa8", "score": "0.4808211", "text": "def _channel_handling_thread(self) -> None:\n while len(self.channels) > 0:\n current_agents = list(self.agents.values())\n for agent_info in current_agents:\n # Send requests for action\n if agent_info.agent.wants_action.is_set():\n self._request_action(agent_info)\n agent_info.agent.wants_action.clear()\n # Pass updated statuses\n if agent_info.agent.has_updated_status.is_set():\n self._send_status_update(agent_info)\n agent_info.agent.has_updated_status.clear()\n # clear the message queue for this agent\n self._try_send_agent_messages(agent_info)\n # Send all messages from the system\n self._send_message_queue()\n self._request_status_update()\n # TODO(#103) is there a way we can trigger this when\n # agents observe instead?\n time.sleep(0.1)", "title": "" }, { "docid": "615b897ed553d5b08746ef2fba372edb", 
"score": "0.48003224", "text": "def _request_closed(self, connection):\n debug('Add %s to free-to-use connection list' % connection)\n self._cm.free_connection(connection)", "title": "" }, { "docid": "a4f4dbedee340fa776025a641063c55d", "score": "0.47978368", "text": "async def delPickup(self,ctx,x:int=-99999,y:int=-99999):\n server, cmd = chInfo (ctx)\n if server is None or cmd is None:\n return\n \n if cmd != \"aug-requests\":\n await ctx.send (\"This is not the place to use this command!. Please use *servername*-aug-requests\")\n return \n\n async with self.bot.pool.acquire() as conn:\n async with conn.cursor() as cur:\n guild = ctx.message.guild.id\n await cur.execute(\"SELECT id, picked_by, completed_time FROM augs WHERE guild = %s and server = %s and locationX =%s and locationY = %s\",\n (guild,server,x,y))\n if cur.rowcount ==0:\n await ctx.send (\"The augmentation request doesn't exist. Nothing to delete from.\")\n return \n\n allowed = await self.checkPermissions(ctx,cur, \"delete_requests\")\n row = await cur.fetchone()\n if row[1]!= ctx.message.author.id and not allowed:\n messageTitle = \"The pickup wasnt deleted\"\n embedDesc = \"\"\"That isn't your pickup to delete.\n Be nice!\"\"\"\n embedColor = red\n else:\n if row[2] is not None:\n messageTitle = \"The pickup has already been completed\"\n embedDesc = \"\"\"Sorry commander, but the pickup you tried to delete was already completed so i can't delete the pickup.\"\"\"\n embedColor = orange\n else:\n await cur.execute(\"UPDATE augs SET picked_by= %s , pickupd_comment = %s, picked_time = %s WHERE guild = %s and server = %s and id = %s\",\n (None,None,None, guild,server,row[0]))\n await conn.commit()\n messageTitle = \"The pickup has been deleted\"\n embedDesc = \"\"\"Sorry to hear that commander.\n The pickup at **/goto {} {}** has been deleted.\"\"\".format (x,y)\n embedColor = aqua\n \n embed = discord.Embed(\n type= \"rich\",\n title=messageTitle,\n description=embedDesc,\n color = embedColor) #,color=Hex code\n await ctx.send (embed=embed)", "title": "" }, { "docid": "ca4b29c955b7ea80b1dd49d510c3dd3e", "score": "0.47971028", "text": "def _handle_coroutines(self):\n plucks = []\n for coro in self._coroutines.values():\n\n if coro.state is not CoroutineStatus.PAUSED:\n\n pluck = self._handle_coroutine(coro)\n if pluck:\n\n plucks.append(coro)\n\n for coro in plucks:\n\n self._coroutines.pop(coro.key, None)", "title": "" }, { "docid": "a7f5a32dae1443449c5e1225d5665b5a", "score": "0.4775898", "text": "def _clean_expired(self):\n # Channel cleanup\n for channel, queue in list(self.channels.items()):\n # See if it's expired\n while not queue.empty() and queue._queue[0][0] < time.time():\n queue.get_nowait()\n # Any removal prompts group discard\n self._remove_from_groups(channel)\n # Is the channel now empty and needs deleting?\n if queue.empty():\n del self.channels[channel]\n\n # Group Expiration\n timeout = int(time.time()) - self.group_expiry\n for group in self.groups:\n for channel in list(self.groups.get(group, set())):\n # If join time is older than group_expiry end the group membership\n if (\n self.groups[group][channel]\n and int(self.groups[group][channel]) < timeout\n ):\n # Delete from group\n del self.groups[group][channel]", "title": "" }, { "docid": "e9ef08bf5cd35908f7569a08c52fec62", "score": "0.47648582", "text": "def update_expired_kernels():\n\n # get offer entities\n q = models.OfferModel.query(models.OfferModel.is_filled_or_expired == False)\n\n # update status for each registration entities\n for offer_key 
in q.iter(keys_only=True):\n try:\n # refetch since it's way slow\n deferred.defer(update_offer_if_expired, offer_key.id())\n except Exception as e:\n app.logger.error(e)\n logging.error(\"Error calling deferred task to update_offer_if_expired: \" + e)\n\n return 'Success'", "title": "" }, { "docid": "984045cac0637df6e975bdaa6e6e994c", "score": "0.47635064", "text": "def DeletePendingEvents(self):", "title": "" }, { "docid": "984045cac0637df6e975bdaa6e6e994c", "score": "0.47635064", "text": "def DeletePendingEvents(self):", "title": "" }, { "docid": "30cd6e24c3f200890311f2c94f377938", "score": "0.47628057", "text": "def _AddPendingToQueue(self):\n assert compat.all(diskie not in self._queue and diskie.loop == self\n for diskie in self._pending_add)\n\n self._queue.extend(self._pending_add)\n\n del self._pending_add[:]", "title": "" }, { "docid": "e4daf2bde5f57032bb263c793727ccdd", "score": "0.47614643", "text": "def on_pending(self, when, instanceid, why):\n pass", "title": "" }, { "docid": "16884eb2e401c6496ab306013ce60f4f", "score": "0.4761018", "text": "def release_expired(self):\n #print \"checking for releases\"\n with self.lock:\n cur_time = time.time()\n\n rel = []\n for i in xrange(len(self.waiters)):\n timer = self.waiters[i]\n if timer.deadline <= cur_time:\n rel.insert(0, (timer, i))\n else:\n break\n\n for timer, i in rel:\n # remove from wait list\n self.waiters.pop(i)\n\n for timer, i in rel:\n #print \"making callback for id %d\" % (timer.id)\n timer.make_callback('expired')\n\n self.reset_waittime()", "title": "" }, { "docid": "899764888273023fde5c7896c230497f", "score": "0.4759982", "text": "def test_receiving_reject_in_channel(self):\n\n self._send_leave_request()\n self._input_start_date()\n self._input_end_date()\n self._confirm_dates()\n self.switch_channel('leave-coordination')\n self._send_dividing_message()\n self.choose_general_channel()\n self._reject_request()\n self.switch_channel('leave-coordination')\n self._check_reject_notification_in_channel()", "title": "" }, { "docid": "dd81f8c212c662509f135e1d42155f85", "score": "0.4757679", "text": "def processPartialAidEscrow(self):\n for (pre,), mraw in self.db.gpae.getItemIter():\n try:\n msg = json.loads(mraw)\n self.processMessage(msg=msg)\n except kering.MissingAidError as ex:\n if logger.isEnabledFor(logging.DEBUG):\n logger.exception(\"Groupy unescrow failed: %s\\n\", ex.args[0])\n else:\n logger.error(\"Groupy unescrow failed: %s\\n\", ex.args[0])\n except Exception as ex: # log diagnostics errors etc\n # error other than missing AID so remove from PA escrow\n self.db.gpae.rem(pre, val=mraw)\n if logger.isEnabledFor(logging.DEBUG):\n logger.exception(\"Groupy unescrowed: %s\\n\", ex.args[0])\n else:\n logger.error(\"Groupy unescrowed: %s\\n\", ex.args[0])\n else:\n self.db.gpae.rem(pre, val=mraw)\n logger.info(\"Groupy unescrow succeeded in valid group op: \"\n \"msg=\\n%s\\n\", json.dumps(msg, indent=1))", "title": "" }, { "docid": "ae536c89d70c560b2ab1a18e0eb16007", "score": "0.47556648", "text": "def collect(self):\n self.isalive = False", "title": "" }, { "docid": "d17d074927fc5e0b72feda2917b628f7", "score": "0.47479135", "text": "def _purge_expired_registrations(self, exp_time=96):\n for uuid, data in self._store.pending_registrations.items():\n creation = datetime.strptime(data['creation_date'],\n \"%Y-%m-%d %H:%M:%S.%f\")\n now = datetime.utcnow()\n maxdelta = timedelta(hours=exp_time)\n if now - creation > maxdelta:\n self._store.pending_registrations.pop(uuid)", "title": "" }, { "docid": 
"0407b6dfcc994483c04cb9a2dfcd71a7", "score": "0.47471488", "text": "def test_disposable_emails_before_closing(self) -> None:\n zulip_realm = get_realm(\"zulip\")\n zulip_realm.emails_restricted_to_domains = False\n zulip_realm.disallow_disposable_email_addresses = False\n zulip_realm.save()\n\n self.login(\"hamlet\")\n external_address = \"[email protected]\"\n\n self.assert_json_success(self.invite(external_address, [\"Denmark\"]))\n self.check_sent_emails([external_address])\n\n zulip_realm.disallow_disposable_email_addresses = True\n zulip_realm.save()\n\n result = self.submit_reg_form_for_user(\"[email protected]\", \"password\")\n self.assertEqual(result.status_code, 200)\n self.assert_in_response(\"does not allow signups using disposable email addresses.\", result)", "title": "" }, { "docid": "b60012f85b4ad5723a23b477bf40129d", "score": "0.47438663", "text": "def requestWork(self):\n #VERIFY refresh the job and also check that we only need to check extranonce2 max here.\n if self.current_job:\n if self.current_job.extranonce2 >= self.extranonce2_max:\n #Expire job and get new one\n self.runCallback('debug', \"Job %s reached extranonce2 limit at %X, removing...\" % (self.current_job.job_id, self.current_job.extranonce2)) \n self.switchCurrentJob()\n if not self.current_job:\n return\n \n self.runCallback('debug', \"Refreshing job %s; extranonce2: %X\" % (self.current_job.job_id, self.current_job.extranonce2)) \n #VERIFY that recursion is fixed here\n reactor.callLater(0, self.handleWork, self.stratum_to_getwork(self.current_job))", "title": "" }, { "docid": "9a181b76ed98fa6cbcec8f3a899f6b35", "score": "0.47384447", "text": "def msg_propose_reject(self, conn, msg):\r\n if msg.commandnumber in self.outstandingproposes:\r\n prc = self.outstandingproposes[msg.commandnumber]\r\n if msg.inresponseto == prc.ballotnumber:\r\n if self.debug: self.logger.write(\"Paxos State\",\r\n \"got a reject for proposal ballotno %s \"\\\r\n \"commandno %s proposal %s still %d \"\\\r\n \"out of %d accepts\" % \\\r\n (prc.ballotnumber, prc.commandnumber,\r\n prc.proposal, prc.receivedcount, prc.ntotal))\r\n # take this response collector out of the outstanding propose set\r\n del self.outstandingproposes[msg.commandnumber]\r\n # become inactive\r\n self.active = False\r\n # handle reject\r\n self._handlereject(msg, prc)", "title": "" }, { "docid": "819f79604a6d4afa1a6ea824e705d26d", "score": "0.47355527", "text": "def clear(self):\n\n self._pending_keys = []", "title": "" }, { "docid": "7cd0daca81aedd6b74edf849a02e6877", "score": "0.47342864", "text": "def _resolve_expositions(self):\r\n while True:\r\n try:\r\n exposition = self._expositions.pop(0)\r\n exposition._apply()\r\n except IndexError:\r\n break", "title": "" }, { "docid": "ff61c6a6ac53a812976f0193e33f9217", "score": "0.4729014", "text": "def resend_queued_messages(self):\n\n while (self.unacked_messages_counter > 0 and\n self.unacked_messages_counter < self.unacked_messages_quota):\n self.logger.debug(\"Resend some unACKed messages...\")\n self.send_message(self.sent_queue.pop()[\"message\"])", "title": "" }, { "docid": "c6b2a8dcf5083ec6d1ce2f7c180a956a", "score": "0.4728138", "text": "def ResumeProcessingOfPendingEvents(self):", "title": "" }, { "docid": "d590e0a970c1c43f1deee66d332ef197", "score": "0.4727166", "text": "def expunge(self, argv):\n self._print_msg(self._client.expunge())", "title": "" }, { "docid": "7a934b0f77c3b1140a5f0661d4211af1", "score": "0.47253034", "text": "def notifies(self):\n if self.state == \"selected\" and self.mbox 
is not None:\n self.mbox.resync(only_notify=self)\n self.send_pending_expunges()\n return", "title": "" }, { "docid": "b8756668dbfa02683f8e1d30ff6d1651", "score": "0.47239196", "text": "def SuspendProcessingOfPendingEvents(self):", "title": "" }, { "docid": "e486d621a9a436eb5ed6b6c1254de21d", "score": "0.472384", "text": "def on_idle(self, spider):\n if self.ids_to_delete and not self.debug_mode:\n # Issue a request to delete stored IDs\n frontier, slot = self.consume_from\n url = self.url_component_join(\n self.hs_endpoint, 'hcf', self.hs_project_id, frontier, 's',\n slot, 'q', 'deleted'\n )\n request = scrapy.Request(\n url=url,\n method='POST',\n body='\\n'.join('\"%s\"' % x for x in self.ids_to_delete),\n meta={\n 'num_ids': len(self.ids_to_delete),\n 'num_links': self.num_links_to_delete,\n },\n dont_filter=True,\n callback=self.parse_delete_from_queue,\n )\n scrapy.utils.request.request_authenticate(request, self.hs_auth, '')\n spider.crawler.engine.crawl(request, spider)\n self.num_links_to_delete = 0\n self.ids_to_delete = None\n raise scrapy.exceptions.DontCloseSpider\n elif self.num_links_to_fetch:\n # Issue a request to fetch new links\n frontier, slot = self.consume_from\n url = self.url_component_join(\n self.hs_endpoint, 'hcf', self.hs_project_id, frontier, 's',\n slot, 'q'\n )\n request = scrapy.FormRequest(\n url=url,\n method='GET',\n formdata={\n 'mincount': str(min(\n self.num_links_batch, self.num_links_to_fetch)),\n },\n dont_filter=True,\n callback=self.parse_read_queue,\n )\n scrapy.utils.request.request_authenticate(request, self.hs_auth, '')\n spider.crawler.engine.crawl(request, spider)\n raise scrapy.exceptions.DontCloseSpider\n elif self.link_buf:\n # Issue some requests to flush the link buffer\n for frontier, frontier_buf in self.link_buf.items():\n for slot, slot_buf in frontier_buf.items():\n if slot_buf:\n request = self._get_add_queue_request(\n frontier, slot, slot_buf)\n spider.crawler.engine.crawl(request, spider)\n self.link_buf = {}\n raise scrapy.exceptions.DontCloseSpider\n elif self.start_new_job and not self.new_job_scheduled:\n # Issue a request to schedule a new job\n request = scrapy.FormRequest(\n url='https://dash.scrapinghub.com/api/schedule.json',\n method='POST',\n formdata={\n 'project': self.hs_project_id,\n 'spider': spider.name,\n },\n dont_filter=True,\n callback=self.parse_schedule_job\n )\n scrapy.utils.request.request_authenticate(request, self.hs_auth, '')\n spider.crawler.engine.crawl(request, spider)\n self.new_job_scheduled = True\n raise scrapy.exceptions.DontCloseSpider", "title": "" }, { "docid": "88afc9ad21c5fd5863e8b692b1c698be", "score": "0.47219893", "text": "def declare_dead_letters(self, future):\n\n def consume(method_frame):\n\n self.amqp_channel.basic_consume(\n self.on_dead_letter,\n queue=self.dead_letters,\n )\n\n def do_bind(method_frame):\n\n self.amqp_channel.queue_bind(\n consume,\n queue=self.dead_letters,\n exchange=self.dead_letters,\n )\n\n def declare_queue(method_frame):\n\n self.amqp_channel.queue_declare(\n do_bind,\n queue=self.dead_letters,\n arguments={\n 'x-expires': max(\n self.expiry * 2000,\n self.group_expiry * 1000,\n ) * 2,\n },\n )\n\n self.amqp_channel.exchange_declare(\n declare_queue,\n exchange=self.dead_letters,\n exchange_type='fanout',\n auto_delete=True,\n )", "title": "" }, { "docid": "3467523f33e686e21e2a492a512a9c1a", "score": "0.47200742", "text": "def refillEnergy(self):\n\t\tself.energy = self.totalEnergy", "title": "" }, { "docid": "c6363800d8c734f61afca2d6d3bad890", "score": 
"0.47190917", "text": "def _mode_cnpub_process_deliveries(self):\n assert self.state == ST_ONLINE\n assert self.mode == Publisher.MODE_CNPUB\n assert self.tagger is not None\n\n while len(self.messages) > 0:\n try:\n msg, xchg, rk, fut, parent_span, span_enqueued = self.messages.popleft()\n except IndexError:\n # todo see docs/casefile-0001\n break\n\n if not fut.set_running_or_notify_cancel():\n if span_enqueued is not None:\n from opentracing import logs\n span_enqueued.log_kv({logs.EVENT: 'Cancelled'})\n span_enqueued.finish()\n parent_span.finish()\n continue # cancelled\n\n self.tagger.deposit(self.tagger.get_key(),\n FutureConfirmableRejectable(fut),\n parent_span)\n assert isinstance(xchg, (six.binary_type, six.text_type))\n self._pub(msg, xchg, rk, parent_span, span_enqueued, dont_close_span=True)", "title": "" }, { "docid": "7d88224085981a2978e0515a5d2aebe1", "score": "0.4718728", "text": "def _poll_invalidations(self):\n try:\n log.debug(\"poll invalidations from dmd.storage\")\n return self.__poll_invalidations()\n except Exception:\n log.exception(\"error in _poll_invalidations\")", "title": "" }, { "docid": "1e22dfc4dd4daf45a53090eba66642b2", "score": "0.47154495", "text": "def PurgeProducts(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")", "title": "" }, { "docid": "fca0ccaad31e8649d19aef12ddffb27d", "score": "0.4713567", "text": "def __run_bugex(self):\n # PROCESSING phase stars now\n self.update_status(UserRequestStatus.PROCESSING)\n\n # notify monitor\n bugex_mon = BugExMonitor.Instance()\n bugex_mon.new_request(self)", "title": "" }, { "docid": "682045bc85c3d72624032720e8679fb7", "score": "0.47035196", "text": "def forget_dead_hosts(self):\n pass", "title": "" }, { "docid": "957da6cb777702830c9a8565eaba27df", "score": "0.47029677", "text": "def forgetFuture(self):\n self.states = self.states[:self.now+1]", "title": "" }, { "docid": "67be744b5ec6084c5146a8c2d4d8d8d1", "score": "0.46942332", "text": "def queue_handler(self):\n while ((self.instruction_queue) and (self.time_check()!=True)):\n if self.num_obtained_endpoints == self.num_expected_endpoints:\n logging.debug('Obtained the required endpoints')\n return\n self.handle_queue()", "title": "" }, { "docid": "67ed79779ece376fc3572508657c6cc9", "score": "0.46867985", "text": "def query_pending(self, key, expired, timeout = 2000, optimistic_lock = False):\n if self.ipsub.is_broker or not self.ipsub.is_running:\n # Easy peachy\n return self._query_pending_locally(key, expired, timeout, optimistic_lock)\n\n # Listener... 
a tad more complex\n ipsub_ = self.ipsub\n txid = self.txid if optimistic_lock else None\n req = ipsub_.encode_payload(self.encoding, (\n key,\n txid,\n self.p2p_pub_binds,\n optimistic_lock))\n\n ctx = ipsub_.context\n waiter, waiter_id = _mkwaiter(ctx, zmq.PAIR, \"qpw\")\n try:\n def signaler(prefix, event, message, req = req):\n if message[0][2:] == req:\n # This is our message\n signaler = ctx.socket(zmq.PAIR)\n signaler.connect(waiter_id)\n signaler.send(message[1][-1])\n signaler.close()\n return False\n else:\n return True\n ipsub_.listen(self.namespace, ipsub.EVENT_UPDATE_ACKNOWLEDGED, signaler)\n ipsub_.publish(self.pendqprefix, req)\n for i in range(3):\n if waiter.poll(timeout/4):\n break\n elif expired():\n ipsub_.publish(self.pendqprefix, req)\n else:\n break\n else:\n waiter.poll(timeout/4)\n if waiter.poll(1):\n try:\n rv = json.loads(waiter.recv())\n except ValueError:\n # It happens here that the IPSub returns its OK reply, when\n # there are no registered brokers on our namespace. Means it's up to us.\n # Enter broker mode and answer our caller locally\n if ipsub_.is_running:\n if ipsub_.is_broker:\n self._on_leave_listener(weakref.ref(self), None, ipsub.EVENT_LEAVE_LISTENER, None)\n self._on_enter_broker(weakref.ref(self), None, ipsub.EVENT_ENTER_BROKER, None)\n else:\n self._on_leave_broker(weakref.ref(self), None, ipsub.EVENT_LEAVE_BROKER, None)\n self._on_enter_listener(weakref.ref(self), None, ipsub.EVENT_ENTER_LISTENER, None)\n return self._query_pending_locally(key, expired, timeout, optimistic_lock)\n if rv is not None:\n rv = rv[-1]\n elif not expired():\n rv = OOB_UPDATE\n elif expired():\n rv = None\n else:\n rv = OOB_UPDATE\n ipsub_.unlisten(self.namespace, ipsub.EVENT_UPDATE_ACKNOWLEDGED, signaler)\n finally:\n waiter.close()\n\n if optimistic_lock and rv is None:\n # We acquired it\n self.recent_done.pop(key,None)\n self.pending[key] = txid\n return rv", "title": "" }, { "docid": "9af0e5d17650f4949c1b05eb46166249", "score": "0.46779814", "text": "def mark_exhausted(self, resp):\n self._limit = int(resp.headers.get('X-RATE-LIMIT-LIMIT', self._limit))\n self._remaining = 0\n self._reset_time = int(resp.headers.get('X-RATE-LIMIT-RESET',\n time.time() + FIFTEEN_MINUTES))", "title": "" }, { "docid": "e44ea39664f3768900cc5db74dd82ef3", "score": "0.46763533", "text": "def reset_offers_agents(self):\n for agent in self.agent_buffer():\n agent.reset_offer()", "title": "" }, { "docid": "6b95b6c3fb9713d15a080206097d7a43", "score": "0.46757594", "text": "def commit_on_request_ends(self):\n pass", "title": "" }, { "docid": "60ea6c29ea0af2c2eef6e8373a846b07", "score": "0.46732917", "text": "def test_reject_notification(self):\n\n self.switch_channel(self._bot_name)\n self._send_dividing_message()\n self.choose_general_channel()\n self._send_leave_request()\n self._input_start_date()\n self._input_end_date()\n self._confirm_dates()\n self._reject_request()\n self.switch_channel(self._bot_name)\n self._check_reject_notification()", "title": "" }, { "docid": "fa9debe10cc6495cb52f68f57f9c8b87", "score": "0.46697196", "text": "def rooms_ready(successes, failures):\n for room_jid in rooms_jids.difference(failures):\n # Invite to valid rooms (old and new ones)\n self['xep_0045'].invite(room_jid, from_jid.full,\n \"Client accepted\")", "title": "" }, { "docid": "e6b5a5111d1a4f412e665f9d49a6eae5", "score": "0.46695718", "text": "async def _queue_clean(self):\n pq, _ = await self._get_queue()\n\n def verify_item(item):\n # The criteria may be changed.\n return \"item_uid\" in 
item\n\n items_to_remove = []\n for item in pq:\n if not verify_item(item):\n items_to_remove.append(item)\n\n for item in items_to_remove:\n await self._remove_item(item, single=False)\n\n # Clean running plan info also (on the development computer it may contain garbage)\n item = await self._get_running_item_info()\n if item and not verify_item(item):\n await self._clear_running_item_info()", "title": "" }, { "docid": "9ed229b1953cdcc51095d05abb13998d", "score": "0.46661216", "text": "def iter_pending(self):\n while True:\n msg = self.poll()\n if msg is None:\n return\n else:\n yield msg", "title": "" }, { "docid": "39ab1cf7b06380afb0ecb9e39df24304", "score": "0.46656936", "text": "def purge(self):\n t = time.time()\n expired = []\n for address, worker in self.queue.items():\n if t > worker.expiry: # Worker expired\n expired.append(address)\n for address in expired:\n self.queue.pop(address, None)\n logger.info(\"W: Idle worker expired: {}, remaining worker: {}\".format(address, len(self.queue)))", "title": "" }, { "docid": "76d3c48e3fddcd28066db23d5b2155bb", "score": "0.46648583", "text": "def _do_expire (self):\r\n t = time.time()\r\n\r\n # Expire probes\r\n for ip,expire_at in self.outstanding_probes.items():\r\n if t > expire_at:\r\n self.outstanding_probes.pop(ip, None)\r\n if ip in self.live_servers:\r\n self.log.warn(\"Server %s down\", ip)\r\n del self.live_servers[ip]\r\n\r\n # Expire old flows\r\n c = len(self.memory)\r\n self.memory = {k:v for k,v in self.memory.items()\r\n if not v.is_expired}\r\n if len(self.memory) != c:\r\n self.log.debug(\"Expired %i flows\", c-len(self.memory))", "title": "" }, { "docid": "73dca56fa58080a0c337a4afb5bca4b4", "score": "0.46606046", "text": "def processPartialSignedEscrow(self):\n for (pre,), dat in self.db.gpse.getItemIter():\n msg = json.loads(dat)\n\n gid = msg[\"pre\"]\n dig = msg[\"dig\"]\n\n dgkey = dbing.dgKey(gid, dig)\n eraw = self.db.getEvt(dgkey)\n mssrdr = coring.Serder(raw=bytes(eraw)) # escrowed event\n\n dgkey = dbing.dgKey(mssrdr.preb, mssrdr.digb)\n sigs = self.hab.db.getSigs(dgkey)\n sigers = [coring.Siger(qb64b=bytes(sig)) for sig in sigs]\n\n try:\n self.processMessage(msg, mssrdr=mssrdr, sigers=sigers)\n except kering.MissingSignatureError as ex:\n if logger.isEnabledFor(logging.DEBUG):\n logger.exception(\"Groupy unescrow failed: %s\\n\", ex.args[0])\n else:\n logger.error(\"Groupy unescrow failed: %s\\n\", ex.args[0])\n except Exception as ex: # log diagnostics errors etc\n # error other than missing sigs so remove from PA escrow\n self.db.gpse.rem(pre)\n if logger.isEnabledFor(logging.DEBUG):\n logger.exception(\"Groupy unescrowed: %s\\n\", ex.args[0])\n else:\n logger.error(\"Groupy unescrowed: %s\\n\", ex.args[0])\n else:\n self.db.gpse.rem(pre)\n logger.info(\"Groupy unescrow succeeded in valid group op: \"\n \"msg=\\n%s\\n\", json.dumps(msg, indent=1))", "title": "" }, { "docid": "2d8b49fc5eae0bfd6d536189e77e669b", "score": "0.46562725", "text": "def purge(self):\n t = time.time()\n expired = []\n for address,worker in self.queue.iteritems():\n if t > worker.expiry: # Worker expired\n expired.append(address)\n for address in expired:\n rh_logger.logger.report_event(\"Idle worker expired: %s\" % address)\n self.queue.pop(address, None)", "title": "" }, { "docid": "2dd6385916dfcc713af47fce4ceb9917", "score": "0.4655263", "text": "def clean_up():\n with rq.Connection(redis_connection()):\n for q in rq.Queue.all():\n if q != rq.queue.FailedQueue():\n if q.is_empty():\n clear_pop_interval_stat(q.name)", "title": "" } ]
6013a3aa67e5ee497f635a33110033b3
Register image stacks to pixel accuracy.
[ { "docid": "385c627cffd41c090132693113c74cc7", "score": "0.0", "text": "def register_images(images, index=None, window=(500, 500), upsample=1.):\n if index is None:\n index = ((0,) * (images[0].ndim - 2) + (slice(None),) * 2)\n\n sz = [image[index].shape for image in images]\n sz = np.array([max(x) for x in zip(*sz)])\n\n origin = np.array(images[0].shape) * 0.\n\n center = tuple([slice(s / 2 - min(s / 2, rw), s / 2 + min(s / 2, rw))\n for s, rw in zip(sz, window)])\n\n def pad(img):\n pad_width = [(s / 2, s - s / 2) for s in (sz - img.shape)]\n img = np.pad(img, pad_width, 'constant')\n return img[center], np.array([x[0] for x in pad_width]).astype(float)\n\n image0, pad_width = pad(images[0][index])\n offsets = [origin.copy()]\n offsets[0][-2:] += pad_width\n for image in [x[index] for x in images[1:]]:\n padded, pad_width = pad(image)\n shift, error, _ = register_translation(image0, padded, upsample_factor=upsample)\n\n offsets += [origin.copy()]\n offsets[-1][-2:] = shift + pad_width # automatically cast to uint64\n\n return offsets", "title": "" } ]
[ { "docid": "5d59599f609447a64fef6ffe295e5015", "score": "0.64948565", "text": "def init_stack():\n global image_stack_\n global h_, w_\n global max_frames_in_trial\n image_stack_ = np.zeros(\n shape=(max_frames_in_trial, h_ + 1, w_), dtype=np.uint8\n )", "title": "" }, { "docid": "9ff89718a443c134a2a10796be1480ba", "score": "0.60193014", "text": "def setup_imagestack() -> ImageStack:\n collection = build_image(\n range(1),\n ROUND_LABELS,\n CH_LABELS,\n ZPLANE_LABELS,\n tile_fetcher_factory(UniqueTiles, True),\n )\n tileset = list(collection.all_tilesets())[0][1]\n\n return ImageStack.from_tileset(tileset)", "title": "" }, { "docid": "9ff55d667e403b313efe2bfacf68f5d6", "score": "0.56804544", "text": "def __init__(self, im_stack, mask_stack):\n\n self.im_stack = im_stack\n self.mask_stack = mask_stack\n self.converters = []\n self.converters.append(converter([0 for i in range(3)],\n [255 for i in range(3)],\n [256 for i in range(3)],\n [\"blue\",\"green\",\"red\"],\n converter_factory.bgr2bgr))\n self.converters.append(converter([0 for i in range(3)],\n [360,255,255],\n [360,256,256],\n [\"hue\",\"saturation\",\"value\"],\n converter_factory.bgr2hsv))\n self.converters.append(converter([0 for i in range(3)],\n [255 for i in range(3)],\n [256 for i in range(3)],\n [\"X\",\"Y\",\"Z\"],\n converter_factory.bgr2cie))\n #self.converters.append(converter.rgb2sift)\n self.converters.append(converter([-128],\n [128],\n [256],\n [\"opponent 1\"],\n converter_factory.opponent1))\n self.converters.append(converter([-125],\n [128],\n [254],\n [\"opponent 2\"],\n converter_factory.opponent2))\n self.converters.append(converter([0],\n [1],\n [256],\n [\"normalized red\"],\n converter_factory.norm_red))\n self.converters.append(converter([0],\n [1],\n [256],\n [\"normalized green\"],\n converter_factory.norm_green))", "title": "" }, { "docid": "0f4e714fd2c0203cd72a8696fcd1576a", "score": "0.55862516", "text": "def register_rgb_depth(depth_data: np.ndarray, rgb_data: np.ndarray,\r\n depth_intrinsics: np.ndarray, rgb_intrinsics: np.ndarray,\r\n extrinsics: np.ndarray):\r\n rgb_height = rgb_data.shape[0]\r\n rgb_width = rgb_data.shape[1]\r\n\r\n # backproject to 3D and transform to color camera space\r\n points_depth_projected = backproject_image(depth_intrinsics, depth_data)\r\n points_depth_projected = extrinsics @ points_depth_projected.T\r\n points_depth_projected = points_depth_projected.T\r\n points_depth_projected = points_depth_projected[:, :3]\r\n\r\n # Project to RGB screen\r\n points_rgb_screen = rgb_intrinsics @ (points_depth_projected / points_depth_projected[:, 2, None]).T\r\n\r\n points = []\r\n colors = []\r\n positions = []\r\n # Collect color information of corresponding pixels in RGB image\r\n for i, point_rgb_screen in enumerate(points_rgb_screen.T):\r\n x = point_rgb_screen[0]\r\n y = point_rgb_screen[1]\r\n\r\n if 0 <= x < rgb_width and 0 <= y < rgb_height:\r\n x = round(x)\r\n y = round(y)\r\n positions.append((x, y))\r\n\r\n point_3d = points_depth_projected[i][:3]\r\n points.append([point_3d[0], -point_3d[1], point_3d[2]]) # Invert y-axis\r\n colors.append(rgb_data[y, x])\r\n\r\n return np.array(points), np.array(colors), positions", "title": "" }, { "docid": "c55101937f0208717e882f6670473351", "score": "0.5536151", "text": "def image_stack_profile(self, inputs, points, output, callback=None):\n args = []\n args.append(\"--inputs='{}'\".format(inputs))\n args.append(\"--points='{}'\".format(points))\n args.append(\"--output='{}'\".format(output))\n return 
self.run_tool('image_stack_profile', args, callback) # returns 1 if error", "title": "" }, { "docid": "860401a6322bfb0abbf7a9868e703b43", "score": "0.5424817", "text": "def stack(inputs, axis=0):\n pass", "title": "" }, { "docid": "04e95a03f5d903e33a3aeaf293b5f1f6", "score": "0.54078037", "text": "def __init__(self):\n self.stack = []\n self.minValueStack = []", "title": "" }, { "docid": "d3e74b7a17f73e94731f41d5ac1474c4", "score": "0.5363755", "text": "def test_adds_stack_if_asked(self, sir):\r\n ed = sir(None, None, {'stack_info': True})\r\n assert 'stack' in ed", "title": "" }, { "docid": "d23d0b1518ccf97c2145c8ca8d47abcc", "score": "0.53253907", "text": "def __init__(self):\n self.stack = []\n self.maxStack = []", "title": "" }, { "docid": "d86e53e7cc3194ec160b4433ed931d5d", "score": "0.5301946", "text": "def __init__(self):\n self.input_stack = []\n self.output_stack = []", "title": "" }, { "docid": "470079a9f4a1cece53602beaad836606", "score": "0.52980113", "text": "def test_adds_stack_if_asked(self, sir):\n ed = sir(None, None, {\"stack_info\": True})\n\n assert \"stack\" in ed", "title": "" }, { "docid": "ddfc76948e0d33e5651422ac4b3a01ac", "score": "0.5267172", "text": "def plot_stack2(images, n):\n im = images[n, :, :]\n plt.figure(figsize=[12, 12])\n plt.imshow(im.T, cmap='gray') # , vmax=im.mean()*2)\n plt.show()", "title": "" }, { "docid": "449e4d1ad06ac80bf20978b98ff69827", "score": "0.52497846", "text": "def stack(self, other, axis='0'):\n \n pass", "title": "" }, { "docid": "6e20af1126b1b6824ce77d7c402ddc4b", "score": "0.52297574", "text": "def _insert(self, image):\n # create two levels of hierarchy by first indexing group of\n # images having the same quantized average color.\n #qcolor = quantize_color(image.color)\n pixel = image.pixel\n \n qcolor = (pixel.qr,pixel.qg,pixel.qb)\n \n self._img_list.setdefault(qcolor, list()).append(image)", "title": "" }, { "docid": "2b328a7a1cb6eca9fde567bc7f8cf534", "score": "0.52271986", "text": "def push(image):\n image.push()", "title": "" }, { "docid": "aa04de4ae60dde85e336886ebe23d18e", "score": "0.52118635", "text": "def _create_stacking(self):\n if self.stacking:\n print('[AutoML] Creating Stacking Ensemble')\n\n # Select feature set that has been picked most often for hyper parameter optimization\n results = self._sort_results(self.results[np.logical_and(\n self.results['type'] == 'Hyper Parameter',\n self.results['version'] == self.version,\n )])\n feature_set = results['dataset'].value_counts().index[0]\n results = results[results['dataset'] == feature_set]\n print('[AutoML] Selected Stacking feature set: {}'.format(feature_set))\n\n # Create Stacking Model Params\n n_stacking_models = 3\n stacking_models_str = results['model'].unique()[:n_stacking_models]\n stacking_models_params = [Utils.parse_json(results.iloc[np.where(results['model'] == sms)[0][0]]['params'])\n for sms in stacking_models_str]\n stacking_models = dict([(sms, stacking_models_params[i]) for i, sms in enumerate(stacking_models_str)])\n print('[AutoML] Stacked models: {}'.format(list(stacking_models.keys())))\n\n # Add samples & Features\n stacking_models['n_samples'], stacking_models['n_features'] = self.x.shape\n\n # Prepare Stack\n if self.mode == 'regression':\n stack = StackingRegressor(**stacking_models)\n cv = KFold(n_splits=self.cvSplits, shuffle=self.shuffle)\n\n elif self.mode == 'classification':\n stack = StackingClassifier(**stacking_models)\n cv = StratifiedKFold(n_splits=self.cvSplits, shuffle=self.shuffle)\n else:\n raise 
NotImplementedError('Unknown mode')\n\n # Cross Validate\n x, y = self.x[self.featureSets[feature_set]].to_numpy(), self.y.to_numpy()\n score = []\n times = []\n for (t, v) in tqdm(cv.split(x, y)):\n start_time = time.time()\n xt, xv, yt, yv = x[t], x[v], y[t].reshape((-1)), y[v].reshape((-1))\n model = copy.deepcopy(stack)\n model.fit(xt, yt)\n score.append(self.scorer(model, xv, yv))\n times.append(time.time() - start_time)\n\n # Output Results\n print('[AutoML] Stacking result:')\n print('[AutoML] {}: {:.2f} \\u00B1 {:.2f}'.format(self.objective, np.mean(score), np.std(score)))\n self.results = self.results.append({\n 'date': datetime.today().strftime('%d %b %y'),\n 'model': type(stack).__name__,\n 'dataset': feature_set,\n 'params': json.dumps(stack.get_params()),\n 'mean_objective': np.mean(score),\n 'std_objective': np.std(score),\n 'mean_time': np.mean(times),\n 'std_time': np.std(times),\n 'version': self.version,\n 'type': 'Stacking',\n }, ignore_index=True)\n self.results.to_csv(self.mainDir + 'Results.csv', index=False)\n\n # Document\n if self.documentResults:\n self.document(stack, feature_set)", "title": "" }, { "docid": "ca46995ff0f39158b9ed77be266c453d", "score": "0.52106416", "text": "def RegisterImage(self, type, bmp):", "title": "" }, { "docid": "9309ce023928ea33ee10cf26f436691c", "score": "0.5197936", "text": "def __init__(self):\n self.stacks = []\n self.h = []", "title": "" }, { "docid": "df4a3ded57a56eea2a34648419b58265", "score": "0.5194807", "text": "def __init__(self):\n self.stack = []\n self.min_stack = []", "title": "" }, { "docid": "df4a3ded57a56eea2a34648419b58265", "score": "0.5194807", "text": "def __init__(self):\n self.stack = []\n self.min_stack = []", "title": "" }, { "docid": "df4a3ded57a56eea2a34648419b58265", "score": "0.5194807", "text": "def __init__(self):\n self.stack = []\n self.min_stack = []", "title": "" }, { "docid": "df4a3ded57a56eea2a34648419b58265", "score": "0.5194807", "text": "def __init__(self):\n self.stack = []\n self.min_stack = []", "title": "" }, { "docid": "df4a3ded57a56eea2a34648419b58265", "score": "0.5194807", "text": "def __init__(self):\n self.stack = []\n self.min_stack = []", "title": "" }, { "docid": "df4a3ded57a56eea2a34648419b58265", "score": "0.5194807", "text": "def __init__(self):\n self.stack = []\n self.min_stack = []", "title": "" }, { "docid": "2f46083ad7cb9175df98043b3b352a47", "score": "0.51815975", "text": "def __init__(self):\n self.stack = []\n self.max_stack = []", "title": "" }, { "docid": "ddc0d62b208eb81aea9a47ab69593d07", "score": "0.5176907", "text": "def run(self):\n self.stack_counts_vectors()\n self.stack_aeff()\n self.stack_edisp()\n self.stack_obs()", "title": "" }, { "docid": "e12b6c1306ef91ac2e9be00b543ee6a3", "score": "0.51694906", "text": "def __init__(self):\n self.stack1 = []\n self.stack2 = []", "title": "" }, { "docid": "e12b6c1306ef91ac2e9be00b543ee6a3", "score": "0.51694906", "text": "def __init__(self):\n self.stack1 = []\n self.stack2 = []", "title": "" }, { "docid": "9a0cbda1715723dc7cc8895a3c2d6b5e", "score": "0.5142424", "text": "def run(self, stack: ImageStack, *args) -> Optional[ImageStack]:\n raise NotImplementedError()", "title": "" }, { "docid": "61cc584877a54af9362d535b79924e64", "score": "0.51285064", "text": "def stack_image(img_raw):\n den = len(img_raw.shape)\n if den > 2:\n return np.sum(img_raw, axis=0)\n return img_raw", "title": "" }, { "docid": "542d7e1380cd0df5888f7ebd3869e0a8", "score": "0.5122856", "text": "def __init__(self):\n self.stack1 = stack()\n self.stack2 
= stack()", "title": "" }, { "docid": "be7e548679f3e7928eeaf965930238ed", "score": "0.51224697", "text": "def stack_expressions(self, stack_expressions):\n\n self._stack_expressions = stack_expressions", "title": "" }, { "docid": "0a6318c035985a340dd670635228e739", "score": "0.5119363", "text": "def add_stack(self,e,h,v,p,stack_name=\"Radiation\",entry_name=None,\n title_0=\"\",title_1=\"\",title_2=\"\"):\n\n f = h5py.File(self.filename, 'a')\n\n if entry_name is None:\n f1 = f\n else:\n f1 = f[entry_name]\n\n\n f2 = f1.create_group(stack_name)\n\n f2.attrs['NX_class'] = 'NXdata'\n f2.attrs['signal'] = '%s'%(\"stack_data\")\n f2.attrs['axes'] = [self.label_stack_axis0, self.label_stack_axis1, self.label_stack_axis2]\n\n # Image data\n ds = f2.create_dataset(self.label_stack_data, data=p)\n\n ds = f2.create_dataset(self.label_stack_axis0, data=e)\n ds.attrs['long_name'] = title_0 # suggested 0 axis plot label\n\n # X axis data\n ds = f2.create_dataset(self.label_stack_axis1, data=h)\n ds.attrs['long_name'] = title_1 # suggested 1 axis plot label\n\n\n # Y axis data\n ds = f2.create_dataset(self.label_stack_axis2, data=v)\n ds.attrs['long_name'] = title_2 # suggested 2 axis plot label\n\n f.close()", "title": "" }, { "docid": "dae3d13a77557276f3af766cf3f26136", "score": "0.51188016", "text": "def __init__(self):\r\n self.stack = []", "title": "" }, { "docid": "09649ea743134a600543200f606fcd46", "score": "0.5107547", "text": "def __init__(self):\n self.stack = []", "title": "" }, { "docid": "09649ea743134a600543200f606fcd46", "score": "0.5107547", "text": "def __init__(self):\n self.stack = []", "title": "" }, { "docid": "09649ea743134a600543200f606fcd46", "score": "0.5107547", "text": "def __init__(self):\n self.stack = []", "title": "" }, { "docid": "09649ea743134a600543200f606fcd46", "score": "0.5107547", "text": "def __init__(self):\n self.stack = []", "title": "" }, { "docid": "09649ea743134a600543200f606fcd46", "score": "0.5107547", "text": "def __init__(self):\n self.stack = []", "title": "" }, { "docid": "09649ea743134a600543200f606fcd46", "score": "0.5107547", "text": "def __init__(self):\n self.stack = []", "title": "" }, { "docid": "09649ea743134a600543200f606fcd46", "score": "0.5107547", "text": "def __init__(self):\n self.stack = []", "title": "" }, { "docid": "ef6b2c14725b03e2b012f65d089cc7ab", "score": "0.5059532", "text": "def stack():\n global _stack\n return _stack", "title": "" }, { "docid": "ac826b24497f4a93449dc3e42b52d5d0", "score": "0.50530946", "text": "def test_push_1():\n assert not layer.__stack__\n\n layer.push(g1)\n\n assert layer.stack[0] is g1\n assert layer.__stack__\n\n with open(os.devnull, 'w') as f, redirect_stdout(f):\n run(layer, 'fit', X, y)\n run(layer, 'transform', X)\n run(layer, 'predict', X)\n assert layer.__fitted__", "title": "" }, { "docid": "41c7218b6a2c276ebc0d03d627ea78d5", "score": "0.5049773", "text": "def add_images(self, tag, tensor):\n self.images[tag] = tensor", "title": "" }, { "docid": "1647dd4f7017c1cf1cef295ead9f096b", "score": "0.5038774", "text": "def __init__(self):\n\t\tself.stack_list = []\n\t\tself.min_stack_list = []", "title": "" }, { "docid": "a53a4b256f48f85897280ed16093a646", "score": "0.5037374", "text": "def inputs_stack_image_and_xyz(image_list, xyz_list, label_list, batch_size, mean_image_path, mean_xyz_path):\t \n\n\timages = tf.convert_to_tensor(image_list, dtype=tf.string)\n\txyz = tf.convert_to_tensor(image_list, dtype=tf.string)\n\tlabels = tf.convert_to_tensor(label_list, dtype=tf.int32)\n\n\t# Makes an input 
queue\n\tinput_queue = tf.train.slice_input_producer([images, xyz, labels], shuffle=True, capacity=10*batch_size)\n\n\t#uint8image, _, label = read_images_from_disk(input_queue)\n\tuint8image = tf.image.decode_image(tf.read_file(input_queue[0]), channels=NUM_CHANNELS)\n\tuint8xyz = tf.image.decode_image(tf.read_file(input_queue[1]), channels=NUM_CHANNELS)\n\n\timage_mean_free = subtract_mean(uint8image, mean_image_path)\n\txyz_mean_free = subtract_mean(uint8xyz, mean_xyz_path)\n\n\timg_xyz_stack = tf.concat([image_mean_free, xyz_mean_free], axis=2)\n\n\t# Optional Preprocessing or Data Augmentation\n\t# tf.image implements most of the standard image augmentation\n\t#image = preprocess_image(image)\n\t#label = preprocess_label(label)\n\n\t# Generate a batch of images and labels by building up a queue of examples.\n\tnum_preprocess_threads = 10\n\treturn tf.train.batch([img_xyz_stack, input_queue[-1]], #tf.train.shuffle_batch(\n\t\t\t\t\t\t\t\t batch_size=batch_size,\n\t\t\t\t\t\t\t\t capacity=10*batch_size,\n\t\t\t\t\t\t\t\t num_threads=num_preprocess_threads)", "title": "" }, { "docid": "7ed173c7eb5506d6cd33137f7db9184c", "score": "0.5016056", "text": "def RegisterRGBAImage(self, type, pixels):", "title": "" }, { "docid": "36949ee82043680bd35e42330e85da81", "score": "0.5014108", "text": "def add_summary_images(self):\n size1 = tf.shape(self.input_node)[1]\n size_to_be = tf.cast(size1, tf.int32) - 2*self.displacement\n slicing = [0, self.displacement, self.displacement, 0]\n sliced_axis = [-1, size_to_be, size_to_be, -1]\n crop_input_node = tf.slice(self.input_node, slicing, sliced_axis)\n\n input_s = tf.summary.image(\"input\", crop_input_node, max_outputs=3)\n label_s = tf.summary.image(\"label\", self.label_node, max_outputs=3)\n pred = tf.expand_dims(tf.cast(self.predictions, tf.float32), dim=3)\n predi_s = tf.summary.image(\"pred\", pred, max_outputs=3)\n for __s in [input_s, label_s, predi_s]:\n self.additionnal_summaries.append(__s)\n self.test_summaries.append(__s)", "title": "" }, { "docid": "6c52dc3509c61c4debe650732dbcae01", "score": "0.50086373", "text": "def __init__(self):\n self.stack_a = []\n self.stack_b = []", "title": "" }, { "docid": "e3d4ec2bd162172327b5dd06d120a901", "score": "0.5005655", "text": "def update_pixel_values(self):\n if self._imstack._readonly: raise Exception('readonly')\n if self._data['nz'] == 0:\n self._data['amin'] = float32(0.0)\n self._data['amax'] = float32(0.0)\n self._data['amean'] = float32(0.0)\n else:\n itr = iter(self._imstack)\n im = next(itr)\n amin = im.data.min()\n amax = im.data.max()\n amean = im.data.mean()\n for im in itr:\n amin = min(amin, im.data.min())\n amax = max(amax, im.data.max())\n amean += im.data.mean()\n self._data['amin'] = float32(amin)\n self._data['amax'] = float32(amax)\n self._data['amean'] = float32(amean) / self._data['nz']", "title": "" }, { "docid": "ec10e1972f6501a73f6a084643cf2f6c", "score": "0.50039583", "text": "def __init__(self):\n self._stack = []", "title": "" }, { "docid": "ec10e1972f6501a73f6a084643cf2f6c", "score": "0.50039583", "text": "def __init__(self):\n self._stack = []", "title": "" }, { "docid": "afb4e05447815dbada3dbed83cba509f", "score": "0.49870968", "text": "def landsat_stack(location, dir_pattern, image_pattern, out_pattern,\n bands, ndv,\n extent=None, max_extent=None, min_extent=None,\n percentile=None, extent_image=None,\n utm=None, resume=False,\n fformat='ENVI', co='INTERLEAVE=BIP'):\n ### Check that we provided at least 1 extent option\n extent_opt = 0\n for opt in [extent, 
max_extent, min_extent, percentile, extent_image]:\n if opt is not None and opt is not False:\n extent_opt = extent_opt + 1\n if extent_opt == 0:\n print('Must specifiy at least one extent option.')\n return 1\n elif extent_opt > 1:\n print('Must specify only one extent option')\n return 1\n if extent is not None:\n if len(extent) != 4:\n print('Error: extent option must have 4 values (UL XY, LR XY)')\n return 1\n\n ### Process stacks\n gdal.AllRegister()\n # Locate folders\n dirs = get_directories(location, dir_pattern)\n if len(dirs) == 0:\n print('Could not find any Landsat images to stack')\n else:\n print('Found {num} Landsat images to stack.'.format(num=len(dirs)))\n\n # For each folder, initialize a LandsatImage object\n images = []\n for d in dirs:\n images.append(LandsatImage(d, image_pattern, bands, ndv, out_pattern,\n fformat=fformat, co=co))\n sys.stdout.flush()\n if len(images) != len(dirs) or any([i == False for i in images]):\n print('Could not find Landsat data for all image directories')\n return 1\n\n # If 'max_extent' option, loop through directories getting maximum extent\n if max_extent:\n extent = get_max_extent(images)\n elif min_extent:\n extent = get_min_extent(images)\n elif percentile:\n extent = get_percentile_extent(images, percentile)\n elif extent_image:\n extent = get_extent_from_image(extent_image)\n\n print('\\nStacking to extent:')\n print('\\tUpper Left: {ulx},{uly}'.format(ulx=extent[0], uly=extent[1]))\n print('\\tLower Right: {lrx},{lry}'.format(lrx=extent[2], lry=extent[3]))\n\n # Go through images, apply some attribute and stack\n print('\\nStacking images:')\n stack_status = []\n sys.stdout.flush()\n for num, image in enumerate(images):\n print('<--------------- {i} / {t} '.format(i=num + 1, t=len(images)))\n print('Stacking:\\n {n}\\n'.format(n=image.output_name))\n\n if resume and image.check_completed(extent):\n if VERBOSE and not QUIET:\n print('Already stacked...')\n else:\n if not DRY_RUN:\n stack_status.append(image.stack_image(extent, utm))\n else:\n stack_status.append(True)\n sys.stdout.flush()\n\n print('\\n\\n --------------- REPORT --------------- \\n\\n')\n # Check for errors and report\n if not all(stack_status):\n failures = [num for num, s in enumerate(stack_status) if s == False]\n print('Could not stack {f} images:'.format(f=len(failures)))\n for f in failures:\n print('\\t{i}'.format(i=images[f]))\n return 1\n else:\n # Check to make sure geo-transform was applied & extent is correct\n success = [image.check_completed(extent) for image in images]\n if all(success):\n print('Stacking completed successfully')\n return 0\n else:\n print('Not all stacks have same extent:')\n for n, s in enumerate(success):\n if s != True:\n print('\\t{i}'.format(i=images[n].id))\n return 1", "title": "" }, { "docid": "d61bf04f09566a148718e6c7f13f1aca", "score": "0.497625", "text": "def __init__(self):\n self.inStack = list()\n self.outStack = list()", "title": "" }, { "docid": "96629bb6af59631c07e7ef90525e01a8", "score": "0.4974103", "text": "def add_deepstack(self,list_of_axes_arrays,stack_array,stack_name=\"mydeepstack\",entry_name=None,\n list_of_axes_labels=None, list_of_axes_titles=None):\n f = h5py.File(self.filename, 'a')\n\n if entry_name is None:\n f1 = f\n else:\n f1 = f[entry_name]\n\n\n f2 = f1.create_group(stack_name)\n\n f2.attrs['NX_class'] = 'NXdata'\n f2.attrs['signal'] = '%s'%(\"stack_data\")\n\n if list_of_axes_labels is None:\n list_of_axes_labels = []\n for i in range(len(list_of_axes_arrays)):\n 
list_of_axes_labels.append(\"axis%d\" % i)\n f2.attrs['axes'] = list_of_axes_labels\n\n if list_of_axes_titles is None:\n list_of_axes_titles = list_of_axes_labels\n\n # stack data\n ds = f2.create_dataset(self.label_stack_data, data=stack_array)\n for i in range(len(list_of_axes_arrays)):\n ds = f2.create_dataset(list_of_axes_labels[i], data=list_of_axes_arrays[i])\n ds.attrs['long_name'] = list_of_axes_titles[i] # suggested 0 axis plot label\n\n f.close()", "title": "" }, { "docid": "37f01dea63889d0a6e24b155c07caddd", "score": "0.49698737", "text": "def _stack(self, x, repeats, kernels, reduce):\r\n for _ in range(repeats):\r\n x = self._macnn_block(x, kernels, reduce)\r\n return x", "title": "" }, { "docid": "fcbe58d12a1b57bba649f4356e90ada5", "score": "0.49653453", "text": "def stack(self, data):\n if self.data is None:\n self.data = data\n else:\n self.data = self._stack_funcs[self.stack_axis]([self.data, data])", "title": "" }, { "docid": "09d62ada35ab518116762c1f4f6569a4", "score": "0.496502", "text": "def __init__(self):\n self.__minStack = list()\n self.__stack = list()", "title": "" }, { "docid": "a21357869ab23d00e987f677faa5c34b", "score": "0.4946924", "text": "def __init__(self):\n self.stack = Stack()", "title": "" }, { "docid": "b621b2a8ad334a9e691d5c5fb9772cbb", "score": "0.4945459", "text": "def stacks(self, pdb, chain, count=None):\n pass", "title": "" }, { "docid": "cf7030f402ed40e3a6065f0dd5d36225", "score": "0.4942054", "text": "def rawStack(image, period, fwhm, steps_per_line):\n period_px = nm2px(period)\n fwhm_px = nm2px(fwhm)\n step_size_px = nm2px(period/steps_per_line)\n GT_size = np.shape(image)[0]\n OutSize = GT_size / upsamp_for_GT\n size_ratio = 1/upsamp_for_GT\n print('Creating pattern')\n print(GT_size, period_px, fwhm_px)\n pattern = periodicMatrix(GT_size, period_px, fwhm_px, 0, 0)\n stack = []\n \n # scan is done first right, then down\n for i in np.arange(period_px/step_size_px):\n for j in np.arange(period_px/step_size_px):\n print('Line: ', i)\n dx = i*step_size_px\n dy = j*step_size_px\n print(dx, dy)\n shifted = scipy.ndimage.interpolation.shift(image, [dx, dy])\n frame = pattern*shifted\n frame = ndi.gaussian_filter(frame, 45)\n plt.imshow(frame)\n stack.append(scipy.misc.imresize(frame, size_ratio))\n return stack", "title": "" }, { "docid": "f58695a7a0e54f95b086a764b915bdc5", "score": "0.49315062", "text": "def __init__(self):\r\n self.inStack, self.outStack = [], []", "title": "" }, { "docid": "fbb5ffff65f5ce5a340a62e8085d5ad0", "score": "0.49206132", "text": "def stack_imgs(imgs):\n\treturn np.stack(imgs, axis = -1)", "title": "" }, { "docid": "061aab1f726bafdb4799cd2299950384", "score": "0.49150577", "text": "def push(muf_env):\n muf_env[\"stack\"].push(muf_env,muf_env[\"code\"][muf_env[\"instruction\"]][1])", "title": "" }, { "docid": "ce4b11f8e98b9d3ec6f3ce0fc9f46534", "score": "0.49148595", "text": "def stack_edisp(self):\n irf_stacker = IRFStacker(\n list_aeff=[obs.aeff for obs in self.obs_list],\n list_livetime=[obs.livetime for obs in self.obs_list],\n list_edisp=[obs.edisp for obs in self.obs_list],\n list_low_threshold=[obs.lo_threshold for obs in self.obs_list],\n list_high_threshold=[obs.hi_threshold for obs in self.obs_list],\n )\n irf_stacker.stack_edisp()\n self.stacked_edisp = irf_stacker.stacked_edisp", "title": "" }, { "docid": "f9fb4f093d97258a2df418563d35f6aa", "score": "0.49058923", "text": "def __init__(self):\n self.stack = []\n self.work_stack = []", "title": "" }, { "docid": "7fcb59f4c6f84c58934cb7b7b452fc43", "score": 
"0.48947844", "text": "def test_add_stack(self):\n self.cal.add_stack(5)\n self.assertIsInstance(self.cal.stack.pop(), int)\n\n self.cal.add_stack(10.5)\n self.assertIsInstance(self.cal.stack.pop(), float)\n\n self.cal.add_stack(5.0)\n self.assertIsInstance(self.cal.stack.pop(), int)", "title": "" }, { "docid": "b0002234ce39231f552f9e3ad92fcaaf", "score": "0.48784578", "text": "def entropy_stack(image, normalize=False):\n\n hi_rez = entropy_local(image, 7, normalize=normalize)\n medh_rez = entropy_local(image, 15, 8, normalize=normalize)\n medl_rez = entropy_local(image, 31, 16, normalize=normalize)\n low_rez = entropy_local(image, 63, 32, normalize=normalize)\n\n return [hi_rez, medh_rez, medl_rez, low_rez]", "title": "" }, { "docid": "c5b6aa58c9948e4b5ba43142bffb1ddd", "score": "0.48776102", "text": "def mark_regions_image(self, image, stats):\n mx = image.shape[0]\n my = image.shape[1]\n markedimage = np.zeros(shape=(mx,my))\n global X\n global Y\n X = 0\n Y = 0\n #for i in range(mx):\n # for j in range(my):\n # markedimage[i,j] = image[i,j]\n for r in stats:\n if (stats[r][\"Area\"] >= 15):\n for p in stats[r][\"Pixels\"]:\n markedimage[stats[r][\"Pixels\"][p][0],stats[r][\"Pixels\"][p][1]] = 255\n else:\n stats[r] = None\n\n\n def draw(a,b):\n if ((a < 0) or (a >= mx)) or ((b < 0) or (b >= my)):\n return -1 #out of bounds\n markedimage[a,b] = 128 #255-markedimage[a,b] #simply invert the pixel\n\n def drawN(a,b,e,f):\n draw(e+b,f+a)\n def markCross(x,y):\n draw(x + 0, y + 0)\n draw(x + 1, y + 0)\n draw(x + 0, y + 1)\n draw(x - 1, y + 0)\n draw(x + 0, y - 1)\n draw(x + 2, y + 0)\n draw(x + 0, y + 2)\n draw(x - 2, y + 0)\n draw(x + 0, y - 2)\n return 0\n\n def drawNumber(num,x,y):\n X = x\n Y = y\n\n\n if (num == 0):\n drawN(-1,-2,x,y)\n drawN(0, -2,x,y)\n drawN(1, -2,x,y)\n drawN(-1, -1,x,y)\n drawN(1, -1,x,y)\n drawN(-1, 0,x,y)\n drawN(1, 0,x,y)\n drawN(-1, 1,x,y)\n drawN(1, 1,x,y)\n drawN(-1, 2,x,y)\n drawN(0, 2,x,y)\n drawN(1, 2,x,y)\n\n elif (num == 1):\n drawN(1, 2,x,y)\n drawN(1, 1,x,y)\n drawN(1, 0,x,y)\n drawN(1, -1,x,y)\n drawN(1, -2,x,y)\n elif (num == 2):\n drawN(-1, -2,x,y)\n drawN(0, -2,x,y)\n drawN(1, -2,x,y)\n drawN(1, -1,x,y)\n drawN(1, 0,x,y)\n drawN(0, 0,x,y)\n drawN(-1, 0,x,y)\n drawN(-1, 1,x,y)\n drawN(-1, 2,x,y)\n drawN(0, 2,x,y)\n drawN(1, 2,x,y)\n\n elif (num == 3):\n drawN(-1, -2,x,y)\n drawN(0, -2,x,y)\n drawN(1, -2,x,y)\n drawN(1, -1,x,y)\n drawN(1, 0,x,y)\n drawN(0, 0,x,y)\n drawN(1, 1,x,y)\n drawN(1, 2,x,y)\n drawN(0, 2,x,y)\n drawN(-1, 2,x,y)\n elif (num == 4):\n drawN(-1, -2,x,y)\n drawN(1, -2,x,y)\n drawN(-1, -1,x,y)\n drawN(1, -1,x,y)\n drawN(-1, 0,x,y)\n drawN(0, 0,x,y)\n drawN(1, 0,x,y)\n drawN(1, 1,x,y)\n drawN(1, 2,x,y)\n elif (num == 5):\n drawN(1, -2,x,y)\n drawN(0, -2,x,y)\n drawN(-1, -2,x,y)\n drawN(-1, -1,x,y)\n drawN(-1, 0,x,y)\n drawN(0, 0,x,y)\n drawN(1, 0,x,y)\n drawN(1, 1,x,y)\n drawN(1, 2,x,y)\n drawN(0, 2,x,y)\n drawN(-1, 2,x,y)\n elif (num == 6):\n drawN(1, -2,x,y)\n drawN(0, -2,x,y)\n drawN(-1, -2,x,y)\n drawN(-1, -1,x,y)\n drawN(-1, 0,x,y)\n drawN(0, 0,x,y)\n drawN(1, 0,x,y)\n drawN(1, 1,x,y)\n drawN(1, 2,x,y)\n drawN(0, 2,x,y)\n drawN(-1, 2,x,y)\n drawN(-1, 1,x,y)\n elif (num == 7):\n drawN(-1, -2,x,y)\n drawN(0, -2,x,y)\n drawN(1, -2,x,y)\n drawN(1, -1,x,y)\n drawN(1, 0,x,y)\n drawN(1, 1,x,y)\n drawN(1, 2,x,y)\n elif (num == 8):\n drawN(-1, -2,x,y)\n drawN(0, -2,x,y)\n drawN(1, -2,x,y)\n drawN(-1, -1,x,y)\n drawN(1, -1,x,y)\n drawN(-1, 0,x,y)\n drawN(0, 0,x,y)\n drawN(1, 0,x,y)\n drawN(-1, 1,x,y)\n drawN(1, 1,x,y)\n drawN(-1, 2,x,y)\n 
drawN(0, 2,x,y)\n drawN(1, 2,x,y)\n elif (num == 9):\n drawN(-1, -2,x,y)\n drawN(0, -2,x,y)\n drawN(1, -2,x,y)\n drawN(-1, -1,x,y)\n drawN(1, -1,x,y)\n drawN(-1, 0,x,y)\n drawN(0, 0,x,y)\n drawN(1, 0,x,y)\n\n drawN(1, 1,x,y)\n drawN(-1, 2,x,y)\n drawN(0, 2,x,y)\n drawN(1, 2,x,y)\n elif (num == ':'):\n drawN(0, -1, x, y)\n drawN(0, 1, x, y)\n\n\n return 0\n def drawMultiDigitNumber(num,x,y,rval):\n if (num >= 1):\n digitsrequired = int(math.log10(int(num)))\n else:\n digitsrequired = 1\n\n if (rval >= 1):\n digitsrequired = (digitsrequired + int(math.log10(int(rval))))\n else:\n digitsrequired = (digitsrequired + 1)\n\n X = (x - 6)\n Y = (y + 2+(2 * (digitsrequired)))\n\n # Determine more optimal positioning of the text for readability\n while True:\n\n drawNumber(int(num % 10),X,Y)\n num = (num//10)\n Y = (Y - 4)\n\n if (num <= 0):\n break\n drawNumber(':', X, Y)\n Y = (Y - 4)\n while True:\n\n drawNumber(int(rval % 10),X,Y)\n rval = (rval//10)\n Y = (Y - 4)\n\n if (rval <= 0):\n break\n return 0\n v = 0\n print(\"Region Report\")\n for r in stats:\n if (stats[r] != None):\n u = int(stats[r][\"CenterPoint\"][0])\n v = int(stats[r][\"CenterPoint\"][1])\n # for e in stats[r][\"Pixels\"]:\n # markedimage[stats[r][\"Pixels\"][e][0],stats[r][\"Pixels\"][e][1]] = 100+r\n markCross(u,v)\n drawMultiDigitNumber(stats[r][\"Area\"], u, v, r)\n stats[r][\"Pixels\"] = None\n print(stats[r])\n \"\"\"\n for i in range(mx):\n for j in range(my):\n markedimage[i,j] = v*255\n if (v == 0):\n v = 1\n else:\n v = 0\"\"\"\n\n\n return markedimage", "title": "" }, { "docid": "c8f214407e67451032bae614c9ff9a23", "score": "0.48744303", "text": "def __init__(self):\n self.mystack=[]\n self.minstack=[]", "title": "" }, { "docid": "2a711257e6de8c9a0cfc998fe7e73624", "score": "0.48641387", "text": "def push_stack(self):\n self.address_bus = (self.get_word(self.S.value, self.P.value) - 1) & 0xFFFF\n self.memory_write()\n self.set_stackpointer(self.address_bus)", "title": "" }, { "docid": "5a1f4726979fa793268dd30df37948d8", "score": "0.48634914", "text": "def _add_obs_layer(self, raster):\n \n water = raster == 128\n dry = raster == 0\n\n print \"the raster's water pixels: \", np.sum(water)\n print \"the raster's dry pixels: \", np.sum(dry)\n\n self.waterArray = self.waterArray + water\n self.dryArray = self.dryArray + dry \n \n return", "title": "" }, { "docid": "21b2685226cd85396757fbe8fe9e7abd", "score": "0.48573312", "text": "def regalloc_push(self, loc, already_pushed):\n assert IS_PPC_64, 'needs to updated for ppc 32'\n\n index = WORD * (~already_pushed)\n\n if loc.type == FLOAT:\n if not loc.is_fp_reg():\n self.regalloc_mov(loc, r.FP_SCRATCH)\n loc = r.FP_SCRATCH\n self.mc.stfd(loc.value, r.SP.value, index)\n else:\n if not loc.is_core_reg():\n self.regalloc_mov(loc, r.SCRATCH)\n loc = r.SCRATCH\n self.mc.std(loc.value, r.SP.value, index)", "title": "" }, { "docid": "9f12160c3613f3f7ec3a74e7e09c50b9", "score": "0.48473233", "text": "def __init__(self):\n self.stack = []\n self.minu = []", "title": "" }, { "docid": "e111b3f6b5900901dbb73ed257a1bb1b", "score": "0.4846617", "text": "def fuse_tensors(outdir, pixels_redChannel, pixels_greenChannel, pixels_blueChannel):\n if not os.path.exists(outdir):\n os.makedirs(outdir)\n num_slices = pixels_redChannel.shape[0]\n for z in xrange(0, num_slices, 1):\n pixels_merged = Image.merge(\"RGB\", (pixels_redChannel[z], pixels_greenChannel[z], pixels_blueChannel[z]))\n im = Image.fromarray(pixels_merged)\n tempname = '/tmp/' + str(uuid.uuid4()) + '.tif'\n im.save(tempname)\n 
destname = outdir + '/slice_' + str(z).zfill(4) + '.tif'\n os.system('tiffcp -clzw:2 ' + tempname + ' ' + destname)\n os.remove(tempname)\n print('...fusion computed (%s slices) ' % z)\n print('fused stack saved in ', outdir)", "title": "" }, { "docid": "c6543c9d7189af98e64bb429df654ef1", "score": "0.483488", "text": "def __init__(self):\r\n self.stack = []\r\n self.size = len(self.stack)", "title": "" }, { "docid": "bd2fd993801de0178471e1fcde79d2f8", "score": "0.48244905", "text": "def stacks(self) -> Sequence['outputs.GetTemplateScratchesScratchStackResult']:\n return pulumi.get(self, \"stacks\")", "title": "" }, { "docid": "10707b4d82b7c4d87e5bb437ad202243", "score": "0.4822761", "text": "def stack_counts_vectors(self):\n self.stack_on_vector()\n self.stack_off_vector()\n self.stack_backscal()\n self.setup_counts_vectors()", "title": "" }, { "docid": "2084ac67f8d9959b547f706cefa9c12a", "score": "0.48159745", "text": "def __init__(self):\r\n self.mainStack=[]\r\n self.min=[]", "title": "" }, { "docid": "af684c6e882deb02621d3ee7f40d691c", "score": "0.48088375", "text": "def __stack_layers(self):\r\n self.__colors = self.__colors[: self.get_num_clusters()]\r\n for i, color in enumerate(self.__colors):\r\n temp = self.__data[self.__data['cluster'] == i]\r\n x = temp[self.__data.columns[0]]\r\n y = temp[self.__data.columns[1]]\r\n z = temp[self.__data.columns[2]]\r\n self.create_trace_layer(x, y, z, color, self.__data.index)", "title": "" }, { "docid": "682dbe90741eb36172d9af9b6f21471a", "score": "0.48085156", "text": "def ins_push(self, a):\n self.stack.append(self.convert(a))\n self.cursor += 2", "title": "" }, { "docid": "f394b8439c9f0f6e4c8eae7652b27cd3", "score": "0.4804129", "text": "def get_image_info(self):\n return tf.stack([\n tf.to_float(self._scaled_height),\n tf.to_float(self._scaled_width),\n 1.0 / self._image_scale,\n tf.to_float(self._ori_height),\n tf.to_float(self._ori_width)])", "title": "" }, { "docid": "a710853e827c10ad52e1d9f06c22bba6", "score": "0.48038936", "text": "def hstack(self, other):\n \n pass", "title": "" }, { "docid": "26916211bceb6af07c6de2046ba50114", "score": "0.48017246", "text": "def AddPixel(self, *args):\n return _stomp.RegionBound_AddPixel(self, *args)", "title": "" }, { "docid": "9f2433e88203854e7976907e4ecd1fc3", "score": "0.47997528", "text": "def __init__(self):\n self.add_stack = []\n self.del_stack = []", "title": "" }, { "docid": "642c857efca01ab04c7c8f14fc103df1", "score": "0.47947758", "text": "def __init__(self):\n self.__stack = []", "title": "" }, { "docid": "916837d9e3272507ca58e27676990df0", "score": "0.47853053", "text": "def stacked(self, other, axis='0'):\n \n pass", "title": "" }, { "docid": "6a2affa90ec5dd7e9cb90597ea4b6a6f", "score": "0.4772094", "text": "def test_stacked_intensity_provider(self):\n data1 = pd.DataFrame(\n {\n \"geometry\": [\n \"POINT(-71.5473 -32.8026)\",\n \"POINT(-71.5473 -32.8022)\",\n \"POINT(-71.5468 -32.803)\",\n \"POINT(-71.5467 -32.8027)\",\n ],\n \"value_mwh\": [6.7135, 7.4765, 3.627, 3.5967],\n \"unit_mwh\": [\n \"m\",\n \"m\",\n \"m\",\n \"m\",\n ],\n }\n )\n geodata1 = gpd.GeoDataFrame(data1)\n geodata1[\"geometry\"] = geodata1[\"geometry\"].apply(wkt.loads)\n\n intensity_provider1 = intensityprovider.IntensityProvider(\n intensitydatawrapper.GeopandasDataFrameWrapper(geodata1)\n )\n\n data2 = pd.DataFrame(\n {\n \"geometry\": [\n \"POINT(-71.5473 -32.8026)\",\n \"POINT(-71.5473 -32.8022)\",\n \"POINT(-71.5468 -32.803)\",\n \"POINT(-71.5467 -32.8027)\",\n ],\n \"value_pga\": [0.7135, 0.4765, 0.627, 
0.5967],\n \"unit_pga\": [\n \"g\",\n \"g\",\n \"g\",\n \"g\",\n ],\n }\n )\n geodata2 = gpd.GeoDataFrame(data2)\n geodata2[\"geometry\"] = geodata2[\"geometry\"].apply(wkt.loads)\n\n intensity_provider2 = intensityprovider.IntensityProvider(\n intensitydatawrapper.GeopandasDataFrameWrapper(geodata2)\n )\n\n stacked_intensity_provider = (\n intensityprovider.StackedIntensityProvider(\n intensity_provider1, intensity_provider2\n )\n )\n\n intensities, units = stacked_intensity_provider.get_nearest(\n lon=-71.5473, lat=-32.8025\n )\n intensity_mwh = intensities[\"mwh\"]\n self.assertLess(6.7134, intensity_mwh)\n self.assertLess(intensity_mwh, 6.7136)\n intensity_pga = intensities[\"pga\"]\n self.assertLess(0.7134, intensity_pga)\n self.assertLess(intensity_pga, 0.7136)\n self.assertEqual(units[\"mwh\"], \"m\")\n self.assertEqual(units[\"pga\"], \"g\")", "title": "" }, { "docid": "f97547ed40efa9ff2cb39c26c5aaca2f", "score": "0.4761401", "text": "def filter(self, stack: Stack) -> None:\n\n high_pass: Callable = partial(self.gaussian_high_pass, sigma=self.sigma)\n stack.image.apply(high_pass)\n\n # apply to aux dict too:\n for auxiliary_image in stack.auxiliary_images.values():\n auxiliary_image.apply(high_pass)", "title": "" }, { "docid": "1b034c9216b52d69483e7c7d49181a8b", "score": "0.47532004", "text": "def testSize(self):\n self.assert_(0 == len(self.stack))\n for i in range(1,10,1):\n self.stack.push(i)\n self.assertEqual(len(self.stack),i)", "title": "" }, { "docid": "a5bc458e08017727ddcf7c0f7e215e11", "score": "0.4751523", "text": "def resetstack(self):\n\n self.lower.stack = []\n self.upper.stack = []", "title": "" }, { "docid": "12cfe5213dd2e359acdd472e087e2ba4", "score": "0.47509846", "text": "def create_stack(self):\r\n faces = range(self.max_number + 1)\r\n self.stack = [list(item) for item in combinations(faces, 2)] + \\\r\n [[i, i] for i in range(self.max_number + 1)]\r\n if (len(self.stack) / len(self.players)) < self.n_tiles:\r\n raise ValueError(u'Not enough tiles for all players!')\r\n else:\r\n shuffle(self.stack)", "title": "" }, { "docid": "b62945035af8b1464b68aa65cdcf9249", "score": "0.47488117", "text": "def __loss_stack_def(self, y_true, y_pred):\r\n # loop of stacked hg\r\n ls_stack_det = 0\r\n num_in_one_stack = self.NUM_CLASSES * 5\r\n for istack in range(self.NUM_STACKS):\r\n start_channel = num_in_one_stack * istack\r\n end_channel = start_channel + num_in_one_stack\r\n\r\n ls_det = self.__loss_det(y_true, y_pred[:, :, :, start_channel:end_channel])\r\n ls_stack_det = ls_stack_det + ls_det\r\n \r\n return ls_stack_det", "title": "" }, { "docid": "d9453e47c96796faa77ccddbd8702320", "score": "0.4743786", "text": "def analyze_damage(cutStack, dmg_point):\n\n num_rings=60\n\n #initialize data containers\n analysis_mat = np.empty((140,num_rings))\n\n #make image 256x256 for analysis\n restack = resize(np.swapaxes(cutStack,0,1), (140,256,256), preserve_range=True)\n\n #outer loop each frame in depth\n for depth, frame in enumerate(restack): \n #loop drawing a ring at point with increasing radius up to $$$$$\n for r in range(0,num_rings):\n #set up background anew for each loop\n null_img = np.zeros((256,256))\n rr,cc = circle_perimeter(int(dmg_point[0]),int(dmg_point[1]/4),r, method='andres')\n #these remove indices of the ring that are off the side of the image\n if np.any(rr>255):\n ind = np.where(rr<256)\n rr=rr[ind]\n cc=cc[ind]\n if np.any(cc>255):\n ind = np.where(cc<256)\n rr=rr[ind]\n cc=cc[ind]\n if np.any(rr<0):\n ind = np.where(rr>=0)\n rr=rr[ind]\n 
cc=cc[ind]\n if np.any(cc<0):\n ind = np.where(cc>=0)\n rr=rr[ind]\n cc=cc[ind]\n null_img[rr,cc]=1\n #average intensity at each ring\n analysis_mat[depth,r]=np.sum(frame*null_img)/np.sum(null_img)\n\n return analysis_mat", "title": "" }, { "docid": "64de542d7ab0c83ac36c8df79da742ea", "score": "0.4741605", "text": "def stack_on_vector(self):\n on_vector_list = [o.on_vector for o in self.obs_list]\n self.stacked_on_vector = self.stack_counts_spectrum(on_vector_list)", "title": "" }, { "docid": "e517cbf6dfd51396c49e63eea2765315", "score": "0.47375014", "text": "def stack_aeff(self):\n irf_stacker = IRFStacker(\n list_aeff=[obs.aeff for obs in self.obs_list],\n list_livetime=[obs.livetime for obs in self.obs_list],\n )\n irf_stacker.stack_aeff()\n self.stacked_aeff = irf_stacker.stacked_aeff", "title": "" }, { "docid": "4bc2da7302213d3bd1be73a0fb73f953", "score": "0.47358188", "text": "def registrate(fixed_image,moving_image):\n pass", "title": "" }, { "docid": "1541d2637f0ce5c7ff46ba1a8f3391ea", "score": "0.4735547", "text": "def mark_regions_image(self, image, stats):\r\n\r\n plt.clf()\r\n plt.imshow(image, interpolation='none', cmap=plt.cm.gray)\r\n ### ERROR 3: expected 'stats' to be an object with data, but 'stats' is actually an integer < FIXED\r\n # look at line 72 in dip_hw1_region_analysis.py:\r\n # stats = cell_count_obj.compute_statistics(regions) <--- compute_statistics\r\n # look at 'compute_statistics' above:\r\n # returns 0, an integer\r\n for k, region_stats in stats.items():\r\n (x, y) = (region_stats['x'], region_stats['y'])\r\n plt.plot(x, y, 'r*') # plots red asterisk\r\n plt.text(x, y, '{}:{}'.format(k, region_stats['area']), color='red')\r\n plt.show()\r\n plt.savefig('output/cellct/result.png')\r\n\r\n return image", "title": "" }, { "docid": "af7cf57acdf6aa78fe4991bcef096928", "score": "0.47224367", "text": "def _add_image_summary(self, image_tag, image):\n num_slice = 5\n # image shape assumes channel_last [batch, x, y, z, chnl]\n image_x_pixels = image.shape[1]\n stride = image_x_pixels // (num_slice-1)\n\n for i in range(num_slice):\n index = i*stride - 1 if i else i*stride\n image_slice = image[:,index,:, :, :]\n summary_images = tf.summary.image(name= '{0}_{1}'.format(image_tag, i), tensor= image_slice, max_outputs =1)\n summary_hook = tf.train.SummarySaverHook(summary_op= summary_images, save_steps= 1)\n self._hooks[md.Hooks.SUMMARY].append(summary_hook)", "title": "" }, { "docid": "fd8d846067134ca15373e16ed084829c", "score": "0.47200716", "text": "def apply_image(self, img: np.ndarray):", "title": "" }, { "docid": "7e03cc79ae9028eb3cf03ec8b6181ba6", "score": "0.47128043", "text": "def _mem_stack(self):\n return False", "title": "" } ]
757a3d880d7b3d20791405a3456e394a
Returns the data in the csv file.
[ { "docid": "78a72f498df468c4941317e889c7bc2c", "score": "0.68766993", "text": "def getCsvData():\n return np.genfromtxt(FILENAME, delimiter=',', skip_header=1)", "title": "" } ]
[ { "docid": "f6ef013293c3bf7f89588462c68c3cd4", "score": "0.77819395", "text": "def getCsv(self,nomeCsv):", "title": "" }, { "docid": "cc799882a598874b392766febc2f51cf", "score": "0.7663828", "text": "def get_data(self):\n with open(os.path.join('data_file.csv'), 'r') as fin:\n reader = csv.reader(fin)\n data = list(filter(None,reader))\n return data", "title": "" }, { "docid": "ceced46df53956fd2d7cb07fd43342e2", "score": "0.7516333", "text": "def CSVread(self):\n dataFile = pandas.read_csv('names.csv')\n print(dataFile)\n return dataFile", "title": "" }, { "docid": "0c6f3219835692fdbd4a7c2c0628cb98", "score": "0.7469504", "text": "def get_data():\n\t\twith open(os.path.join('data', 'data_file.csv'), 'r') as fin:\n\t\t\treader = csv.reader(fin)\n\t\t\tdata = list(reader)\n\t\treturn data", "title": "" }, { "docid": "1905e1fbeac1de0ade87debab2ba27c1", "score": "0.728847", "text": "def read_csv(self, path):", "title": "" }, { "docid": "891643af27d9c69ed7c900c3178e5f92", "score": "0.72730553", "text": "def get_csv_data(file_path):\n with open(file_path, newline=\"\") as csv_file:\n csv_reader = csv.reader(csv_file)\n data = csv_reader.__next__()\n return data", "title": "" }, { "docid": "b4a05acd8fcad72d025967978f71b4fe", "score": "0.72349864", "text": "def read_csv(self):\n\n with open(self.file, mode='r') as data_file:\n time_format = '%Y-%m-%d %H:%M:%S'\n now = datetime.now(timezone.utc)\n reader = csv.reader(data_file, delimiter='\\t')\n for row in reader:\n if len(row) == 0 or row[0] == 'UserID' or row[0].startswith('---'):\n continue\n self.user_ids.append(int(row[0]))\n self.data.append(row[1].strip())\n self.timestamps.append(now.strptime(row[2], time_format))\n self.urls.append(row[4].strip())\n self.labels.append(int(row[5])) # Mission ID ", "title": "" }, { "docid": "8571848a424aad00d8dccf815e6a8840", "score": "0.70751745", "text": "def data_to_csv():\n pass", "title": "" }, { "docid": "4d2d0fb1e523495e532be4c5c8f56756", "score": "0.7044952", "text": "def get_data():\n\n data = pd.read_csv(\"currency.csv\")\n return data", "title": "" }, { "docid": "ef0893d79330913aeb6439911c163bfa", "score": "0.70350695", "text": "def _get_data(self, path):\n data = pd.read_csv(path)\n return data", "title": "" }, { "docid": "0ca44edf3888d945b43b6d8171a39231", "score": "0.70337754", "text": "def importcsv(self):\n pass", "title": "" }, { "docid": "e907937bbb4d614edc72a608b2512fbe", "score": "0.7024661", "text": "def get_csv():\n with requests.Session() as session:\n return session.get(CSV_URL).content.decode('utf-8')", "title": "" }, { "docid": "e96e7cc6aaeb694479a129c20d21d6fe", "score": "0.70031273", "text": "def file_read():\r\n\r\n data = pd.read_csv('City Drive 2.csv')\r\n return data", "title": "" }, { "docid": "84ef163506f32ba87e445a254b1c9a97", "score": "0.68880546", "text": "def get_data():\r\n file = filedialog.askopenfilename(\r\n title=\"Location of openPO\",\r\n initialdir=r\"%USER%\\Desktop\",\r\n filetypes=[(\"Plain Text\", \"*.txt\"), (\"CSV\", \"*.csv\"),],\r\n ) \r\n \r\n if file:\r\n copied = copy_file(file)\r\n \r\n data = []\r\n with open(file, \"r\") as csvfile:\r\n reader = csvreader(csvfile, delimiter=\"\\t\")\r\n for row in reader:\r\n data.append(row)\r\n \r\n return data", "title": "" }, { "docid": "6d06f20904a617a8527dbfd9ced41648", "score": "0.68690354", "text": "def get_data(self):\n #ipdb.set_trace()\n path = os.path.abspath(__file__)\n path1 = os.path.dirname(os.path.dirname(path))\n csv_path = os.path.join(path1, 'data' ,'csv')\n #ipdb.set_trace()\n\n return df\n 
# Hints: Build csv_path as \"absolute path\" in order to call this method from anywhere.\n        # Do not hardcode your path as it only works on your machine ('Users/username/code...')\n        # Use __file__ as absolute path anchor independent of your computer\n        # Make extensive use of `import ipdb; ipdb.set_trace()` to investigate what the `__file__` variable really is\n        # Use os.path library to construct path independent of Unix vs. Windows specificities", "title": "" }, { "docid": "4996e2ee635eded17a4b4f26d6c06a61", "score": "0.6755378", "text": "def getData(self):\n \n # TODO: switch to new location: https://markets.cboe.com/us/futures/market_statistics/historical_data/\n fName = \"CFE_{0}{1}_VX.csv\".format(monthCode(self.month),str(self.year)[-2:])\n urlStr = \"http://cfe.cboe.com/Publish/ScheduledTask/MktData/datahouse/{0}\".format(fName)\n \n # some files have a disclaimer on the first line, so some extra code \n # is needed here\n \n # find first line with header\n url = request.urlopen(urlStr)\n self.data = parseFutureCsv(url)\n \n return self.data", "title": "" }, { "docid": "2d2555421f3b848b8c0308ef79e5799b", "score": "0.6745932", "text": "def __get_data(self, key):\n data = pd.read_csv(\"data/\" + key + \".csv\", index=False)\n return data", "title": "" }, { "docid": "d95f48638181baadbaa86a23a3fe1bf3", "score": "0.67332625", "text": "def read_csv(self):\n\n if os.path.exists(self.file):\n self.data = pd.read_csv(self.file,index_col=0)\n else:\n print(\"file does not exist\")", "title": "" }, { "docid": "30028ab08b9e058ed3094b44a4d4a636", "score": "0.667858", "text": "def get_data(fn):\n data = []\n with open(fn, 'r') as file:\n csv_reader = reader(file)\n\n for row in csv_reader:\n if not row:\n continue\n data.append(row)\n\n return data", "title": "" }, { "docid": "907cf90bb91924454c85a6cbde5b3935", "score": "0.6636066", "text": "def read_csv(path):\n pass", "title": "" }, { "docid": "55525f88e319c0a923dd4f3321c599e2", "score": "0.660094", "text": "def data():\n _, csv_file = tempfile.mkstemp()\n try:\n data_path = os.path.join(app.root_path, data_dir)\n read_data.generate_lightning_csv(data_path, csv_file)\n return helper.csv_to_json(csv_file)\n finally:\n os.remove(csv_file)", "title": "" }, { "docid": "957ad967731e87dd998274757a3fa9b1", "score": "0.6592047", "text": "def csvreader(self):\n with open(self.file, 'r') as csvfile:\n reader = csv.reader(csvfile, self.getdialect()) # Calls getdialect() to find CSV-Dialect\n self.data = list(reader) # makes an iterable list out of the reader's content\n print(self.data)\n \"\"\"\n For loop not necessary, because of output as array\n for row in spamreader:\n self.data = self.data.append(row)\n \"\"\"\n csvfile.close()", "title": "" }, { "docid": "ace7f587fe3c5dcb2ec3f8380cbd6b44", "score": "0.65707797", "text": "def grab_file_data(self, filename):\n with open(filename, 'rt') as opened_file:\n read_file = csv.reader(opened_file)\n for row in read_file: # [q1]\n self.rows.append(row)\n opened_file.close", "title": "" }, { "docid": "4a2c2b34102491756a448a41537437ed", "score": "0.65452945", "text": "def read_test_data():\n data = []\n global test_data_file\n try:\n fr = open(\"./tests/data_to_test.csv\",'r')\n line = fr.readline()\n while line:\n line = fr.readline()\n row = line.strip().split(',')\n if row[0]:\n data.append(row)\n fr.close()\n except IOError as err:\n print(err)\n finally:\n return data", "title": "" }, { "docid": "f628aa6ca2943cbf248f00f512bcd0e0", "score": "0.6539908", "text": "def read_data():\n\n data = 
pd.read_csv('input/data.csv', index_col=False)\n return data", "title": "" }, { "docid": "58aff043f2aa3071a08e913e14937d21", "score": "0.65275687", "text": "def getFullData(path):\n reader = csv.reader(file(path, \"rb\"))\n header = None\n data = []\n for row in reader:\n if not header:\n header = row\n continue\n obj = {}\n for k in range(len(header)):\n obj[header[k]] = row[k]\n data.append(obj)\n #print header\n return header, data", "title": "" }, { "docid": "33b0fe017ccd61ec27f833ed9f067a16", "score": "0.65227294", "text": "def csv():\n input_data = request.get_json()\n stream_name, csv_data = util.calc.get_csv(input_data, request.url, delimiter=',')\n return send_file(BytesIO(csv_data),\n attachment_filename='%s.csv' % stream_name,\n mimetype='application/csv')", "title": "" }, { "docid": "66aba566817b5d4452b8ca51ed3a393c", "score": "0.6499982", "text": "def data_stream(self,datapath): # accept the file path\n self.datapath=datapath\n data =pd.read_csv(datapath)\n values = data.values # all values/columns in the DataFrame\n for ptime, tweet, period, fav, fol, friend in zip(values[:,1], values[:,2],values[:,3],values[:,4],values[:,5],values[:,6]):# extract value in columns\n yield ptime,tweet.strip(' ,'), period, fav, fol, friend", "title": "" }, { "docid": "79fe54f2e85f26930c839fb991eac42a", "score": "0.6493912", "text": "def _read_csv(self, path):\n data = list()\n with open(path, 'r') as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n # data = list()\n corpus_str = ''\n for row in csv_reader:\n if self.data_name == 'ag_news':\n # ag_news has col 1 - label, col 2 - headline, col 3 - article text\n # and has no column headers. For this dataset we concatenate the headline to the article text\n corpus_str += f'{row[1] + \" \" +row[2]}\\t{row[0]}\\n'\n\n data = [line for line in corpus_str.split('\\n') if line] # if statement gets rid of empty strings\n return data", "title": "" }, { "docid": "cfe4c8783fb582a28af70095127aec67", "score": "0.6491471", "text": "def _write_csv(self):\n raise NotImplementedError", "title": "" }, { "docid": "49eda580ed320e33ec1aaa7f51e81760", "score": "0.64888996", "text": "def load_data(): \n with open(filename, 'rb') as f:\n f.seek(0)\n reader = csv.reader(f)\n return [l for l in reader]", "title": "" }, { "docid": "69a50d06addfe5cad8b7a7b54a5ec805", "score": "0.64837235", "text": "def open_dat(self, filepath):\r\n print('Open the File : '+ filepath)\r\n file = open(filepath)\r\n reader = csv.reader(file, delimiter='\\t')\r\n data = []\r\n for line in reader:\r\n tmp = line[0].split(',')\r\n int_data = []\r\n tmp = tmp[0:-1]\r\n for temp in tmp:\r\n int_data.append(float(temp))\r\n data.append(int_data)\r\n file.close()\r\n return data", "title": "" }, { "docid": "9bcbe004404726b5a112ba9634e538fa", "score": "0.6483131", "text": "def extract_data(file):\r\n return pd.read_csv(file)", "title": "" }, { "docid": "2f96c063ba4d72269db64d6af97e44a5", "score": "0.6475103", "text": "def write_csv(data):\n # Using this to simulate file IO,\n # as csv can only write to files.\n f = StringIO()\n writer = csv.writer(f)\n for row in data:\n writer.writerow(row)\n # Get the content of the file-like object\n return f.getvalue()", "title": "" }, { "docid": "9abba71d1e5c882c4d9bec1d0b4f70af", "score": "0.6463594", "text": "def data(self):\n # get file name and open the file\n file_name = f\"{self.name}_data.csv\"\n with open(self.main_path + file_name, \"a\", newline=\"\") as file_object:\n # create CSV object\n csv_object = csv.writer(file_object)\n\n            # 
create header if this is the beginning of a new simulation\n if self.current_step == 1:\n # get list of column names for non-method values and method values\n main_header = [\"Step Number\", \"Number Cells\", \"Step Time\", \"Memory (MB)\"]\n methods_header = list(self.method_times.keys())\n\n # merge the headers together and write the row to the CSV\n csv_object.writerow(main_header + methods_header)\n\n # calculate the total step time and get memory of process in megabytes\n step_time = time.perf_counter() - self.step_start\n process = psutil.Process(os.getpid())\n memory = process.memory_info()[0] / 1024 ** 2\n\n # write the row with the corresponding values\n columns = [self.current_step, self.number_agents, step_time, memory]\n function_times = list(self.method_times.values())\n csv_object.writerow(columns + function_times)", "title": "" }, { "docid": "7b74b7dfa548e8e4bf056ce2c7b6cb46", "score": "0.6455954", "text": "def readData(name):\n data = []\n with open(name, 'r') as f:\n reader = csv.reader(f)\n for row in reader:\n data.append(row)\n return data", "title": "" }, { "docid": "ca1721f902b5a40a5cb11412b6c8f960", "score": "0.6434133", "text": "def save_to_csv(\n csv_data,\n file_path,\n):\n\n # TODO: This functions needs implementation\n\n return None", "title": "" }, { "docid": "fe1a75bf72812c894ff0fc5a9e235695", "score": "0.64310765", "text": "def read_data():\n try: \n with open(FILENAME, 'r') as csvfile:\n csv_data = csv.reader(csvfile)\n data = [[item1.strip(), item2.strip()]\n for item1, item2 in csv_data]\n return data\n except IOError:\n print(\"No such file or directory\")\n except Exception as e:\n print(\"Error:\", e)\n \n return []", "title": "" }, { "docid": "a6f3b6b4bd7ffa99234b1918939ec56e", "score": "0.64254594", "text": "def read_csv_file(file_name):\n \n with open(file_name, newline='') as csvfile:\n table = []\n reader = csv.reader(csvfile, delimiter=',')\n for row in reader:\n table.append(row)\n return table", "title": "" }, { "docid": "af9d7e1dc8eadb4e2fc2211d7ed2c815", "score": "0.6424174", "text": "def get_csv_path(self):\n return self.csv_path", "title": "" }, { "docid": "fe76802f22a025ffaab385105b44e829", "score": "0.6423017", "text": "def get_rows(self):\n self.records.clear()\n try:\n input_file = open(self.input_file_path, 'rt')\n reader = csv.reader(input_file)\n for i, row in enumerate(reader):\n if i >= 12:\n break\n if i == 0:\n self.data_header = row\n if i >= 2:\n self.records.append(list(row))\n input_file.close()\n except OSError as e:\n print(\"I/O error({0}): {1}\".format(e.errno, e.strerror))\n print(self.input_file_path)\n except Exception:\n print(\"Unexpected error:\", sys.exc_info()[0])", "title": "" }, { "docid": "a3dc97069f450c6fd0ce7ca26b4dbaba", "score": "0.6406478", "text": "def open_csv():\n with open(file=csv_params[\"path\"], encoding=\"utf8\", mode=\"r\", newline='') as csv_file:\n rows = list(csv.reader(csv_file, delimiter=csv_params[\"delimiter\"]))\n heading = rows[0]\n sql = str.join(\" text, \", heading)\n # print(\"ok\")\n return heading, sql", "title": "" }, { "docid": "417ddb99a2bd7af622d720b563c5bee0", "score": "0.63980925", "text": "def read_csv(csv_file):\n data = []\n with open(csv_file, \"r\") as file_h:\n for line in reader(file_h):\n data.append(line)\n\n if not data:\n logger.error(\"Data after reading the CSV: %s\", str(data))\n raise EmptyCSV(\"%s is empty!!\" % csv_file)\n\n logger.info(\"No. 
of entries identified : %s\", str(len(data)))\n return data", "title": "" }, { "docid": "8a53e8923a2f907f7227010e8919f19c", "score": "0.6391777", "text": "def read_csv(path):\n csv_data =[]\n \n with open(path, 'r') as csv_file:\n csv_read = csv.reader(csv_file, dialect='excel')\n for row in csv_read:\n csv_data.append(row)\n\n return(csv_data)", "title": "" }, { "docid": "03b021326264ae8a4a3e866efcb452ca", "score": "0.6364913", "text": "def readCSV(file_name):\n data = pd.read_csv(file_name)\n data.columns = ['Insult', 'Date', 'Comments']\n comments = data['Comments']\n return comments", "title": "" }, { "docid": "c8731303adafe8620071fb398e6e9cf6", "score": "0.6360693", "text": "def read_csv(filename='faces.csv'):\n csv = open(filename, 'r') # 'r' means read\n return csv", "title": "" }, { "docid": "52b5a833894802f9ba1632ed11a83309", "score": "0.63411826", "text": "def read_csv(self, file_path):\r\n try:\r\n file_data = self.read_file(file_path)\r\n if file_data is None:\r\n return None\r\n csv_data = pd.read_csv(BytesIO(file_data))\r\n self.logger.log(self.general_log, 'Successfully read the given .CSV file!')\r\n return csv_data\r\n except ClientError as error:\r\n self.logger.log(self.error_log,\r\n 'Error while reading a .CSV file: %s' % error.response['Error']['Code'])\r\n raise error\r\n except Exception as e:\r\n self.logger.log(self.error_log, 'Unexpected error: %s' % e)\r\n raise e", "title": "" }, { "docid": "5b38b8c27f20ca3bcee9e6f9ba7eef26", "score": "0.63410497", "text": "def get_data_set():\n\n with open('FB.csv','rt') as f:\n data = csv.reader(f)\n\n date = []\n close = []\n \n for row in data:\n date.append(row[0])\n close.append(row[4])\n\n date = date[1:]\n close = close[1:]\n\n return date, close", "title": "" }, { "docid": "27ebe0f7b36831a5f6e1964430c3da6d", "score": "0.63377416", "text": "def get_csv_output(cls, file_name, dataset_directory, item):\n path_file = dataset_directory + '/' + file_name\n with open(file=path_file, mode='a+') as csv_file:\n csv_writer = csv.writer(csv_file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n csv_writer.writerow(item)", "title": "" }, { "docid": "3928601297e616e19d8d303c5cbf7f76", "score": "0.6335272", "text": "def read_csv(filepath):\n data = []\n with open(filepath, \"r\", newline=\"\") as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n # print(row)\n data.append(row)\n return data", "title": "" }, { "docid": "241626dc1bf1bef9282f4e4ba997b01a", "score": "0.6330256", "text": "def get_data_to_csv(self, symbol, start=datetime(2000,1,1), end=datetime(2016,1,1)):\n data = self.get_data(symbol, start, end)\n if not data.empty:\n data.to_csv(self.symbol_to_path(symbol))\n return data", "title": "" }, { "docid": "2b19687d12af7e92e786217692a08bfc", "score": "0.63257444", "text": "def open_csv(file):\n\n\ttmp = [] # initialise the list\n\twith open(file, 'r') as f:\n\t\treader = csv.reader(f)\n\t\tfor row in reader:\n\t\t\ttmp.append(row) # add row to list\n\n\treturn tmp", "title": "" }, { "docid": "0625175a8d3a166fcec6f6491a35d678", "score": "0.6324021", "text": "def get_data_set(self):\n if not self.url:\n path=os.path.join(os.path.dirname(__file__),self.url)\n else:\n path=self.url\n \n data_frame_result = pd.read_csv(r\"\"+path)\n return data_frame_result", "title": "" }, { "docid": "bbd2015b2e586b79e44972a1c0f2ebe6", "score": "0.6312478", "text": "def get_csv(self, file_id: str) -> str:\n\n resp = data_utils.get(f'{_OPENML_FILE_API_URL}/get_csv/{file_id}')\n resp = resp.text.replace(', ', 
',').replace(' ,', ',')\n return resp", "title": "" }, { "docid": "3d98901be5a5eb6cce1800404786f321", "score": "0.6310875", "text": "def getCSVData(fileName: str, recordsCount: int):\r\n # global records was created in order to access from inside and outside of a function\r\n global records\r\n\r\n error = '' \r\n try:\r\n with open(fileName) as csv_file:\r\n\r\n \r\n csv_reader = csv.reader(csv_file, delimiter=',')\r\n line_count = 0\r\n count = 0\r\n\r\n records = {}\r\n for row in csv_reader:\r\n if count == recordsCount:\r\n break\r\n if line_count == 0:\r\n line_count += 1\r\n else:\r\n line_count += 1\r\n r = { \r\n str(row[0]) + str(row[3]) : { \r\n \"pruid\": row[0],\r\n \"prname\": row[1],\r\n \"prnameFR\": row[2],\r\n \"date\": row[3],\r\n \"numconf\": row[5],\r\n \"numprob\": row[6],\r\n \"numdeaths\": row[7],\r\n \"numtotal\": row[8],\r\n \"numtoday\": row[13],\r\n \"ratetotal\": row[15],\r\n }\r\n }\r\n records.update(r)\r\n count += 1\r\n\r\n # Exception Handling check if the file exist/ file not found (VALIDATION)\r\n except FileNotFoundError:\r\n\r\n error = f'File is not available: {fileName}' \r\n except Exception as e:\r\n error = 'Something went wrong!' + str(e)\r\n\r\n return error", "title": "" }, { "docid": "fa8c78da59288c2c8e9d35c7707a3e0f", "score": "0.6303828", "text": "def extract_csv_data(self):\n csv = pd.read_csv(self.csv, parse_dates=['local_created_at'])\n self.csv_dataframe = csv", "title": "" }, { "docid": "2295e58df7e1be41d2b837b21033345e", "score": "0.62737393", "text": "def csv_data_gatherer():\n\n try:\n\n with open('work_log.csv', mode='r', newline='') as csv_file:\n data = csv.DictReader(csv_file)\n rows = list(data)\n\n data_from_csv = []\n\n for row in rows:\n data_to_list = []\n if row[\"Date\"]:\n string_date = row[\"Date\"]\n sds = string_date.split(\"-\")\n sd_to_obj = datetime.datetime.strptime(\n \"{}/{}/{}\".format(\n sds[0], sds[1], sds[2][:2]), '%Y/%m/%d')\n\n data_to_list.append(sd_to_obj)\n data_to_list.append(row[\"Task Name\"])\n data_to_list.append(int(row[\"Time Spent (Rounded minutes)\"]))\n data_to_list.append(row[\"Notes (Optional)\"])\n data_from_csv.append(data_to_list)\n\n for x in data_from_csv:\n ENTRIES.append(x)\n\n except FileNotFoundError:\n csv_header()", "title": "" }, { "docid": "cea2aedbd101b42f71fc6e0574d4af10", "score": "0.62698144", "text": "def make_data_csv(self):\n with open(self.data_csv, 'wb') as data_fh:\n trialWriter = csv.writer(data_fh)\n trialWriter.writerow(self.fields_to_save)", "title": "" }, { "docid": "cc525001e07c653a508bc7ee74a10b0c", "score": "0.626711", "text": "def exportCSV(self,fileName):\n print(\"exportCSV\")", "title": "" }, { "docid": "98e89e9d530914dbcd2e1befd5a374b9", "score": "0.62579113", "text": "def read_csv_demo():\n stock_day = pd.read_csv(\"./stock_day/stock_day.csv\")\n\n print(stock_day)\n return None", "title": "" }, { "docid": "960da71d1b6ecab0ed91138a82246bda", "score": "0.6254969", "text": "def getData(self, url):\n data = pd.read_csv(url, header=None)\n return data", "title": "" }, { "docid": "b1ae792b28419d3e7a64034bb46c6941", "score": "0.62547463", "text": "def get_csv_data():\n data = []\n connection = sqlite3.connect(DB_FILE)\n try:\n with connection:\n result = connection.execute(\n \"\"\"SELECT strftime('%Y-%m-%d',timestamp),\n MIN(temperature),MAX(temperature),MIN(humidity),MAX(humidity)\n FROM TM_DATA\n GROUP BY DATE(timestamp)\n ORDER BY DATE(timestamp)\"\"\")\n\n for row in result:\n data.append(row)\n except Error as error:\n print(error)\n finally:\n 
connection.close()\n return data", "title": "" }, { "docid": "efb2b0446a3bff93a52fe366ef658639", "score": "0.62535805", "text": "def csvHandler(csv):\n data = readCsv(csv)\n # cut the head of csv out\n head = data[0]\n data = data[1:]\n # use a list to store useful information\n result = []\n for tweets in data:\n time = tweets[3]\n tid = tweets[1]\n text = tweets[0]\n client = tweets[4]\n retweets = tweets[2]\n hashtags = tweets[5]\n result.append((time, tid,text,client,retweets,hashtags))\n result = dataBreaker(result)\n return result", "title": "" }, { "docid": "8c5545510e322a324ae6be3285bf1917", "score": "0.6238391", "text": "def OpenCSV(self, csv_file='', delimiter=';', column=None, header=None, filter_column=None, filter_value=''):\n return self.__webapp.open_csv(csv_file, delimiter, column, header, filter_column, filter_value)", "title": "" }, { "docid": "87589672bb5f9ba5b335b9d64df2fb60", "score": "0.622915", "text": "def retrieve_data(self):\n with open(self.datafile, \"r\") as datafile:\n wikilist_data = list(csv.reader(datafile, delimiter=';'))\n\n #Go through each line of the csv\n radio_dataset = OrderedDict()\n for row in wikilist_data:\n row = [cell for cell in row if len(cell.strip())]\n #Getting row with a new section\n if len(row) == 1:\n current_section = row[0]\n radio_dataset.update({current_section:[]})\n #Otherwise, store radio info stored in a single row\n else:\n radio_dataset[current_section].\\\n append(RadioInfo(*row))\n return radio_dataset", "title": "" }, { "docid": "d653b90d2eda93ea407b5377f254b99e", "score": "0.6217934", "text": "def read_csv_file(filename):\r\n\r\n data = []\r\n for row in csv.reader(open(filename)):\r\n data.append(row)\r\n return data", "title": "" }, { "docid": "e8e6165474ebc56f0088495da1e7de4a", "score": "0.6196949", "text": "def get_subj_csv(self):\n path,ftype = QtWidgets.QFileDialog.getOpenFileName(self,'Get File','','CSV(*.csv)')\n self.main_ui.edt_subj_csv.setText(path)\n self.li_subs_to_highlight.clear()\n with open(path, 'r') as f:\n reader = csv.reader(f)\n for row in reader:\n self.li_subs_to_highlight.append(row[0])\n self.highlight_csv_subjects()", "title": "" }, { "docid": "1a8c36a494371650e25550114d5ad37f", "score": "0.618825", "text": "def csv_read(self): \n try:\n \n formatValue = self.formatValue\n recordTuple = None \n self.__building_data=[]\n self.__headerFiedlNames = [inputField.headerName for inputField in self.__inputFields]\n \n with open(self.filename, 'r',encoding='utf-8') as csvfile:\n reader = csv.DictReader(csvfile,fieldnames=self.__headerFiedlNames, delimiter=';', quotechar='\"', quoting= csv.QUOTE_NONE, lineterminator='\\n')\n headDict = next(reader)\n x = 0\n for value in headDict.values():\n if value is not None:\n x += 1\n \n if x != len(self.__headerFiedlNames):\n raise NumberIncorrectException(\"CSV Column Header Number Incorrect expected \" + str(len(self.__headerFiedlNames)))\n \n recordTuple = namedtuple('RecordTuple', headDict.keys()) \n \n for y, row in enumerate(reader):\n if len(row) != len(self.__headerFiedlNames): \n raise NumberIncorrectException(\"CSV Column Row Number Incorrect expected \" + str(len(self.__headerFiedlNames)) + \" for java output file xmlResultado.csv row number:\" + str(y+2)) # Included Header columns row\n \n for key, value in row.items():\n if value is not None:\n value = formatValue(key, value)\n else:\n value = \"\" \n row[key] = value\n \n self.__building_data.append(self.convert(recordTuple,row))\n #self.__building_data.append(copy.deepcopy(row))\n del reader \n \n 
self.__log.write_log(\"INFO\", \"csv_read - file \" + self.filename + \" data read\")\n \n return self.__building_data\n \n except NumberIncorrectException as e:\n self.__log.write_log(\"ERROR\", e)\n self.__building_data = None \n raise \n except:\n self.__log.write_log(\"ERROR\", \"CSV Read Unexpected error:\" + str(sys.exc_info()[0]) + \" \" + str(sys.exc_info()[1]))\n self.__building_data = None\n raise", "title": "" }, { "docid": "24e0133b86edc723239251a36a138af4", "score": "0.61798835", "text": "def retrieve(table,fields_value,condi,d_from,d_to):\r\n\r\n\temail = '[email protected]'\r\n\tpassword = 'password'\r\n\tproject_id = '154'\r\n\ttable_name = table\r\n\tfields = fields_value\r\n\tcondition = condi\r\n\tdata_from = d_from\r\n\tdata_to = d_to\r\n\tserver = WSDL.Proxy(WSDLFILE)\r\n\ttables = \"p_%s_%s\"%(project_id,table_name)\r\n\tre_type = 'csv'\r\n\tcsv = server.getData(email,password,fields,tables,condition,data_from,data_to,re_type)\r\n\treturn csv", "title": "" }, { "docid": "adc5ef94785f3a34d2498b4cdde8dc6a", "score": "0.61771613", "text": "def export_data(self):\n data = []\n line = []\n dw = self.data_widget\n for col in range(dw.columnCount()):\n line.append(str(dw.horizontalHeaderItem(col).text()))\n data.append(', '.join(line))\n for row in range(dw.rowCount()):\n line = []\n for col in range(dw.columnCount()):\n line.append(str(dw.item(row, col).text()))\n data.append(', '.join(line))\n output = '\\n'.join(data)\n\n csvfile_path = self.current_video_item.vc.csvpath\n filename = QFileDialog.getSaveFileName(self, 'Save CSV File',\n csvfile_path)\n if filename != '':\n filepath = os.path.abspath(filename[0])\n with open(filepath, 'w') as f:\n f.write(output)", "title": "" }, { "docid": "16cfa132c27a19cd662ea5624c609b57", "score": "0.6169934", "text": "def getDataInCsv(self):\n CITATIONS = \"./Dataset/hep-th-citations\"\n ADDRESS = \"./Dataset/abs\"\n ADDRESS = [ADDRESS + '/' + date for date in os.listdir(ADDRESS)]\n MAIN = {}\n\n # Check if data.txt exists or not\n if(os.path.isfile('./Dataset/data.txt') == False):\n for date in ADDRESS:\n progressBar = tqdm(os.listdir(date))\n for f in progressBar:\n progressBar.set_description(f\"Currently processing: {date.split('/')[-1]}\")\n filename = f.split('.')[0] # 100100.abs = ['100100', 'abs']\n f = open(date + '/' + f) # Open the file\n LOCAL_MAIN = {}\n for line in f.readlines(): # Read lines from the file\n firstWord = line.split(' ')[0]\n if(firstWord == \"Title:\"):\n # The line = 'Title: some title\\n' and we remove substring 'Title: ' and '\\n' from it\n title = line[len(\"Title: \"): len(line) - 1]\n LOCAL_MAIN['TITLE'] = title\n elif(firstWord == \"Authors:\" or firstWord == \"Author:\"):\n # We do not want Organization details for now, the main reason behind this is because the organization detail\n # is absent from a lot of papers in the dataset\n\n # Observed: 1) The organization details are provided between '(' and ')'\n # 2) Authors names separators used are: \",\" or \"and\" or combination of both.\n\n # Removing organization details using Regular Expression\n line = re.sub('\\([A-z0-9]*\\)', \"\", line).strip()\n # Get list of authors\n li = []\n if(firstWord == \"Authors:\"):\n line = line[len(\"Authors: \"):]\n else:\n line = line[len(\"Author: \"):]\n authors = line.split(\"and\") # Split with \"and\", it will split in two parts\n # If only 1 author then split on and will give us array with only 1 element\n if(len(authors) != 1):\n # More than 1 author\n li.append(authors[1])\n # Split with \",\"\n 
authors = authors[0].split(\",\")\n # Merge the lists\n li.extend(authors)\n # Found that author strings might have trailing whitespaces \n li = [i.strip() for i in li]\n LOCAL_MAIN['AUTHORS'] = li\n elif(firstWord == \"Date:\"):\n line = line[len(\"Date: \"):]\n line = line.split('(')[0].strip()\n LOCAL_MAIN['DATE'] = line\n MAIN[filename] = LOCAL_MAIN\n # Save in file for future use\n with open(\"./Dataset/data.txt\", 'w') as outfile:\n json.dump(MAIN, outfile)\n else:\n # Assign MAIN to the data.txt JSON\n with open(\"./Dataset/data.txt\", 'r') as readfile:\n MAIN = json.load(readfile)\n\n ###===============================================================================================###\n \n citationGraph = pd.read_table(CITATIONS)\n citationGraph.columns = [' ']\n citationGraph = citationGraph.to_numpy()\n # citationGraph[0][0] = '0001001 9308122'\n # Split with space, first string is the paper and second string is the citation\n # Take two lists and append into them the two strings in order\n paper = []\n cited = []\n progressBar = tqdm(citationGraph) \n for row in progressBar:\n progressBar.set_description('Parsing citation graph...')\n row = row[0].split(' ')\n p = row[0]\n c = row[1]\n paper.append(p)\n cited.append(c)\n\n paper_title = []\n paper_authors = []\n paper_date = []\n cited_title = []\n cited_authors = []\n cited_date = []\n progressBar = tqdm(paper)\n for i, _ in enumerate(progressBar):\n progressBar.set_description('Creating data.csv...')\n # Get the Research Paper using the key\n paper_title.append(MAIN[paper[i]]['TITLE'])\n paper_authors.append(','.join(MAIN[paper[i]]['AUTHORS']))\n paper_date.append(MAIN[paper[i]]['DATE'])\n # Get the cited Paper using key\n cited_title.append(MAIN[cited[i]]['TITLE'])\n cited_authors.append(','.join(MAIN[cited[i]]['AUTHORS']))\n cited_date.append(MAIN[cited[i]]['DATE'])\n # Convert into PD and save it as csv\n FINAL = {'Paper-title': paper_title, 'Paper-authors': paper_authors, 'Paper-date': paper_date, 'Cited-title': cited_title, 'Cited-authors' : cited_authors, 'Cited-date': cited_date}\n pd.DataFrame(FINAL).to_csv('data.csv')", "title": "" }, { "docid": "36fee338023d080d1b605d4484748ebb", "score": "0.61587876", "text": "def read_from_csv(filename: str) -> Any:\n if os.path.isfile(filename):\n with open(filename, mode=\"r\") as csvfile:\n product_data = csv.DictReader(csvfile, delimiter=\",\")\n rows = list(product_data)\n return rows", "title": "" }, { "docid": "b6c0b3ed2956098705a848738a5d7a67", "score": "0.6158554", "text": "def LoadCsv():\n fname = input('Give the path of the file\\n>')\n with open(fname, 'r') as f:\n valsfromfile = list(csv.reader(f))\n vals=list()\n for valfromfile in valsfromfile:\n vals.append(valfromfile[0])\n return vals", "title": "" }, { "docid": "fba7965e1031d5f651a246c1e5394cae", "score": "0.61570185", "text": "def read_csv(self, filePath): # csv file reader\n list = []\n with open(filePath, \"rU\") as csvfile: # open filePath and declare it as csvfile\n reader = csv.reader(csvfile)\n for row in reader:\n list.append(row)\n return list", "title": "" }, { "docid": "f3052718e9ca63d9de3ee930516718fe", "score": "0.61529064", "text": "def main():\n read_file(\"people.csv\")", "title": "" }, { "docid": "ea248365501c82c0868fbb736ba949e9", "score": "0.6152298", "text": "def read_csv_file(filename, index_st):\n\tfile = open(filename)\n\treader = csv.reader(file)\n\tdata_all = list(reader)\t\n\tdata = np.array(data_all[index_st:])\n\treturn data", "title": "" }, { "docid": "e9ce0f8798acf311ca6f70c948918e2c", 
"score": "0.6151065", "text": "def lesen(self):\n filename = self.path\n content = []\n try:\n with open(filename, 'r') as f:\n reader = csv.reader(f, self.sniffer(filename))\n try:\n for row in reader:\n print(row)\n content.append(row)#der Liste wird ein element hinzugefuegt\n except csv.Error as e:\n sys.exit('file %s, line %d: %s' % (filename, reader.line_num, e))\n return content\n except FileNotFoundError as er:\n print('Datei existiert nicht!')\n return ' '", "title": "" }, { "docid": "ead6f6cc2f54b3913a91da08991bc7b7", "score": "0.61371297", "text": "def read_csv():\n with open(CSV_NAME, newline='') as fh:\n reader = csv.DictReader(fh)\n return list(reader)", "title": "" }, { "docid": "e1e12ce1b01cd034a7a0b4610929514f", "score": "0.6135775", "text": "def read_csv(filename):\n with open(filename) as f:\n reader = csv.DictReader(f)\n DATA = [r for r in reader]\n return DATA", "title": "" }, { "docid": "5b83f72e5ccc112d4264decab576a037", "score": "0.6128731", "text": "def readCSV(filename, delimiter=None):\n with open(filename, 'r') as csvFile:\n if delimiter:\n liens = csv.reader(csvFile, delimiter=delimiter)\n else:\n liens = csv.reader(csvFile)\n\n data = []\n for row in liens:\n data.append(row)\n # print(row)\n # data.pop()\n return data", "title": "" }, { "docid": "c00546d772474acf54e296ca2eeb649b", "score": "0.61263853", "text": "def download_new_csv(self):\n pass", "title": "" }, { "docid": "e590a4326f4bb317ac4a6b1964bc9795", "score": "0.6114424", "text": "def get_entity_csv(self, entity):\n from google.appengine.ext.db import GqlQuery\n\n # permissions\n if self.user.user_type != 'god':\n raise PermissionDenied(\"Only gods can download CSVs\")\n\n allowed_entities = ['program', 'cohort', 'classroom', 'school']\n if entity not in allowed_entities:\n raise PermissionDenied(\n \"Requested entity is not allowed for live CSV\")\n entity = entity.title() # capitalize first letter so GQL query works\n # retrieve requested entity type\n q = GqlQuery(\"SELECT * FROM {} WHERE is_test=False AND deleted=False\"\n .format(entity))\n\n # make a list of lists with headers, see CsvHandler\n out = []\n headers = None\n for r in q:\n entry_dicts = r.to_dict()\n if not headers:\n headers = entry_dicts.keys()\n out.append(headers)\n out.append(entry_dicts.values())\n return out", "title": "" }, { "docid": "345cd999722385807170fa15d14cbc1c", "score": "0.6105632", "text": "def csv_list(file):\n return_list = []\n with open('sample_csv_files/' + file, encoding='utf-8-sig') as csv_file:\n file_input = csv.DictReader(csv_file)\n for row in file_input:\n return_list.append(row)\n return return_list", "title": "" }, { "docid": "f78e9145fee8b96e4721ffa2495c6075", "score": "0.6104069", "text": "def get_data(path):\n data = pd.read_csv(path, header=None)\n columns = generate_column_list()\n data.columns = columns\n\n return data[columns[2:]], data[['label']]", "title": "" }, { "docid": "534359d0bd7689fa6e816fe1546cd1ee", "score": "0.6100842", "text": "def get_raw_data(csv_file):\n\traw_muse_data = []\n\ttry:\n\t\twith open(csv_file, newline='') as file:\n\t\t\t# muse_data looks like [[row],[row],[row]]\n\t\t\tdata_reader = csv.reader(file, delimiter=',')\n\t\t\tfor row_num in data_reader:\n\t\t\t\traw_muse_data += [row_num]\n\n\t\treturn raw_muse_data\n\texcept:\n\t\tprint(\"ERROR - get_data_from_csv(csv_file): unable to read csv file\")", "title": "" }, { "docid": "2ae5842634e6aefb228eb855af129dc3", "score": "0.6096046", "text": "def load_csv(self):\n # Ensure a unique extract directory\n extract_dir = 
self.extract_directory_base\n\n while os.path.exists(extract_dir):\n extract_dir += '0'\n\n # Set up the local path variables.\n outputs_path = os.path.join(extract_dir, \"outputs\")\n\n try:\n extract_tar(self.wu_file, extract_dir)\n\n with open(os.path.join(outputs_path, output_files['csv']), 'r') as f:\n # Load the CSV rows.\n csv_file = csv.DictReader(f)\n self.cells = [a for r in csv_file for a in r.values()]\n self.rows = [r for r in csv_file]\n except:\n raise\n finally:\n if extract_dir and os.path.exists(extract_dir):\n shutil.rmtree(extract_dir)\n\n return self.cells", "title": "" }, { "docid": "6a02ff6eb9921d0be1ed9d8cf294b81b", "score": "0.60909337", "text": "def readCSV(self, filename='../test_files/LMT85_LookUpTableCSV.csv', data_format='str', header=True, delimiter_arg = ';'):\n valid_types = ['int', 'str', 'float', 'hex']\n\n if not isinstance(filename, str):\n raise TypeError(\"Filename is not a string\")\n if not isinstance(data_format, str) or data_format not in valid_types:\n raise TypeError(f'data_format: {data_format} is not in list of valid types.')\n\n data = []\n first_row = True\n # print(os.getcwd())\n with open(filename) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=delimiter_arg)\n for row in csv_reader:\n # Gather data in a list\n row_data = []\n\n # Convert first row to the correct data format if header is false\n if first_row and header == True:\n first_row = False\n for l in range(len(row)):\n row_data.append(row[l])\n else:\n for l in range(len(row)):\n if data_format == 'int' or data_format == 'hex':\n row_data.append(int(row[l], 0))\n elif data_format == 'float':\n row_data.append(float(row[l]))\n else:\n row_data.append(row[l])\n\n if row_data:\n data.append(row_data)\n return data", "title": "" }, { "docid": "ac2b51f634eac6e7b0893f0a83c3e46d", "score": "0.60816556", "text": "def read_csv(self):\n with open(self.input_file, 'r') as f:\n self.data = csv.DictReader(f)\n for header in self.data.fieldnames:\n self.data_dict[header] = []\n for row in self.data:\n for key,value in row.iteritems():\n self.data_dict[key].append(value)", "title": "" }, { "docid": "3785a4ceb46f9e2b508f9be7b3eff1b2", "score": "0.6080789", "text": "def read_csv_dataset():\n return pd.read_csv('ESC-50/meta/esc50.csv')", "title": "" }, { "docid": "d07320b19c07571aaafd7374b2551f3b", "score": "0.60737664", "text": "def retrieve_csv_data(rows, row_header=0, column_header=0, limit_column=0):\n return [row[row_header:limit_column] for row in rows[column_header:]]", "title": "" }, { "docid": "b7a43c06a3b0651009fd6a0665dff9b5", "score": "0.60696495", "text": "def open_csv(self, filename='transactions_record.csv'):\n\n Firm.transactions_record = pd.read_csv(filename, sep=';',index_col=0)\n print(\"CSV file '{}' loaded\".format(filename))", "title": "" }, { "docid": "1082591a55d7581a310a1133e2724c78", "score": "0.6065059", "text": "def stream(self):\n try:\n fp = open(self.filepath, 'Ur')\n except Exception, exc:\n message = 'Unable to open file {filepath}: {exc}'.format(\n filepath=self.filepath, exc=exc)\n raise OpenFileError(message)\n\n reader = csv.reader(fp, quoting=csv.QUOTE_NONNUMERIC)\n reader.next() # consume var names\n for row in reader:\n yield row\n fp.close()", "title": "" }, { "docid": "21626b47334a194ec5b4c6687aaed69d", "score": "0.60603875", "text": "def csv_export(self):\n endpoint = self.ENDPOINTS['csv_export'] % (self.app_id)\n return self.post(self._url(endpoint))", "title": "" }, { "docid": "a09136b7df0b53eb079854633df0e817", "score": "0.6049905", "text": 
"def _read_csv(csv_name: str) -> CSV:\n f = open(csv_name)\n csv = []\n for line in f:\n csv.append(line.split(\";\"))\n return csv", "title": "" }, { "docid": "49837e9893c21d622377462b10c835ef", "score": "0.6043926", "text": "def csv_data(self, results):\n return [\n self.name,\n self.__fingerprint['protocol'],\n self.__o_src_ips,\n self.__o_src_ports,\n self.__o_dst_ports,\n self.__src_ips,\n self.__src_ports,\n self.__dst_ports,\n results['TP'],\n results['FP'],\n results['UP'],\n results['TN'],\n results['FN'],\n results['UN']\n ]", "title": "" }, { "docid": "674b4bbd964dcd18a812fa8a2319344e", "score": "0.604115", "text": "def __read_csv_data(self: ConConverter) -> list:\n with open(self.__in, mode='r') as infile:\n data = [{header: value for header,value in row.items()}\n for row in csv.DictReader(infile, skipinitialspace=True)]\n return data", "title": "" }, { "docid": "110618d4bbcec44ae94b29ec3e449e45", "score": "0.60401326", "text": "def get_csv(url):\n\twith urllib.request.urlopen(url) as response:\n\t\tjsonraw = response.read().decode()\n\t\treturn jsonraw", "title": "" }, { "docid": "5f77d3be6ddb7999531f2d030520e256", "score": "0.6035042", "text": "def parse_csv(file):\n with open(file, newline='') as f:\n data = list(csv.reader(f))\n\n return data", "title": "" }, { "docid": "7990bea178e9e8e5d13a214ac02526ca", "score": "0.60298324", "text": "def get_cpi_file(self):\n # Open up the CSV from the BLS\n csv_path = os.path.join(self.this_dir, 'data.csv')\n csv_file = open(csv_path, \"r\")\n return csv.DictReader(csv_file)", "title": "" }, { "docid": "31c5776e63869e74ac6007d70a1ec9d1", "score": "0.602342", "text": "def _example_data():\n return pd.read_csv(Path(Path(__file__).parent.parent, \"Data\", \"ExampleData.csv\"))", "title": "" } ]
33e9dd697aa0873d63e601ff171033aa
Two test users are created, but only the first one is logged in.
[ { "docid": "b9a23a5f8000f9fed04b15d7eef592eb", "score": "0.0", "text": "def setUp(self):\n # set up an active test user and log them in\n first_test_user = factories.MTUserFactory(is_active=True)\n first_test_user_profile = factories.UserProfileFactory(user=first_test_user)\n self.first_test_user = first_test_user\n\n login_response = self.client.login(username=first_test_user.email, password=factories.DEFAULT_PASSWORD)\n\n if not login_response:\n self.skipTest(\"Could not log in\")\n\n # setup a second test user but do not log them in\n second_test_user = factories.MTUserFactory(is_active=True)\n second_test_user_profile = factories.UserProfileFactory(user=second_test_user)\n self.second_test_user = second_test_user\n\n # set up a test account and some transactions for first user\n account_first = factories.AccountFactory(user=first_test_user)\n self.account_first = account_first\n\n for i in range(randint(1, 20)):\n factories.TransactionFactory(account=account_first, user=first_test_user)\n\n # set up an account and some transactions for second user\n account_second = factories.AccountFactory(user=second_test_user)\n self.account_second = account_second\n\n for i in range(randint(1, 10)):\n factories.TransactionFactory(account=account_second, user=second_test_user)", "title": "" } ]
[ { "docid": "cecd23b2f17c718e8d4802d2e20bc74c", "score": "0.7482575", "text": "def test_0000_initiate_users(self):\n self.login(email=common.test_user_1_email, username=common.test_user_1_name)\n test_user_1 = self.test_db_util.get_user(common.test_user_1_email)\n assert test_user_1 is not None, 'Problem retrieving user with email %s from the database' % common.test_user_1_email\n self.test_db_util.get_private_role(test_user_1)\n self.login(email=common.admin_email, username=common.admin_username)\n admin_user = self.test_db_util.get_user(common.admin_email)\n assert admin_user is not None, 'Problem retrieving user with email %s from the database' % common.admin_email\n self.test_db_util.get_private_role(admin_user)", "title": "" }, { "docid": "6556acfdcce6e22598960d5fa577f5cf", "score": "0.7471892", "text": "def test_log_in_existing_user(self):\n users = seed_mock_data.seed_user_w_no_travel()\n user = users[0]\n test_username = bytes(user.username, \"utf-8\")\n\n result = self.client.post(f\"/profile/{test_username}\")\n\n self.assertIn(test_username, result.data)", "title": "" }, { "docid": "460c2c6a46ae8196f9c0a6e7444ab907", "score": "0.7439339", "text": "def test_0000_initiate_users( self ):\n self.login( email=common.test_user_1_email, username=common.test_user_1_name )\n test_user_1 = self.test_db_util.get_user( common.test_user_1_email )\n assert test_user_1 is not None, 'Problem retrieving user with email %s from the database' % common.test_user_1_email\n self.test_db_util.get_private_role( test_user_1 )\n self.login( email=common.admin_email, username=common.admin_username )\n admin_user = self.test_db_util.get_user( common.admin_email )\n assert admin_user is not None, 'Problem retrieving user with email %s from the database' % common.admin_email\n self.test_db_util.get_private_role( admin_user )", "title": "" }, { "docid": "5974aa0ac7837fbfb72e18d81964ec4e", "score": "0.7436203", "text": "def test_0000_initiate_users(self):\n self.login(email=common.test_user_1_email, username=common.test_user_1_name)\n test_user_1 = self.test_db_util.get_user(common.test_user_1_email)\n assert test_user_1 is not None, 'Problem retrieving user with email %s from the database' % common.test_user_1_email\n self.test_db_util.get_private_role(test_user_1)\n self.login(email=common.test_user_2_email, username=common.test_user_2_name)\n test_user_2 = self.test_db_util.get_user(common.test_user_2_email)\n assert test_user_2 is not None, 'Problem retrieving user with email %s from the database' % common.test_user_2_email\n self.test_db_util.get_private_role(test_user_2)\n self.login(email=common.admin_email, username=common.admin_username)\n admin_user = self.test_db_util.get_user(common.admin_email)\n assert admin_user is not None, 'Problem retrieving user with email %s from the database' % common.admin_email\n self.test_db_util.get_private_role(admin_user)", "title": "" }, { "docid": "9f0dbf317bbf41be6dd6d8873123800c", "score": "0.7420557", "text": "def test_auth_check(self):\n self.new_user = Users(\"Nanjala\", \"Joan\", \"password\")\n self.new_user.create_user()\n another_user = Users(\"user2\",\"othername\",\"password2\")\n another_user.create_user()\n\n for cred in Users.user_info:\n if cred.first == another_user.first and cred.password == another_user.password:\n identity = another_user.first\n\n return identity", "title": "" }, { "docid": "6b490c734da5d785b0aa9df13b9870bd", "score": "0.7409982", "text": "def test_0000_initiate_users(self):\n self.login(email=common.test_user_1_email, 
username=common.test_user_1_name)\n test_user_1 = self.test_db_util.get_user(common.test_user_1_email)\n assert test_user_1 is not None, 'Problem retrieving user with email %s from the database' % common.test_user_1_email\n self.test_db_util.get_private_role(test_user_1)\n self.login(email=common.admin_email, username=common.admin_username)\n admin_user = self.test_db_util.get_user(common.admin_email)\n assert admin_user is not None, 'Problem retrieving user with email %s from the database' % common.admin_email\n self.test_db_util.get_private_role(admin_user)\n self.galaxy_login(email=common.admin_email, username=common.admin_username)\n galaxy_admin_user = self.test_db_util.get_galaxy_user(common.admin_email)\n assert galaxy_admin_user is not None, 'Problem retrieving user with email %s from the database' % common.admin_email\n self.test_db_util.get_galaxy_private_role(galaxy_admin_user)", "title": "" }, { "docid": "6b490c734da5d785b0aa9df13b9870bd", "score": "0.7409982", "text": "def test_0000_initiate_users(self):\n self.login(email=common.test_user_1_email, username=common.test_user_1_name)\n test_user_1 = self.test_db_util.get_user(common.test_user_1_email)\n assert test_user_1 is not None, 'Problem retrieving user with email %s from the database' % common.test_user_1_email\n self.test_db_util.get_private_role(test_user_1)\n self.login(email=common.admin_email, username=common.admin_username)\n admin_user = self.test_db_util.get_user(common.admin_email)\n assert admin_user is not None, 'Problem retrieving user with email %s from the database' % common.admin_email\n self.test_db_util.get_private_role(admin_user)\n self.galaxy_login(email=common.admin_email, username=common.admin_username)\n galaxy_admin_user = self.test_db_util.get_galaxy_user(common.admin_email)\n assert galaxy_admin_user is not None, 'Problem retrieving user with email %s from the database' % common.admin_email\n self.test_db_util.get_galaxy_private_role(galaxy_admin_user)", "title": "" }, { "docid": "941523a7a0342a6a622adbdd714c1107", "score": "0.7409477", "text": "def test_0000_initiate_users( self ):\n self.galaxy_logout()\n self.galaxy_login( email=common.admin_email, username=common.admin_username )\n admin_user = test_db_util.get_galaxy_user( common.admin_email )\n assert admin_user is not None, 'Problem retrieving user with email %s from the database' % admin_email\n admin_user_private_role = test_db_util.get_galaxy_private_role( admin_user )\n self.logout()\n self.login( email=common.test_user_1_email, username=common.test_user_1_name )\n test_user_1 = test_db_util.get_user( common.test_user_1_email )\n assert test_user_1 is not None, 'Problem retrieving user with email %s from the database' % test_user_1_email\n test_user_1_private_role = test_db_util.get_private_role( test_user_1 )\n self.logout()\n self.login( email=common.admin_email, username=common.admin_username )\n admin_user = test_db_util.get_user( common.admin_email )\n assert admin_user is not None, 'Problem retrieving user with email %s from the database' % common.admin_email\n admin_user_private_role = test_db_util.get_private_role( admin_user )", "title": "" }, { "docid": "81852cc10da303a1b94aa776bc31c75f", "score": "0.71802956", "text": "def test_can_create_user(self):\n self.client.force_authenticate(user=self.authenticated_user)\n response = self.client.post(\n '/users/', {\n 'email': '[email protected]',\n 'password': 'testpassword'\n }, format='json'\n )\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n 
self.assertEqual(TimerUser.objects.count(), 2)\n new_user = TimerUser.objects.get(email='[email protected]')\n\n # Test user data\n self.assertEqual(new_user.email, '[email protected]')\n self.assertIsNone(new_user.display_name)\n self.assertEqual(new_user.get_full_name(), 'testcreate')\n self.assertEqual(new_user.get_short_name(), 'testcreate')\n self.assertEqual(new_user.is_active, True)\n self.assertEqual(new_user.is_admin, False)\n self.assertEqual(new_user.is_staff, False)\n\n # Try to login with the user.\n self.assertTrue(\n authenticate(email='[email protected]', password='testpassword'),\n \"Could not authenticate new user\"\n )", "title": "" }, { "docid": "f0fa4f9181e412ccd094cbc58c9909aa", "score": "0.7173492", "text": "def test_create_user(self):\n pass", "title": "" }, { "docid": "f0fa4f9181e412ccd094cbc58c9909aa", "score": "0.7173492", "text": "def test_create_user(self):\n pass", "title": "" }, { "docid": "1339356bb4842bf8604fe049fca25f2d", "score": "0.7107476", "text": "def test_user_creation(self):\n CustomUser.objects.get(id=1)", "title": "" }, { "docid": "e7d43aafa0eeb3ec548150695d4481c8", "score": "0.70758", "text": "def test_register_an_user(self):\n\n response = self.client.post(\n reverse(\"sign_in\"),\n data={\n \"email\": \"[email protected]\",\n \"username\": \"Hello_test2\",\n \"password1\": \"Globalshoot46\",\n \"password2\": \"Globalshoot46\",\n }\n )\n self.assertEqual(response.status_code, 302)\n users_list = get_user_model().objects.all()\n self.assertEqual(users_list.count(), 2)", "title": "" }, { "docid": "fdf62c636d36fea34261a5d4481f24f5", "score": "0.7072557", "text": "def test_create_user_robot(self):\n pass", "title": "" }, { "docid": "8ce763c6321c3f69fcc19cc602bd21e5", "score": "0.7018191", "text": "def signuptest(self):\n\n self.assertEqual(self.new_user.login,\"migidza-andisi\")\n self.assertEqual(self.new_user.password,\"shay123\")", "title": "" }, { "docid": "41d2cb0085b14ab5d95c632132014841", "score": "0.7015016", "text": "def test_get_sentinel_user(self):\n sentinel_user = get_user_model().objects.create_user(email=\"[email protected]\", password=\"1234\")\n sentinel_user2 = get_sentinel_user()\n self.assertEqual(sentinel_user.id, sentinel_user2.id)", "title": "" }, { "docid": "6727167f012e7ddfaafabe6004df5a58", "score": "0.7004389", "text": "def test_post_existing_user(self):\n get_user_model().objects.create_user(self.email, email=self.email)\n response = self.client.post(\"/login/\", {\"next\": \"\", \"email\": self.email})\n self.assertIsNotNone(get_user_model().objects.get(username=self.email))\n self.assertIn(\n f\"Your login link has been sent to {self.email}\",\n response.context[\"content\"],\n )", "title": "" }, { "docid": "1d80609164f01a488d080fb959ec8b09", "score": "0.6974849", "text": "def test_save_user(self):\n\n self.new_user.create_user()\n self.assertEqual(len(Users.user_info),1)", "title": "" }, { "docid": "83e73d16d2a07ea71c9ace97d04ef49d", "score": "0.6972077", "text": "def test_users_1(self):\n self.login(USERNAME, PASSWORD)\n rv = self.app.post('/admin/users/new/', data=self.user_data)\n assert not 'Error 400' in rv.data\n assert not 'Error 500' in rv.data", "title": "" }, { "docid": "c26cbdfc2b31ca57bb807dc267404ff6", "score": "0.6967814", "text": "def test_user_built_in_adds_new_user(self):\n test_user2 = User.objects.last()\n example2 = Profile(\n website=\"www.chelseadole.com\",\n fee=\"1.00\",\n camera=\"Canon\",\n bio=\"My bio\",\n phone=1069147021,\n user=test_user2\n )\n test_user2.profile = example2\n 
self.assertEqual(example2.camera, \"Canon\")\n self.assertEqual(test_user2.profile.camera, \"Canon\")\n self.assertEqual(example2.user.username, User.objects.last().username)", "title": "" }, { "docid": "32f887ce58acf326199896855cbb4de7", "score": "0.69650054", "text": "def test_post_duplicate_email(self):\n get_user_model().objects.create_user(self.email, email=self.email)\n get_user_model().objects.create_user(\"foobar\", email=self.email)\n with patch(\"django.conf.settings.CREATE_NEW_USERS\", False):\n response = self.client.post(\"/login/\", {\"next\": \"\", \"email\": self.email})\n self.assertIn(\n f\"There are multiple users associated with {self.email}\",\n response.context[\"error\"],\n )", "title": "" }, { "docid": "43c4395ad5b4f5c2a89940894d38eb8d", "score": "0.6964775", "text": "def test_add_user(self, client):\n for username, password in (user for user in USERS['to_create']):\n r = client.api.add_user(username, password)\n assert r.get('status') == 'success'", "title": "" }, { "docid": "6fa7433fc6b7a565dc87f23474193bd2", "score": "0.69561434", "text": "def test_save_login(self):\n self.new_user.save_login()\n self.assertEqual(len(User.user_list,),1)", "title": "" }, { "docid": "a27222f6a0ae7d93e7ced4cfa2ac841b", "score": "0.6947998", "text": "def test_create_user(self):\n session.clear()\n response = self.client.post(\n url_for('users'),\n data={\n 'username': 'test user',\n 'email': '[email protected]',\n 'password': '123456'\n }\n )\n self.assertEqual(response.status_code, 201)\n self.assertIn(b'User created successfully', response.data)\n self.assertEqual(db.session.query(User).count(), 2)", "title": "" }, { "docid": "18fb30bb4d63dc4e875750c43fa9fb42", "score": "0.6918078", "text": "def test_createUsers():\n print(\"Executing test_createUsers...\")\n users_stash = local.stash.UsersStash()\n\n helper.TEST_USER_01_UID = users_stash.createUser(helper.TEST_USER_01_FIRST_NAME,\n helper.TEST_USER_01_LAST_NAME,\n helper.TEST_USER_01_NICK_NAME,\n helper.TEST_USER_01_PASSWORD)\n nose.tools.assert_not_equal(helper.TEST_USER_01_UID, None)\n\n helper.TEST_USER_02_UID = users_stash.createUser(helper.TEST_USER_02_FIRST_NAME,\n helper.TEST_USER_02_LAST_NAME,\n helper.TEST_USER_02_NICK_NAME,\n helper.TEST_USER_02_PASSWORD)\n nose.tools.assert_not_equal(helper.TEST_USER_02_UID, None)\n\n helper.TEST_USER_03_UID = users_stash.createUser(helper.TEST_USER_03_FIRST_NAME,\n helper.TEST_USER_03_LAST_NAME,\n helper.TEST_USER_03_NICK_NAME,\n helper.TEST_USER_03_PASSWORD)\n nose.tools.assert_not_equal(helper.TEST_USER_03_UID, None)", "title": "" }, { "docid": "d4188a0f75b8894d1e1ccb3be5abad6d", "score": "0.69100714", "text": "def setUp(self):\n self.admin = User.objects.create_superuser(\n username='admin',\n email='[email protected]',\n password='Admin74940',\n role=1,\n )\n\n self.user_sales = User.objects.create_user(\n username='sales',\n email='[email protected]',\n password='Sales74940',\n role=2\n )\n\n self.support = User.objects.create_user(\n username='support',\n email='[email protected]',\n password='Support74940',\n role=3,\n )", "title": "" }, { "docid": "b11400f23e1e4a6053cf8a59909abc24", "score": "0.69059825", "text": "def create_users():\n return 1", "title": "" }, { "docid": "6e0565c55f0b9dd658397383ecc6865d", "score": "0.68834203", "text": "def test_user_session(self):\n pass", "title": "" }, { "docid": "5aaf11c57388ecd547ccca130ccdb1ac", "score": "0.68781376", "text": "def test_signin(self):\n\n test_user = User.authenticate(self.user1.username, USER1_DATA[\"password\"])\n \n 
self.assertTrue(test_user)", "title": "" }, { "docid": "733c7f5892ca194614476d79ba383ff3", "score": "0.686634", "text": "def test_save_multiple_users(self):\n self.newuser.save_user()\n testuser= User(\"TestFirst\",\"TestLast\",\"TestUsername\",\"TestPassword\")\n testuser.save_user()\n self.assertEqual(len(User.userslist),2)", "title": "" }, { "docid": "ff35b430d69ca730578524139c9a4520", "score": "0.68612176", "text": "def test_existing_user(self):\n user = User.objects.create_user(username='test', password='test', email='')\n social_user = UserSocialAuth.objects.create(\n user=user, provider='lastfm', uid='1000002'\n )\n data = {'token': 'FAKEKEY'}\n response = self.client.get(self.complete_url, data)\n self.assertRedirects(response, DEFAULT_REDIRECT)", "title": "" }, { "docid": "37f3bd1c9bf34ef0c763d2c25687d84c", "score": "0.6839594", "text": "def test_dashboard_user(self):\n # ----------- GET NOT AUTHENTIFICATED ----------- #\n response = self.client.get(\"/accounts/dashboard/\")\n self.assertEqual(response.status_code, 302)\n\n # ----------- GET AUTHENTIFICATED ----------- #\n self.client.force_login(User.objects.get_or_create(username=\"roger\")[0])\n response = self.client.get(\"/accounts/dashboard/\")\n # self.assertEqual(response.status_code, 200)", "title": "" }, { "docid": "f083eaa789f8335780851c1285859624", "score": "0.68392265", "text": "def test_create_user(self):\n\n response = self.client.post(self.signup_url, data=self.data)\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(User.objects.count(), 2)\n\n self.assertEqual(response.data[\"username\"], response.data[\"username\"])\n self.assertNotIn(\"password\", response.data)", "title": "" }, { "docid": "b5c8d73e617cb9016f2ae0742059bbaa", "score": "0.6831231", "text": "def testCreateUser(self):\n err2, user2 = UserManager.createUser(\"test2\", \"aaaa\")\n self.assertEquals(err2, \"succeed\")", "title": "" }, { "docid": "8a8bae908a398303fde2bb3189447c59", "score": "0.68221027", "text": "def get_user_if_registered_success(self):\n self.user.create_user(self.user_data)\n self.assertDictContainsSubset(self.user_data, self.user.get_user(1))", "title": "" }, { "docid": "5286065f1de531871327a958baba72fc", "score": "0.67968404", "text": "def test_create_user(self):\n assert self.test_user.create_user(\n \"test\", \"[email protected]\", \"male\", \"1234\") == 1\n assert self.test_user.create_user(\n \"testab\", \"[email protected]\", \"male\", \"1234\") == 0", "title": "" }, { "docid": "7161c222eef49943232e6efb963a0152", "score": "0.6794734", "text": "def test_user_add(self):\n pass", "title": "" }, { "docid": "26c75ed82858eb70df9a8262d45def3c", "score": "0.67945117", "text": "def test_main_with_users(self):\n add_user('abel', '[email protected]')\n add_user('fredy', '[email protected]')\n with self.client:\n response = self.client.get('/')\n self.assertEqual(response.status_code, 200)\n self.assertIn(b'Todos los usuarios', response.data)\n self.assertNotIn(b'<p>No hay usuarios!</p>', response.data)\n self.assertIn(b'abel', response.data)\n self.assertIn(b'fredy', response.data)", "title": "" }, { "docid": "6691e318fa3da79f9be49d9f002cf4da", "score": "0.6792554", "text": "def test_create_user_profile(self):\n pass", "title": "" }, { "docid": "8704581226d178852ae40365d9fe58b6", "score": "0.6775212", "text": "def test_2_1_create_same(self):\n user = User(username = \"test_username\",\n password = \"test_password\",\n first_name = \"test_first_name\",\n last_name = \"test_last_name\") \n \n result = 
user.create()\n\n self.assertIsNone(user.id, \"User ID must be None\")\n self.assertEqual(result, constants.FAILURE)", "title": "" }, { "docid": "c99a0c6b8e5593da9343e57c3bc63cd9", "score": "0.6774878", "text": "def test_post_register_auto_login(self):\n response = self.client.post(\"/accounts/register/\",\n {\"username\":\"mike\",\n \"email\":\"[email protected]\",\n \"password1\":\"mikepass\",\n \"password2\":\"mikepass\"},\n follow=True)\n\n users = [u for u in models.User.objects.all()]\n self.assertEqual(len(users), 1, \"Successful registration must create a user\")\n u = users[0]\n self.assertEqual(u.username, \"mike\")\n\n self.assertEqual(response.status_code, 200)\n self.assertTrue(self.client.session.has_key(SESSION_KEY),\n \"After registration the user should be logged in.\")\n self.assertEqual(self.client.session[SESSION_KEY],\n u.id)", "title": "" }, { "docid": "1124f1b46bd35a886d3b300e52c4cb1e", "score": "0.6768918", "text": "def create_dummy_user(self):\n username = \"jsmith\"\n email = \"[email protected]\"\n user = User.objects.create_user(username, email, \"test123\")\n return (user, username, email)", "title": "" }, { "docid": "30628f0bfc49e6fa109aa42a8f51bfae", "score": "0.6768249", "text": "def test_finalcheck_different_user(self):\n\n other_user = ArtaUserFactory()\n # Let the registration updated_at be the largest of all timestamps, to make an implementation that just takes\n # the max timestamp fail.\n self.reg.save()\n\n response = self.client.get(self.final_check_url)\n self.client.logout()\n self.client.force_login(other_user)\n self.assertCache(response, changed=True)", "title": "" }, { "docid": "c033c323701bb77bc1b64bd306edcefa", "score": "0.6756094", "text": "def test_login_user_exists(self):\n response = self.app.test_client().post(\n \"/auth/signup\",\n data=dict(\n username=\"helloworld\",\n email=\"[email protected]\",\n password=\"topsecret\",\n password2=\"topsecret\",\n ),\n follow_redirects=True,\n )\n\n self.assertEqual(response.status_code, 200)\n\n response = self.app.test_client().post(\n \"/auth/login\",\n data=dict(email=\"[email protected]\", password=\"topsecret\"),\n follow_redirects=True,\n )\n\n self.assertEqual(response.status_code, 200)\n self.assertTrue(b\"Sign Out\" in response.data)\n self.assertTrue(b\"Signed in as helloworld\" in response.data)", "title": "" }, { "docid": "6cfe7efa1eaee28e7165566e62f4ffb9", "score": "0.67556167", "text": "def test_create_user_if_already_registred_false(self):\n self.user.create_user(self.user_data)\n res = self.user.create_user(self.user_data)\n self.assertEqual(res.get('Message'), 'That user already exists')", "title": "" }, { "docid": "286b25bd3abf1ff5f8fbc17c4742851a", "score": "0.67554635", "text": "def test_login_user(self):\n response = self.tester.post('/api/v1/auth/signup', data=self.user)\n self.assertEqual(201, response.status_code)\n self.assertIn('User created successfully', str(response.data))\n response = self.tester.post('/api/v1/auth/login', data=self.user)\n self.assertEqual(200, response.status_code)\n self.assertIn('You are successfully logged in', str(response.data))\n\n \"\"\"test login for non existing user\"\"\"\n response = self.tester.post('/api/v1/auth/login', data={\n 'username': 'lugada', 'email': '[email protected]',\n 'password': 'yes'\n })\n self.assertEqual(401, response.status_code)\n self.assertIn('Please Create an account', str(response.data))\n\n \"\"\"test login with incorrect credentials\"\"\"\n response = self.tester.post('/api/v1/auth/login', data={\n 
'username': 'joshua', 'password': 'yes456'\n })\n self.assertEqual(401, response.status_code)\n self.assertIn('username or password is incorrect', str(response.data))", "title": "" }, { "docid": "cf93645ebaf648d8443890afbe19bea5", "score": "0.6745831", "text": "def test_users_profile_was_created_with_new_user(self):\n profile = UserProfile.objects.get(user__username=self.username)\n self.assertEqual(profile.user.username, self.username)", "title": "" }, { "docid": "4a90ed29e6112109107f8e86b2bf8d3b", "score": "0.67332137", "text": "def test_login(self):\n with test_database(self.TEST_DB, (User,)):\n self.app.post('/register', data=self.data)\n rv = self.app.post('/login', data=self.login)\n self.assertEqual(rv.status_code, 302)\n with self.app.session_transaction() as sess:\n assert 'user_id' in sess", "title": "" }, { "docid": "5620557faf197c1825fdc679597f57b7", "score": "0.6731318", "text": "def test_check_user(self):\n self.new_user = User(\"Anthony\", \"Mu\\tu\\ku\",\"K8ddj6m2l\")\n self.new_user.save_user()\n user2 = User(\"Jymal\", \"An\\tho\\ny\", \"K8ddj6m2l\")\n user2.save_user()\n\n for user in User.users_list:\n if user.first_name == user2.first_name and user.password == user2.password:\n current_user = user.first_name\n return current_user\n\n self.assertEqual(current_user,Credential.check_user(user2.password,user2.first_name))", "title": "" }, { "docid": "239242738d08bf8c7604c8d731bcfe61", "score": "0.673028", "text": "def test_create_new_user(self):\n user = self.usermanager.create_user(\n '[email protected]',\n 'a;d+-394hasldf0)'\n )\n self.assertFalse(user.is_superuser)\n self.assertFalse(user.is_staff)", "title": "" }, { "docid": "a6c2b3ca78ad574a5ca4b37201fe97d3", "score": "0.67283267", "text": "def test_superadmin(self):\n valid_sid = self.session.sid\n name = str(getRandomBytes())\n valid_pld = {\"name\": name,\n \"mail\": \"mail@mynewuser\",\n \"password\": \"mynewuser\"}\n\n # first we create that new user\n uid = new_user(self.ccd, valid_sid, valid_pld)\n self.assertIsInstance(uid, int)\n\n # second, we check for the user to be in the database\n uid2 = User.by_name(self.ccd._db.conn, name).uid\n self.assertEqual(uid, uid2)", "title": "" }, { "docid": "8d799ab3d870518d7cea29b4965e46ab", "score": "0.6727033", "text": "def testLogins(self):\n self.failUnlessEqual(self.client.login(username=self.user.username,password='top_secret'),True) # Checcks whether login is successful.\n self.client.logout()\n logintime_list=TimeStamp.objects.filter(user=self.user)\n self.assertIs( len(logintime_list), 1) # Checks whether a timeeStamp is creatd for the login", "title": "" }, { "docid": "b9575bd68f281e692c0f78773f961e9b", "score": "0.67246807", "text": "def init():\n if services.get_all_users().count() == 0:\n services.save_user(user_1())\n services.save_user(user_2())", "title": "" }, { "docid": "d5a0fc28ac0e75eb95f430a385e12569", "score": "0.67184937", "text": "def test_profile_creation(self):\n # there are no users\n assert len(User.objects.all()) == 0\n assert len(Profile.objects.all()) == 0\n\n # the creation of a user triggers the creation of a profile\n user = User.objects.create(username='test', password='test')\n assert len(User.objects.all()) == 1\n assert len(Profile.objects.all()) == 1\n assert Profile.objects.all()[0].user == user\n\n # saving again the user does not create another profile\n user.first_name = 'the'\n user.save()\n assert len(User.objects.all()) == 1\n assert len(Profile.objects.all()) == 1", "title": "" }, { "docid": "a095aa8de8020bcd00e6da82bd2ad454", 
"score": "0.67166775", "text": "def setUp(self):\n self.client = Client()\n self.admin_user = get_user_model().objects.create_superuser(\n email='[email protected]',\n password='pass123'\n )\n self.client.force_login(self.admin_user)\n self.user = get_user_model().objects.create_user(\n email='[email protected]',\n password='pass123',\n name='Test user full name'\n )", "title": "" }, { "docid": "10866ebee3a02a63ad2caa8de092ff40", "score": "0.67146516", "text": "def test_multiple_success(self):\n user_names = [\"foo\", \"bar\", \"baz\"]\n\n for user_name in user_names:\n user = UserFactory.create(username=user_name, is_active=True)\n user.profile.email_optin = True\n user.profile.save()\n UserSocialAuthFactory.create(user=user, provider='not_edx')\n for _ in range(TOTAL_PROGRAMS):\n ProgramEnrollmentFactory.create(user=user)\n\n assert user.is_active is True\n assert user.profile.email_optin is True\n assert UserSocialAuth.objects.filter(user=user).count() == 1\n assert ProgramEnrollment.objects.filter(user=user).count() == TOTAL_PROGRAMS\n\n self.command.handle(\"retire_users\", users=user_names)\n\n for user_name in user_names:\n user = User.objects.get(username=user_name)\n assert user.is_active is False\n assert user.profile.email_optin is False\n assert UserSocialAuth.objects.filter(user=user).count() == 0\n assert ProgramEnrollment.objects.filter(user=user).count() == 0", "title": "" }, { "docid": "72f0f6dbba276d756edefbe682d4490e", "score": "0.67108566", "text": "def test_multiple():\n # reset workspace\n requests.post(URL_RESET)\n user_data1 = create_data('great','evenbetter','[email protected]','asaaaaaaadfg')\n user_data2 = create_data('nice', 'evennicer', '[email protected]','sadjaaaawqnd')\n user1 = create_user(user_data1)\n #Query string for user 1\n user2 = create_user(user_data2)\n query_string1 = urllib.parse.urlencode({\n 'u_id' : user1['u_id'],\n 'token': user1['token']\n })\n query_string2 = urllib.parse.urlencode({\n 'u_id': user2['u_id'],\n 'token': user2['token']\n })\n #Users dictionary from perspective of user 1\n\n #Dictionary for the first user\n user1_info = json.load(urllib.request.urlopen(f\"{BASE_URL}/user/profile?{query_string1}\"))\n #Dictionary for the second user\n user2_info = json.load(urllib.request.urlopen(f\"{BASE_URL}/user/profile?{query_string2}\"))\n user1_dictionary = json.load(urllib.request.urlopen(f\"{BASE_URL}/users/all?{query_string1}\"))\n user2_dictionary = json.load(urllib.request.urlopen(f\"{BASE_URL}/users/all?{query_string2}\"))\n assert user1_dictionary == {\n 'users': [\n user1_info['user'],\n user2_info['user']\n ]\n }\n assert user2_dictionary == {\n 'users': [\n user1_info['user'],\n user2_info['user']\n ]\n }", "title": "" }, { "docid": "901599d6fa623e76ddb4bd3c665c2210", "score": "0.67098284", "text": "def test_user_create_duplicate(self):\n C.user.create('bill', 'pass1234', is_admin=False)\n with pytest.raises(FailedAPICallException):\n C.user.create('bill', 'pass1234', is_admin=False)", "title": "" }, { "docid": "e1c6530c922692fb1b22492a4b6a39f4", "score": "0.6702927", "text": "def test_with_password_and_display_name(self):\n self.client.force_authenticate(user=self.authenticated_user)\n data = {'email': '[email protected]', 'display_name': 'The Ultimate Tester'}\n response = self.client.post('/users/', data, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\n new_user = TimerUser.objects.get(email='[email protected]')\n\n self.assertEqual(TimerUser.objects.count(), 2)\n 
self.assertEqual(new_user.display_name, 'The Ultimate Tester')", "title": "" }, { "docid": "8cd004aeb779c4296bc20e84375747ba", "score": "0.6702477", "text": "def create_testuser(app, created_models, verbosity, **kwargs):\n pass\n # try:\n # auth_models.User.objects.get(username='mamunt')\n # except auth_models.User.DoesNotExist:\n # print 'Creating superuser, login: mamunt, password: pHecAS7s'\n # assert auth_models.User.objects.create_superuser(\n # 'mamunt',\n # '[email protected]',\n # 'pHecAS7s'\n # )\n # else:\n # print 'Test user already exists'", "title": "" }, { "docid": "0a3828bc89005313395660dd65954faf", "score": "0.6701362", "text": "def test_users_create(self):\n client = app.test_client()\n result = client.post(\n '/users',\n data={\n 'username': 'Jane',\n 'password': 'Smith',\n 'first_name': 'Jane',\n 'last_name': 'Smith',\n 'img_url': 'http://some-image.com/'\n },\n follow_redirects=True)\n self.assertIn(b'<h2>Current user ZAP users:</h2>', result.data)", "title": "" }, { "docid": "a50496d8ecffc364fa85e0c0023b1aff", "score": "0.6692581", "text": "def test_user_auth(self):\n self.new_user.save_login()\n test_user=User(\"[email protected]\",\"Theorde\",\"Mosalah\",\"1234password\")\n test_user.save_login()\n self.assertTrue(self.new_user.user_auth(\"Mosalah\",\"1234password\"))", "title": "" }, { "docid": "484c621765f2f643defc7fb78f2c116c", "score": "0.6689732", "text": "def test_any_user_fallback(self):\n user = make(get_user_model(), is_staff=False)\n other_user = make(get_user_model(), is_staff=False)\n stdout = tempfile.TemporaryFile(mode=\"w+\")\n call_command(\"login\", stdout=stdout)\n stdout.seek(0)\n content = stdout.read()\n # the first staffer by PK should be the one used\n self.assertIn(\"/cloak/login/%d\" % user.pk, content)", "title": "" }, { "docid": "8d440d01c06c852bceb87ed2db8c0485", "score": "0.6688257", "text": "def setUp(self):\n User.objects.create(username='john_smith', is_active=False)\n User.objects.create(username='tom_thompson',\n first_name='Tom', is_active=True)", "title": "" }, { "docid": "1bd639c52ef66f1c5f610c6062c414e2", "score": "0.6682655", "text": "def test_get_single_user(self):\n\n data = self.data.copy()\n \n new_user_response = self.client.post(self.users_url + \"/?format=json\", data=data, format='json')\n url = urlsplit(new_user_response.data['url'])\n user_pk = url.path.split('/')[-2]\n path = self.users_url + '/' + user_pk + '/'\n \n self.assertEqual(new_user_response.status_code, 201)\n self.assertTrue(self.client.login(email='[email protected]', password='password'))\n\n response = self.client.get(\n path,\n content_type='application/json'\n )\n\n self.assertEqual(response.status_code, 200)", "title": "" }, { "docid": "57f2799ad4e2ecbd5678a33765829c68", "score": "0.6682053", "text": "def test_create_existing_user(self):\n create_user(**self.payload)\n res = self.client.post(CREATE_USER_URL, self.payload)\n userList = get_user_model().objects.filter(email=self.payload['email'])\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(len(userList), 1)", "title": "" }, { "docid": "2cc3bba014ad5284b1adbb0d1d82a6d1", "score": "0.66765857", "text": "def setUp(self):\n self.client = Client()\n self.admin_user = get_user_model().objects.create_superuser(\n email='[email protected]',\n password='password123'\n )\n self.client.force_login(self.admin_user)\n self.user = get_user_model().objects.create_user(\n email='[email protected]',\n password='password123',\n name='testfirstname testlastname'\n )", "title": "" 
}, { "docid": "f857dca4494147df42491f05e90ce607", "score": "0.6676334", "text": "def test_create_user_credentials_totp(self):\n pass", "title": "" }, { "docid": "e55fe8eba80a148c3f58b9443f88ca4e", "score": "0.66762435", "text": "def create_test_user(self):\n res = self.client.post('api/v0.1/users',\n data=json.dumps(self.test_user),\n content_type='application/json')\n return res", "title": "" }, { "docid": "5cf2379a678ee7c5d30790f313595876", "score": "0.66740835", "text": "def test_create_user_successfully(self):\n\n body = UserPayloads.new_user('MyBestUser', 'Male')\n resp = self.user.create_user(body)\n Poc_Asserts.check_status_code(200, resp.status_code)\n resp_body = resp.json()\n Poc_Asserts.check_response_contains_values_from_dict(body, resp_body['data'])\n self.usr_id = resp_body['data']['id']\n print('✓ user created -> {}'.format(self.usr_id))\n resp = self.user.get_user_by_id(self.usr_id)\n Poc_Asserts.check_status_code(200, resp.status_code)\n resp_body = resp.json()\n Poc_Asserts.check_response_contains_values_from_dict(body, resp_body['data'])\n print('✓ got user -> {}'.format(self.usr_id))", "title": "" }, { "docid": "6e59e4d3f3d8342ca8daa30c44124cac", "score": "0.6673999", "text": "def setUp(self):\n self.client = Client()\n self.admin_user = get_user_model().objects.create_superuser(\n email=\"[email protected]\",\n password=\"test123\"\n )\n # log a user in with django authentication\n self.client.force_login(self.admin_user)\n self.user = get_user_model().objects.create_user(\n email=\"[email protected]\",\n password=\"test123\",\n name='Test USER full name'\n )", "title": "" }, { "docid": "5b631e31e0b19ec07822a07a1d3b1ce9", "score": "0.66701925", "text": "def test_create_new_user(self):\n resp_obj = UserEndpoint.create_user(self.users_url, payload=self.create_payload)\n self.assertEqual(resp_obj['resp_obj'].status_code, 201, 'User creation is not successfull. 
'\n 'Please validate url and data format')\n self.assertEqual(resp_obj['resp_data']['name'], self.create_payload['name'],\n 'user name in request and response is not matching with each other.')\n self.assertEqual(resp_obj['resp_data']['job'], self.create_payload['job'],\n 'user job in request and response is not matching with each other.')\n self.assertTrue(resp_obj['resp_data']['id'].isdigit(), 'user id does not contains only numbers.')", "title": "" }, { "docid": "877cd98b54529510906f6b4b43540d52", "score": "0.66674954", "text": "def test_login(self):\n existing_user = auth.authenticate(username='org', password='ramramtau')\n non_existing_user = auth.authenticate(username='orga', password='ramramtau')\n\n self.assertNotEqual(existing_user, None)\n self.assertEqual(non_existing_user, None)", "title": "" }, { "docid": "3cba832806a21e31c9e307676c367c43", "score": "0.66529083", "text": "def test_user_registration_with_exist_user(self):\n # create a user which will have the same username\n exist_user = User.objects.create(username='test')\n data = dict(username='test',\n email='[email protected]',\n password1='test2020',\n password2='test2020')\n response = self.client.post('/rest-auth/registration/', data, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n # The second user should not be created\n self.assertEqual(User.objects.count(), 1)\n self.assertIn(\"A user with that username already exists.\", response.content.decode('utf-8'))", "title": "" }, { "docid": "ed30b258eb7c10cef30ac2ff2e3fba86", "score": "0.66529053", "text": "def setUp(self):\n\n User.query.delete()\n Message.query.delete()\n Follows.query.delete()\n\n user1 = User.signup(**USER1_DATA)\n user2 = User.signup(**USER2_DATA)\n \n db.session.add_all([user1, user2])\n db.session.commit()\n\n self.client = app.test_client()\n # set it to the user's id\n self.user1 = user1\n self.user2 = user2", "title": "" }, { "docid": "3f1f0f51801ed0447029420442963ce5", "score": "0.6651305", "text": "def test_createuser_post(self):\n data = {\n \"username\": \"testusername333\",\n \"first_name\": \"testfirstname\",\n \"last_name\": \"testlastname\",\n \"project\": \"testproject\",\n \"component\": \"testcomponent\",\n \"password\": \"testpassword\",\n \"is_admin\": True\n\n }\n url = '/api/v1/userservice/create/'\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(TimeTrackerUser.objects.count(), 2)", "title": "" }, { "docid": "6512115cd5151017faa4201e48d37c4a", "score": "0.66456485", "text": "def setUp(self):\n self.new_user = User(\"Derrick-Nyongesa\", \"DN17w9S\")", "title": "" }, { "docid": "b568a9f4aac005c31ad246715718b483", "score": "0.66440225", "text": "def test_user_multi_session(self):\n lion = MediaUser.objects.get(username=\"lion\")\n\n sess1 = UserSession.objects.create(session_id=_MOCK_SESSION_ID1,\n user_ref=lion)\n sess2 = UserSession.objects.create(session_id=_MOCK_SESSION_ID2,\n user_ref=lion)\n\n svc = UserService.objects.create(user_ref=lion, user_session=sess1,\n service_id=None, enabled=True,\n last_report_time=(\n datetime.datetime.now()),\n auto_restart=True)\n sess1.services.append(svc)\n sess2.services.append(svc)\n\n # Check the session count for the user\n sessions = UserSession.objects.filter(user_ref=lion)\n self.assertTrue(len(sessions), len((sess1, sess2)))", "title": "" }, { "docid": "29f4fd88839a62d1aa222deb8eb9e03a", "score": "0.6640403", "text": "def test_can_create_account(self):\r\n 
url = reverse('user-list')\r\n data = {'username': 'piebe', 'password': 'bliksem'}\r\n response = self.client.post(url, data)\r\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\r\n self.assertEqual(User.objects.count(), 1)\r\n self.assertEqual(User.objects.get().username, 'piebe')", "title": "" }, { "docid": "a3fbb8ce20498be4645e846d54e71d00", "score": "0.6627961", "text": "def test_user(self):\n pass", "title": "" }, { "docid": "0f5ba5bcac09c8b727ec986c2ee1df55", "score": "0.66252494", "text": "def setUp(self):\n\n # Create a user\n test_user1 = User.objects.create_user(\n username=\"testuser1\", password=\"1X<ISRUkw+tuK\"\n )\n test_user2 = User.objects.create_user(\n username=\"testuser2\", password=\"2HJ1vRV0Z&3iD\"\n )\n\n test_user1.save()\n test_user2.save()\n\n # Give test_user2 permission to renew books.\n permission = Permission.objects.get(name=\"Set book as returned\")\n test_user2.user_permissions.add(permission)\n test_user2.save()", "title": "" }, { "docid": "0f5ba5bcac09c8b727ec986c2ee1df55", "score": "0.66252494", "text": "def setUp(self):\n\n # Create a user\n test_user1 = User.objects.create_user(\n username=\"testuser1\", password=\"1X<ISRUkw+tuK\"\n )\n test_user2 = User.objects.create_user(\n username=\"testuser2\", password=\"2HJ1vRV0Z&3iD\"\n )\n\n test_user1.save()\n test_user2.save()\n\n # Give test_user2 permission to renew books.\n permission = Permission.objects.get(name=\"Set book as returned\")\n test_user2.user_permissions.add(permission)\n test_user2.save()", "title": "" }, { "docid": "db52797651fb4870826bb9d15b3d51ce", "score": "0.66248846", "text": "def test_save_user(self):\n self.newuser.save_user()\n self.assertEqual(len(User.userslist),1)", "title": "" }, { "docid": "5705bbadf402b6648ca162bfe86baa83", "score": "0.66244245", "text": "def test_new_user(self, check_name_mock, check_id_mock):\n setup_test_environment()\n client = Client()\n payload = {\n 'auth_token': os.environ.get('ADMIN_TOKEN'),\n 'new_user_info': {\n 'username': os.environ.get('LOGIN_USERNAME'),\n 'password': os.environ.get('LOGIN_PASSWORD'),\n 'email': os.environ.get('RESET_EMAIL'),\n 'admin': False,\n 'enable': True,\n 'subenddate': os.environ.get('LOGIN_SUB_END_DATE'),\n 'userid': os.environ.get('LOGIN_USER_ID'),\n 'vineyards': [int(os.environ.get('LOGIN_USER_ID'))],\n },\n }\n response = client.post(\n '/admin/user/new',\n data=json.dumps(payload),\n content_type='application/json'\n )\n new_user_info = payload.get('new_user_info', '')\n user_id = str(new_user_info.get('userid', ''))\n username = str(new_user_info.get('username', ''))\n check_name_mock.assert_called_once_with(username)\n check_id_mock.assert_called_once_with(user_id)\n body = json.loads(response.content.decode('utf-8'))\n self.assertTrue(body is not None)\n self.assertEqual(response.status_code, 200)", "title": "" }, { "docid": "d7896e7a8f8ba2f53e2b356f4bf87e70", "score": "0.6623884", "text": "def test_save_user(self):\n self.new_user.save_user()\n self.assertEqual(len(User.users_list),1)", "title": "" }, { "docid": "fc9a0a6cf05bbfc61f4f4295e4d29609", "score": "0.66201043", "text": "def test_create_with_username(self):\n properties = self.portal.portal_properties.site_properties\n properties.manage_changeProperties(use_email_as_login=True)\n\n user = api.user.create(\n username='chuck',\n email='[email protected]',\n password='secret',\n )\n self.assertEquals(user.getUserName(), '[email protected]')\n\n properties = self.portal.portal_properties.site_properties\n 
properties.manage_changeProperties(use_email_as_login=False)\n\n user = api.user.create(\n username='chuck',\n email='[email protected]',\n password='secret',\n )\n self.assertEquals(user.getUserName(), 'chuck')", "title": "" }, { "docid": "3439190e5a02d4ce9ec793e3b297b4fc", "score": "0.6618023", "text": "def test_authenticated_user_found(self):\n\t\t# login user\n\t\tself.login_user()\n\t\t# get the user object\n\t\tuser_object = User.objects.get(username=self.user.get('username', None))\n\t\t# create a project\n\t\tself.create_project(user_object)\n\t\t# get the project object\n\t\tproject_object = Project.objects.get(project_name=self.project_name)\n\t\t# create user2\n\t\tuser2 = {\n\t\t\t'username': fake.user_name(),\n\t\t\t'email': fake.email(),\n\t\t\t'password': fake.password()\n\t\t}\n\t\tself.client.post('/api/user/', data=user2)\n\t\t# create a project invite to user2\n\t\turl = reverse('project-invites-list')\n\t\tself.client.post(\n\t\t\turl,\n\t\t\tdata={\n\t\t\t\t'email': user2.get('email', None),\n\t\t\t\t'project': project_object.project_id\n\t\t\t}\n\t\t)\n\t\t# retrieve project invite object\n\t\tpi = ProjectInvite.objects.get(email=user2.get('email', None))\n\t\turl += pi.invite_code + '/'\n\t\t# user 2 responds to the invite\n\t\tresponse = self.client.get(url)\n\t\tself.assertTrue(\n\t\t\t'User exists in the system' in response.data.get('message')\n\t\t)\n\t\tself.assertEqual(response.status_code, 200)\n\t\tself.assertEqual(response.status_text, 'OK')", "title": "" }, { "docid": "1915e12d641075fd19d9a6b981de96ad", "score": "0.6616181", "text": "def test_unauthenticated_user_found(self):\n\t\t# login user\n\t\tself.login_user()\n\t\t# get the user object\n\t\tuser_object = User.objects.get(username=self.user.get('username', None))\n\t\t# create a project\n\t\tself.create_project(user_object)\n\t\t# get the project object\n\t\tproject_object = Project.objects.get(project_name=self.project_name)\n\t\t# create user2\n\t\tuser2 = {\n\t\t\t'username': fake.user_name(),\n\t\t\t'email': fake.email(),\n\t\t\t'password': fake.password()\n\t\t}\n\t\tself.client.post('/api/user/', data=user2)\n\t\t# create a project invite to user2\n\t\turl = reverse('project-invites-list')\n\t\tself.client.post(\n\t\t\turl,\n\t\t\tdata={\n\t\t\t\t'email': user2.get('email', None),\n\t\t\t\t'project': project_object.project_id\n\t\t\t}\n\t\t)\n\t\t# retrieve project invite object\n\t\tpi = ProjectInvite.objects.get(email=user2.get('email', None))\n\t\turl += pi.invite_code + '/'\n\t\t# logout self.user1\n\t\tself.client.get('/api/api-auth/logout/')\n\t\t# user 2 responds to the invite\n\t\tresponse = self.client.get(url)\n\t\tself.assertTrue(\n\t\t\t'User exists in the system' in response.data.get('message')\n\t\t)\n\t\tself.assertEqual(response.status_code, 200)\n\t\tself.assertEqual(response.status_text, 'OK')", "title": "" }, { "docid": "9dfd910fcf8c16e068bade7d2f6c7653", "score": "0.66104096", "text": "def test_main_add_user(self):\n with self.client:\n response = self.client.post(\n '/',\n data=dict(username='abel', email='[email protected]'),\n follow_redirects=True\n )\n self.assertEqual(response.status_code, 200)\n self.assertIn(b'Todos los usuarios', response.data)\n self.assertNotIn(b'<p>No hay usuarios!</p>', response.data)\n self.assertIn(b'abel', response.data)", "title": "" }, { "docid": "17095c22e9a0608552c49042dcc8557f", "score": "0.6607727", "text": "def test_save_user(self):\n self.new_user.save_user()\n self.assertEqual(len(User.user_list),1)", "title": "" }, { "docid": 
"17095c22e9a0608552c49042dcc8557f", "score": "0.6607727", "text": "def test_save_user(self):\n self.new_user.save_user()\n self.assertEqual(len(User.user_list),1)", "title": "" }, { "docid": "384f3193eee96b958b53a97830352828", "score": "0.66054004", "text": "def test_login_user(self):\n # Register a user to login\n self.client.post('/api/v2/auth/register', data = json.dumps(self.user),\n headers = {'content-type': 'application/json'})\n\n # Login a user\n response=self.client.post(\n '/api/v2/auth/login', data=json.dumps(self.user_login),\n headers={'content-type': 'application/json'})\n # Test if login was successful with right credentials\n self.assertEqual(response.status_code, 200)\n self.assertIn('Successfuly login', str(response.data),\n msg=\"Login successful\")\n self.assertIn('access_token', str(response.data),\n msg=\"Access token issued\")\n\n # Login a user already logged in\n response = self.client.post(\n '/api/v2/auth/login', data=json.dumps(self.user_login),\n headers={'content-type': 'application/json'})\n # Test if login was successful\n self.assertEqual(response.status_code, 200)\n self.assertIn('You are already logged In', str(response.data),\n msg=\"Login successful\")\n\n # Login a user without password\n response = self.client.post(\n '/api/v2/auth/login', data=json.dumps(\n {\"email\": \"[email protected]\"}),\n headers={'content-type': 'application/json'})\n # Test if login was successful with right credentials\n self.assertIn('Enter a valid password', str(response.data),\n msg=\"Login successful\")\n\n # Login a user with wrong credentials\n response = self.client.post(\n '/api/v2/auth/login', data=json.dumps(self.user_wrong_login),\n headers={'content-type': 'application/json'})\n # Test if login was successful with wrong credentials\n self.assertEqual(response.status_code, 401)\n self.assertIn('Invalid email or password',\n str(response.data), msg=\"Login successful\")\n self.assertNotIn('access_token', str(response.data),\n msg=\"Access token not issued\")", "title": "" }, { "docid": "974f7b1f84012b56f0be33834920541e", "score": "0.6599109", "text": "def test_user_profile(self):\n with self.client as c:\n with c.session_transaction() as sess:\n sess[CURR_USER_KEY] = self.user1.username\n\n resp = c.get(f'/users/{self.user1.username}')\n\n self.assertEqual(resp.status_code, 200)\n self.assertIn('<h3>username_1</h3>', str(resp.data))\n self.assertIn('<h5>Decks: 0</h5>', str(resp.data))", "title": "" }, { "docid": "5184e69ac4b45e3dd28b8d848be672c5", "score": "0.659219", "text": "def test_user_creation(self):\n print(\"Test user creation\")\n self.assertTrue(isinstance(self.user, User))\n self.assertTrue(self.user.is_active)\n self.assertFalse(self.user.is_staff, self.user.is_superuser)\n self.assertEqual(self.user.get_user_permissions(), set(), \"No permissions have been set\")\n self.assertEqual(self.user.get_group_permissions(), set(), \"No permissions have been set\")\n self.assertEqual(self.user.get_username(), self.user.username)\n self.assertTrue(self.user.is_authenticated)\n self.assertFalse(self.user.is_anonymous)", "title": "" }, { "docid": "06e91b0d0bec991e1cabf11528d207cb", "score": "0.65911114", "text": "def setUpTestData(cls):\n super(TwoUsersWithCharacterGeneric, cls).setUpTestData()\n # pylint: disable=no-member\n cls.second_user = User.objects.get(username=\"Tim\")", "title": "" }, { "docid": "02dd786b172d306b0b9724e7bcf352c4", "score": "0.6577971", "text": "def test_create_user(self):\n user_utils.create_or_update_user('03ec8318-08ed-4aeb', 'abc', 
'SNYK')\n assert user_utils.get_user('03ec8318-08ed-4aeb').snyk_api_token == 'abc'\n assert user_utils.get_user('03ec8318-08ed-4aeb').status == UserStatus.REGISTERED.name", "title": "" }, { "docid": "5c4acebbe0626fe17d12f93e46e55a12", "score": "0.6576675", "text": "def test_login_user(self):\n \n new_user_res = self.client.post(self.users_url + \"/?format=json\", data=self.data, format='json')\n self.assertEqual(new_user_res.status_code, 201)\n\n login_data = {\n \"email\": self.data['email'],\n \"password\": self.data['password']\n }\n login_path = '/api/v1/auth/login/'\n \n # wrong credentials\n login_data2 = login_data.copy()\n login_data2['password'] = 'pass'\n invalid_info_login_res = self.client.post(path=login_path, data=login_data2, format='json')\n self.assertEqual(invalid_info_login_res.data['non_field_errors'][0], \"Unable to log in with provided credentials.\")\n\n # successful login\n res = self.client.post(path=login_path, data=login_data, format='json')\n self.assertTrue(res.data['token'])", "title": "" }, { "docid": "d9e72d8b02cd578e53abc519e1f9bdae", "score": "0.657381", "text": "def test_usuario_login_exitoso(self):\n usuario_activado= User.objects.create_user(username='usuario1', password='prueba123')\n usuario_activado.save()\n resp = self.client.post('/login/', {'username': 'usuario1', 'password': 'prueba123'})\n\n self.assertEqual(resp.status_code, 302)", "title": "" }, { "docid": "7270b1eb0361bcdb497b8dbfb5b961fb", "score": "0.6572068", "text": "def setUp(self):\n\n Follows.query.delete()\n Message.query.delete()\n User.query.delete()\n\n test_user_1 = User.signup(**TEST_USER_DATA_1)\n test_user_2 = User.signup(**TEST_USER_DATA_2)\n db.session.add_all([test_user_1, test_user_2])\n db.session.commit()\n\n self.test_user_1_id = test_user_1.id\n self.test_user_2_id = test_user_2.id\n\n self.client = app.test_client()", "title": "" }, { "docid": "35b076aa5e3a15e3d3cd95d83a1bbe74", "score": "0.65698016", "text": "def test_plaid_2_create_new_user(self):\n\n self.browser.get(os.path.join(self.live_server_url, ''))\n # Sébastien logs in to get access to the admin interface\n self.browser.find_element_by_id(\"login-status\")\n self.browser.find_element_by_id(\"header-connection\").click()\n user_input = self.browser.find_element_by_id(\"id_username\")\n user_input.send_keys('leila')\n # S. 
enters his password\n user_input_pwd = self.browser.find_element_by_id(\"id_password\")\n user_input_pwd.send_keys('@dmin1234')\n self.browser.find_element_by_xpath('//input[@type=\"submit\"]').click()\n self.browser.find_element_by_id('navbarDropdown').click()\n self.browser.find_element_by_id('new-user').click()\n username_input = self.browser.find_element_by_id('id_username')\n username_input.send_keys('Albert')\n email_input = self.browser.find_element_by_id('id_email')\n email_input.send_keys(\"[email protected]\")\n phone_number = self.browser.find_element_by_id('id_phone_number')\n phone_number.send_keys(\"01 02 03 04 05\")\n team = Select(self.browser.find_element_by_id('id_team'))\n team.select_by_value(\"1\")\n password1_input = self.browser.find_element_by_id('id_password1')\n password1_input.send_keys('test@1234')\n password2_input = self.browser.find_element_by_id('id_password2')\n password2_input.send_keys('test@1234')\n self.browser.find_element_by_xpath('//input[@type=\"submit\"]')", "title": "" }, { "docid": "6c3d648bcf94f7a1d8990034411f68d0", "score": "0.6569665", "text": "def create_users(self, count):\n total_count = 0\n for team in Team.objects.all():\n for i in range(0, count):\n _ = i\n username = \"player%d\" % total_count\n user = User.objects.create_user(username,\n username + \"@test.com\",\n password=\"testuser\")\n user.first_name = username.capitalize()\n user.last_name = \"Test\"\n user.save()\n\n profile = user.get_profile()\n profile.name = username.capitalize()\n profile.team = team\n profile.save()\n\n total_count += 1\n\n self.stdout.write(\"%d test users created.\\n\" % total_count)", "title": "" } ]
e8709b2d66cca368ee1046dd9e976275
get_portfolio_date extracts the list of stocks for the given date period
[ { "docid": "232d02e0f68a63921ef4f1602c848e54", "score": "0.7507132", "text": "def get_date(self,start_date, end_date, n_threads = 8):\n stockList = self._get_stock_list_multi(n_threads,'get_date', [start_date, end_date])\n self.portfolio = Portfolio(\n stockList,\n time_index='Date',\n symbolIx='symbol'\n )\n return self.portfolio", "title": "" } ]
[ { "docid": "754e943e07f3c2f47d78658c43324332", "score": "0.6763785", "text": "def get_period(self,period, n_threads= 8):\n stockList = self._get_stock_list_multi(n_threads,'get_period', [period])\n self.portfolio = Portfolio(\n stockList,\n time_index='Date',\n symbolIx='symbol'\n )\n return self.portfolio", "title": "" }, { "docid": "edbc5398dadde1cc2ea2a9efac9babf3", "score": "0.65951866", "text": "def generate_portfolio(stock_list,start_date, end_date):\n import pandas as pd\n\n close = get_yahoo_data(stock_list,start_date,end_date).fillna(0)\n position = pd.DataFrame(data=0,index=close.index,columns=close.columns)\n portfolio = pd.Panel({'price':close,'pos':position})\n return portfolio", "title": "" }, { "docid": "c4ebe0c4c220e307930d4f274417b265", "score": "0.6531667", "text": "def get_trading_dates(self):\n return self.get_portfolio_historic_returns().index", "title": "" }, { "docid": "acecc40d010ca5db2069ee288884ec69", "score": "0.6418591", "text": "def _get_portfolio_df(self):\n unique_stock_companies, trade_amount = self._merge_same_stock()\n stock_class_dict = {stock:Stock(stock,self.start_date,self.end_date) for stock in unique_stock_companies} #get valid stock symbols\n \n pricecols = {stock:stock_class.close_price for stock, stock_class in stock_class_dict.iteritems()} # get the closing price of each stock\n closed_price_df = pd.DataFrame(data=pricecols, columns=unique_stock_companies)\n portfolio = closed_price_df * np.array(trade_amount) # multiply each stock's price with its trading volume.\n portfolio_add_sum = portfolio.copy()\n portfolio_add_sum['Portfolio']= portfolio.sum(1) # create a column that adds up all the stock values, which is the portflio value. \n return portfolio_add_sum", "title": "" }, { "docid": "0757ad93e61348ab5f7c46123456189c", "score": "0.6198651", "text": "def get_intra_day(self,start_date, end_date, interval = '1m', n_threads = 8):\n stockList = self._get_stock_list_multi(n_threads,'get_intra_day', [start_date, end_date, interval])\n self.portfolio = Portfolio(\n stockList,\n time_index='Datetime',\n symbolIx='symbol'\n )\n return self.portfolio", "title": "" }, { "docid": "056e924159f17b4400f2213f08ebfd1d", "score": "0.5957589", "text": "def updated_portfolio(self):\n # TODO: build cumulative portfolio\n return self.perf_tracker.get_portfolio(False)", "title": "" }, { "docid": "a429252caeae2b69d92003278c51e24f", "score": "0.5926457", "text": "def get_daily_returns():\n portfolio = request.get_json(force=True)\n start_date = parse_date(request.args.get('start'))\n end_date = parse_date(request.args.get('end'))\n prices_df = prepare_dataframe(portfolio, start_date, end_date)\n performance = compute_daily_returns(prices_df)\n return performance.to_json(orient='index')", "title": "" }, { "docid": "867398be58a58b5f0f870230930021c7", "score": "0.5925239", "text": "def portfolio_eod_series(self) -> PricesSeries:\n end_of_day_date = list(map(lambda x: datetime(x.year, x.month, x.day), self._dates)) # remove time component\n portfolio_timeseries = PricesSeries(data=self._portfolio_values, index=end_of_day_date)\n return portfolio_timeseries", "title": "" }, { "docid": "b36718a814bd93234520d3ab2f172b3c", "score": "0.5924636", "text": "def get_stock_data(x):", "title": "" }, { "docid": "3002f8b8740eec552ebc9388ad41dc4c", "score": "0.5902916", "text": "def get_date(self,start_date,end_date):\n data = self.client.getHistoricalByRange(start_date,end_date)\n data = pd.DataFrame(data.to_records())\n data['Date'] = data.Date.astype(str)\n additionalInfo = 
self.client.getAdditionalInfo()\n self.stock = Stock(\n data,\n time_index='Date',\n symbol = self.symbol\n )\n return self.stock", "title": "" }, { "docid": "9bf9378244952d6c000bc38aa219efbc", "score": "0.58860785", "text": "def stockprice_range(start, end):\n \n ranges = get_date_range(start, end)\n stock = []\n for date, day in ranges:\n time.sleep(1)\n if day not in [5, 4]: #stock market is closed on fri and sat\n if not stock:\n temp = stock_today(date)\n if temp:\n stock = temp\n else:\n temp = stock_today(date, headers=False)\n if temp:\n stock += temp\n return stock", "title": "" }, { "docid": "7dd87115d3ed101c171b3e2b30138d38", "score": "0.5865342", "text": "def get_portfolio_items(self):\n portfolio = self.get_portfolio_object()\n # only take complete orders\n orders = [order for order in portfolio.orders if order.status == Status.completed]\n # get all traded stocks for stock counting\n stocks_orders = list(set([order.stock_id for order in orders]))\n portfolio = []\n for stock_id in stocks_orders:\n stock_size = self.get_stock_size(stock_id)\n if stock_size > 0:\n signal_id = [order.signal_id for order in orders if order.is_sell == 0 and\n order.stock_id == stock_id][-1]\n portfolio.append({\"stock_id\": stock_id, \"size\": stock_size,\n \"signal_id\": signal_id})\n return portfolio", "title": "" }, { "docid": "ade9747165b1e74859ca2b1b3c5df7c5", "score": "0.5862334", "text": "def describe_portfolio(self):\n portfolio_df = self._get_portfolio_df() \n describe_stat_df = portfolio_df.describe()\n describe_stat_df = describe_stat_df.rename(index = {'count':'trading days'})\n portfolio_weight_df = self._portfolio_weight()\n describe_stat_df.loc['start weight'] = portfolio_weight_df.ix[0]\n describe_stat_df.loc['end weight'] = portfolio_weight_df.ix[-1]\n \n stocks_return = portfolio_df.ix[-1] / portfolio_df.ix[0] -1\n describe_stat_df.loc['total return'] = stocks_return \n return describe_stat_df", "title": "" }, { "docid": "ba3f1252af7a8229a827b238ae9f4e98", "score": "0.58102125", "text": "def get_items_by_date(month, year):", "title": "" }, { "docid": "3ee9dd3546759275ac4159fced9b3d53", "score": "0.57966334", "text": "def get_portfolio_value(self, dates):\n \n u_symb = list(self.symbols)\n for s in self.symbols:\n if self.amount[s] == 0:\n u_symb.remove(s)\n u_symb.remove('Cash')\n \n df_value = pd.DataFrame(index=dates, columns=['Value'])\n df_data = get_data(u_symb, dates)\n \n df_value['Value'] = self.amount['Cash']\n \n for s in u_symb:\n df_value['Value'] += abs(self.amount[s]) * df_data[s] \n \n df_value = df_value.dropna()\n \n return df_value", "title": "" }, { "docid": "892affd9b2087780d25511f68ee4d49e", "score": "0.57779396", "text": "def get_value_on_date(portfolio_data, date):\n value = float(portfolio_data['Cash'])\n for stock in portfolio_data['stocks'].keys():\n stock_data = mongo.db.stocks.find_one({\"_id\": stock})\n if stock_data is not None:\n # find value of share on date\n date_entry = filter(lambda x : x['Date'] == date, stock_data['historical_data'])\n # invalid date entered.\n if date_entry == []:\n return -1\n # multiply number of shares by share price and add to value\n value += (float(date_entry[0]['Close']) * float(portfolio_data['stocks'][stock]))\n return value", "title": "" }, { "docid": "dc78a0f666fcbb772f19c30ad75f27f4", "score": "0.57770294", "text": "def cash_ladder(date,scope,portfolio):\n \n # set some commonly used strings as variables for easier access\n SDATE='settlement_date'\n CCY='instrument_uid'\n QTY='units'\n TYPE='holding_type'\n 
CUM='cum'\n ORDER='sort'\n JOIN='join'\n \n # convert the date string to a datetime\n qry_date = pd.to_datetime(date,utc=True)\n \n def check_contents(df):\n \"\"\"\n This function checks the length of a dataframe (no. rows) if it is 0 it\n returns a printed message noting that there is no holdings in the portfolio\n \n Inputs\n df (Pandas DataFrame) - The dataframe containing the portfolio positions\n \"\"\"\n \n if len(df) == 0:\n print(\n \"Portfolio {} in scope {} contains no cash on {:%Y-%m-%d}\".format(\n portfolio,scope,start_date)\n )\n\n # Run one-day earlier, this gives us the beginning of day for the \n # required qry_date\n start_date = qry_date + pd.DateOffset(days=-1)\n\n # Generate a Pandas DataFrame with then holdings of the portfolio\n df = qry_holdings(start_date,scope,portfolio)\n \n # Check that the portfolio contains holdings\n check_contents(df)\n\n # To convert holdings data frame into cash ladder\n # we need to filter out Position types which hold instruments other than cash\n df = df[df[TYPE] != 'P'].copy()\n\n # Check that the portfolio contains cash after applying the filter\n check_contents(df)\n \n # Set start date for current balances\n df[SDATE] = df[SDATE].fillna(start_date).dt.date\n\n # Aggregate the cash balances \n df = df[[CCY,SDATE,TYPE,QTY]].groupby([CCY,SDATE,TYPE],as_index=False).sum()\n\n #Populate BOD/EOD records\n\n start_date = start_date.date() # change form for working with frame data\n\n #Get unique list of dates, but make sure it includes the qry_date\n dates=pd.concat([\n df[[SDATE]], \n pd.DataFrame(\n {\n SDATE:[qry_date.date()]\n }\n )\n ], ignore_index=True, sort=False).drop_duplicates()\n \n # Get all dates greater than the start date\n dates=dates[dates[SDATE]>start_date]\n \n # Get all currencies\n ccys =df[[CCY]].drop_duplicates()\n \n ccys[JOIN]=1\n dates[JOIN]=1\n dates[QTY]=0\n dates[ORDER]=1\n dates[TYPE]='Opening Cash Balance'\n bod = ccys.merge(dates,on=JOIN)\n eod = bod.copy()\n eod[ORDER]=5\n eod[TYPE]= eod[CCY].str.slice(4) + \" Summary\"\n\n df[ORDER] = df[TYPE].map({'C':2,'A':3,'R':4})\n df[TYPE] = df[TYPE].map({'C':'Trades to settle','R':'Estimated funding','A':'Dividend'})\n\n df = pd.concat([bod,eod,df],ignore_index=True, sort=False).sort_values([CCY,SDATE,ORDER]).reset_index(drop=True)\n\n #Calculate cumulative quantity\n df[CUM] = df[[CCY,QTY]].groupby([CCY],as_index=False).cumsum()[QTY]\n\n #Put cumulative balance onto BOD/EOD records\n subset = df[df[ORDER].isin([1,5])]\n df.loc[subset.index,QTY] = subset[CUM]\n\n #Filter out T-1 balances (just used to provide BOD balance)\n\n df = df[df[SDATE] > start_date]\n\n #Pivot the data\n data = df.set_index([CCY,ORDER,TYPE,SDATE],drop=True).unstack(fill_value=0)\n return data[QTY]", "title": "" }, { "docid": "9cbcf657322ac89279729e2018a47abf", "score": "0.5749958", "text": "def get_google_portfolio_info(portfolio, days):\n if not validate_portfolio(portfolio):\n click.echo(\"Portfolio {} doesn't exists\".format(portfolio))\n sys.exit(0)\n get_name_and_symbol(portfolio, days)", "title": "" }, { "docid": "6a0424a615902986a5bd89ea826ab311", "score": "0.572419", "text": "def stockvals(df,start_date,end_date):\r\n #convert pd dataframes to strings\r\n symbols, names = df.Symbol, df.Security\r\n symbols = symbols.to_numpy()\r\n symbols = symbols.astype(str)\r\n names = names.to_numpy()\r\n names = names.astype(str)\r\n start_date_int = datetime_to_integer(start_date)\r\n #Stocks under consideration (from S&P500)\r\n n_stocks = len(symbols)\r\n #Open - Closing value of stocks (as 
float)\r\n indices = []; open_val = []; close_val = []\r\n for j in tqdm(range(0,n_stocks),position=0,desc='Loading Stock Data'):\r\n if j == 91:\r\n continue\r\n date_string=(df.iloc[j][6]).replace('-',''); #print(date_string)\r\n date_added = int(date_string[:8])\r\n if(date_added <= start_date_int):\r\n index = j\r\n indices = np.append(indices,index)\r\n quotes = web.DataReader(symbols[j], 'yahoo', start_date, end_date)\r\n opening = quotes.Open\r\n closing = quotes.Close\r\n open_val = np.append(open_val,opening,axis=0)\r\n close_val = np.append(close_val,closing,axis=0)\r\n open_val = open_val.reshape(len(indices),-1)\r\n close_val = close_val.reshape(len(indices),-1)\r\n variation = open_val-close_val\r\n return names[indices.astype(int)],symbols[indices.astype(int)],variation,close_val,open_val", "title": "" }, { "docid": "d847eb52d88d6e4fbc2d4ac1416db076", "score": "0.5717154", "text": "def PortfolioList(self):\n portfolio = self.conn.execute(\"\"\"SELECT * from portfolio\"\"\").fetchall()\n return portfolio", "title": "" }, { "docid": "012a3e312328983c9f380271b71d35b7", "score": "0.571283", "text": "def plot_stock_func(self):\r\n end_date = datetime.date.today()\r\n\r\n if self.plot_period_comboBox_portfolio.currentText() == \"week\":\r\n start_date = end_date - datetime.timedelta(days=7)\r\n\r\n elif self.plot_period_comboBox_portfolio.currentText() == \"month\":\r\n start_date = end_date - datetime.timedelta(days=30)\r\n\r\n elif self.plot_period_comboBox_portfolio.currentText() == \"year\":\r\n start_date = end_date - datetime.timedelta(days=365)\r\n\r\n elif self.plot_period_comboBox_portfolio.currentText() == \"5 years\":\r\n start_date = end_date - datetime.timedelta(days=365*5)\r\n\r\n else: start_date = end_date - datetime.timedelta(days=5)\r\n\r\n\r\n plot_parameters_list = ([self.plot_params_boxes[0].isChecked(),\r\n self.plot_params_boxes[1].isChecked(),\r\n self.plot_params_boxes[2].isChecked(),\r\n self.plot_params_boxes[3].isChecked(),\r\n self.plot_params_boxes[4].isChecked(),\r\n self.plot_params_boxes[5].isChecked()])\r\n\r\n ask_for_stock_excel(self.socket, self.ticker, start_date, end_date, plot_parameters_list)", "title": "" }, { "docid": "5115a9ab81eca2adfac678aaef198b52", "score": "0.5711843", "text": "def getHistoricStockPrices(stock, yahooEarningsDF, daysAroundEarnings = 10):\n\n yahooEarningsDF['EDClosePrice'] = np.nan\n yahooEarningsDF['EDPlus1ClosePrice'] = np.nan\n yahooEarningsDF['EDMinus1ClosePrice'] = np.nan\n yahooEarningsDF['EDPlus4ClosePrice'] = np.nan\n yahooEarningsDF['Plus4MinusED'] = np.nan\n yahooEarningsDF['Plus1MinusED'] = np.nan\n yahooEarningsDF['EDPlus4ClosePriceDiffPercent'] = np.nan\n yahooEarningsDF['EDPlus1ClosePriceDiffPercent'] = np.nan\n yahooEarningsDF['EDCloseToFwd1DayOpen'] = np.nan\n\n\n # get the Stock into yahoofinancials module\n yahoo_financials = YahooFinancials(stock)\n\n # Bar Size = Daily weekly etc\n barSizeSetting = 'daily'\n\n # Get Historical price data for each Stock past earnings Dates\n for earnDateRow in range(0, len(yahooEarningsDF)):\n # set start and end Date\n # set to format 2018-09-29 // String Dash Separator\n endDateTime = dateUtils.getDateStringDashSeprtors(yahooEarningsDF['Earnings_Date'][earnDateRow]\n +datetime.timedelta(days=daysAroundEarnings))\n startDateTime = dateUtils.getDateStringDashSeprtors(yahooEarningsDF['Earnings_Date'][earnDateRow]\n -datetime.timedelta(days=daysAroundEarnings))\n\n # Get historic stock prices from yahoofinancials within daysAroundEarnings timeframe\n 
historical_stock_prices = yahoo_financials.get_historical_price_data(startDateTime, endDateTime, barSizeSetting)\n #create DF from prices\n historical_stock_prices = pd.DataFrame(historical_stock_prices[stock]['prices'])\n\n yahooEarningsDF = getEarningsDayPricing(earnDateRow, historical_stock_prices, yahooEarningsDF, yahooEarningsDF['Earnings_Date'][earnDateRow])\n\n # calculate price and persent deltas\n yahooEarningsDF['Plus4MinusED'] = yahooEarningsDF['EDPlus4ClosePrice'] - yahooEarningsDF['EDClosePrice']\n yahooEarningsDF['Plus1MinusED'] = yahooEarningsDF['EDPlus1ClosePrice'] - yahooEarningsDF['EDClosePrice']\n\n yahooEarningsDF['EDPlus4ClosePriceDiffPercent'] = 1-(yahooEarningsDF['EDClosePrice'] / yahooEarningsDF['EDPlus4ClosePrice'])\n yahooEarningsDF['EDPlus1ClosePriceDiffPercent'] = 1-(yahooEarningsDF['EDClosePrice'] / yahooEarningsDF['EDPlus1ClosePrice'])\n\n print(\"-------------------------------------- cat \\n Meow------------------------------\")\n\n\n return yahooEarningsDF", "title": "" }, { "docid": "b593edf728f4559d11666b3579d76d2f", "score": "0.5699237", "text": "def quarterly_price(start_year ,end_year,stocks):\n \n # obtaining the TICKER symbols of the stocks\n stock = stocks\n \n # initialising start date and end date \n start_date = start_year\n end_date = end_year\n\n \n # creating dictionary to obtain quarterly_price_data\n quarterly_price_data = {}\n \n # for each TICKER symbol in stock\n for abbv in stock:\n # add TICKER symbol with .NS to obtain National Stock exchange TICKER symbol\n query = str(abbv)+\".NS\"\n \n # obtain all the price_data\n quarterly_price_data[abbv] = close_price(query,start=start_date, end=end_date )\n \n # filter quarterly price_data\n quarterly_price_data[abbv] = quarterly_price_data[abbv].resample('Q', how='last')\n\n \n return quarterly_price_data", "title": "" }, { "docid": "6560b892ee48351f21ee52a7159ba182", "score": "0.56906426", "text": "def quarterly_returns(start_year ,end_year,stocks):\n # obtaining the TICKER symbols of the stocks\n stock = stocks\n print (stock)\n \n # initialising start date and end date \n start_date = start_year\n end_date = end_year\n\n \n # creating dictionary to store the quarterly returns\n stock_data = {}\n \n # for each TICKER symbol in stock obtain the quarterly returns \n for abbv in stock:\n query = str(abbv)+\".NS\"\n \n # obtaining data \n stock_data[abbv] = close_price(query,start=start_date, end=end_date )\n \n # filtering to obtain quarterly data \n stock_data[abbv] = stock_data[abbv].resample('Q').last()\n \n # finding the returns\n stock_data[abbv] = stock_data[abbv].pct_change(1)\n \n return stock_data", "title": "" }, { "docid": "50cebacde972721659d589daa452b2ea", "score": "0.56373006", "text": "def fetch_stock_data(symbol, month):\n interval = '1min'\n slice = 'year1month' + str(month) if month <= 12 else 'year2month1' + str(month)\n apikey = config.APIKEY\n CSV_URL = 'https://www.alphavantage.co/query?function=TIME_SERIES_INTRADAY_EXTENDED&' \\\n 'symbol={symbol}&interval={interval}&slice={slice}&apikey={apikey}' \\\n .format(symbol=symbol, slice=slice, interval=interval,apikey=apikey)\n df = pd.read_csv(CSV_URL)\n df['symbol'] = symbol\n\n df['time'] = pd.to_datetime(df['time'], format='%Y-%m-%d %H:%M:%S')\n df = df.rename(columns={'time': 'time', \n 'open': 'price_open', \n 'close': 'price_close', \n 'high': 'price_high',\n 'low': 'price_low',\n 'volume': 'trading_volume'}\n )\n return [row for row in df.itertuples(index=False, name=None)]", "title": "" }, { "docid": 
"91838cdd06bc4dd50fc09971d49cd9c4", "score": "0.5626686", "text": "def all_available_dates(reference_stock=\"ANZ\"):\n # use reference_stock to quickly search the db by limiting the stocks searched\n dates = Quotation.objects.mongo_distinct(\n \"fetch_date\", {\"asx_code\": reference_stock}\n )\n ret = sorted(dates, key=lambda k: datetime.strptime(k, \"%Y-%m-%d\"))\n return ret", "title": "" }, { "docid": "fee73a7968d3c87917941da884d259c5", "score": "0.5625055", "text": "def find_quarterly_reports_until_date(self, stock_symbol, date, cik=None):\n company_info_and_docs = \\\n self._find_raw_quarterly_reports_until_date(stock_symbol, date, cik)\n \n company_info_and_docs_processed = \\\n self._process_company_info_and_docs(company_info_and_docs)\n\n if cik is not None:\n assert int(company_info_and_docs_processed[\"info\"][\"cik\"]) == int(cik)\n \n return company_info_and_docs_processed", "title": "" }, { "docid": "255548cfa1813edce32bb0ce24b61258", "score": "0.55916756", "text": "def get_stock_prices(self, ticker, time_period=False, limit=False, time_normalized=False, dataframe=False):\n cursor = self.db.cursor()\n sql = \"SELECT `date`, `open`, `high`, `low`, `close`, `volume` FROM `stock_data` WHERE `ticker` = '{}'\".format(\n ticker)\n\n if time_period:\n\n if type(time_period) != list or len(time_period) != 2:\n return False\n\n if not time_period[0]:\n\n earliest_timestamp = \"SELECT `date` FROM `stock_data` WHERE `ticker` = '{}' ORDER BY `date` ASC LIMIT 1;\".format(\n ticker)\n cursor.execute(earliest_timestamp)\n results = cursor.fetchall()\n\n if not results:\n raise Exception(\n \"Ticker value `{}` not found\".format(ticker))\n\n time_period[0] = results[0]['date']\n time_period[0] = time_period[0].strftime(\n \"%Y:%m:%d 00:00:00\")\n\n sql = sql + \\\n \" AND `date` BETWEEN '{}' AND '{}'\".format(\n time_period[0], time_period[1])\n if limit:\n sql = sql + \" LIMIT {}\".format(limit)\n sql = sql + \" ORDER BY `date` DESC;\"\n cursor.execute(sql)\n results = cursor.fetchall()\n cursor.close()\n\n if not results:\n\n return False\n\n if time_normalized:\n\n result_datetimes = [x['date'] for x in results]\n norm_results = []\n times = [results[0]['date'], results[len(results) - 1]['date']]\n\n diff = (times[0] - times[1]).days\n for d in range(diff):\n dt = times[len(times) - 1] + datetime.timedelta(days=d)\n if dt not in result_datetimes:\n norm_results.append({\n \"ticker\": ticker,\n \"date\": dt,\n \"open\": np.nan,\n \"high\": np.nan,\n \"low\": np.nan,\n \"close\": np.nan,\n \"volume\": np.nan})\n else:\n idx = result_datetimes.index(dt)\n norm_results.append(results[idx])\n results = norm_results\n\n if dataframe:\n results = DataFrame(results)\n\n return results", "title": "" }, { "docid": "5504c57449f4e60349a23885b13fe742", "score": "0.55884355", "text": "def get_period(self,period):\n data = self.client.getHistoricalByPeriod(period)\n data = pd.DataFrame(data.to_records())\n data['Date'] = data.Date.astype(str)\n additionalInfo = self.client.getAdditionalInfo()\n self.stock = Stock(\n data,\n time_index='Date',\n symbol=self.symbol\n )\n return self.stock", "title": "" }, { "docid": "c276a954a182f50c63c2459c7038437e", "score": "0.5586912", "text": "def get_historical_data(stock_name, date = get_nth_date(), days = 1):\n\n engine = get_db_connection()\n query = (\"SELECT * FROM\" + \" \" + surround_mysql_quotes(stock_name) + \" \" + \"WHERE\" + \" \" + \\\n surround_mysql_quotes(stock_name) + \".date\" + \" \" + \"< DATE(\" + surround_double_quotes(date) + \") order by\" + 
\" \" \n + surround_mysql_quotes(stock_name) + \".date\" + \" \" + \"DESC LIMIT\" + \" \" + str(days))\n # print query\n return engine.run(query)", "title": "" }, { "docid": "8883c6dec28cf004acc8988cf48295fa", "score": "0.55820256", "text": "def get_stock_history(self):\n\t\tstock_history = finance.fetch_historical_yahoo(self.symbol, self.start, self.end)\n\t\tr = mlab.csv2rec(stock_history)\n\t\tr.sort()\n\n\t\tdates = r.date\n\t\tprices = r.adj_close\n\n\t\t# convert to epoch time for highcharts\n\t\tdates = [(int(time.mktime(time.strptime(date.strftime(\"%Y-%m-%d\"), \"%Y-%m-%d\"))) - time.timezone)*1000 for date in dates]\n\n\t\treturn dates, prices", "title": "" }, { "docid": "2870a13af788a1eb6e29b557c9892abb", "score": "0.55813926", "text": "def get_historical_prices(symbol, start_date, end_date):\n params = urlencode({\n 's': symbol,\n 'a': int(start_date[5:7]) - 1,\n 'b': int(start_date[8:10]),\n 'c': int(start_date[0:4]),\n 'd': int(end_date[5:7]) - 1,\n 'e': int(end_date[8:10]),\n 'f': int(end_date[0:4]),\n 'g': 'd',\n 'ignore': '.csv',\n })\n url = 'http://real-chart.finance.yahoo.com/table.csv?%s' % params\n req = Request(url)\n resp = urlopen(req)\n print('download success')\n content = str(resp.read().decode('utf-8').strip())\n daily_data = content.splitlines()\n hist_dict = []\n keys = daily_data[0].split(',')\n for day in daily_data[1:]:\n day_data = day.split(',')\n date = day_data[0]\n hist_dict.append(\\\n {keys[0]: day_data[0],\n keys[1]: day_data[1],\n keys[2]: day_data[2],\n keys[3]: day_data[3],\n keys[4]: day_data[4],\n keys[5]: day_data[5],\n keys[6]: day_data[6]})\n return hist_dict", "title": "" }, { "docid": "f415ae21be592210ba75ccd3641662e4", "score": "0.55682564", "text": "def get_stock_intraday_info(cls, stock_symbol: str) -> list:\n if stock_symbol.upper() in cls.cash_stocks:\n return []\n\n intraday_url = cls.intraday_url + \"/\".join([cls.time_interval, stock_symbol])\n params = {\"apikey\": cls.api_token}\n _intraday_trades = []\n try:\n res = requests.get(intraday_url, params=params)\n if res:\n _intraday_trades = res.json()\n else:\n pass\n\n except requests.exceptions.ConnectionError:\n logger.info(f\"connection error: {intraday_url} {stock_symbol} {params}\")\n\n # if there is intraday info, convert date string and provide time and\n # create list of trade_info tuples\n trade_tuple = namedtuple(\"trade_tuple\", \"date open close low high volume\")\n intraday_trades = []\n min_low = None\n max_high = None\n if _intraday_trades:\n date_trade = datetime.datetime.strptime(\n _intraday_trades[0].get(\"date\"), \"%Y-%m-%d %H:%M:%S\"\n ).date()\n\n for i, _trade in enumerate(_intraday_trades):\n _tradetime = datetime.datetime.strptime(\n _trade.get(\"date\"), \"%Y-%m-%d %H:%M:%S\"\n )\n if _tradetime.date() != date_trade:\n break\n\n # adjust cumulative volumes\n try:\n volume = _trade.get(\"volume\") - _intraday_trades[i + 1].get(\n \"volume\"\n )\n if volume < 0:\n volume = 0\n except IndexError:\n volume = 0\n\n intraday_trades.append(\n trade_tuple(\n date=_tradetime,\n open=_trade.get(\"open\"),\n close=_trade.get(\"close\"),\n low=_trade.get(\"low\"),\n high=_trade.get(\"high\"),\n volume=volume,\n )\n )\n min_low = get_min(_trade.get(\"low\"), min_low)\n max_high = get_max(_trade.get(\"high\"), max_high)\n\n intraday_trades = sorted(intraday_trades, key=lambda k: k.date)\n\n # add start and end time\n initial_open = intraday_trades[0].open\n last_close = intraday_trades[-1].close\n start_time = intraday_trades[0].date.strftime(\"%Y-%m-%d\") + \" 
08:00:00\"\n intraday_trades.insert(\n 0,\n trade_tuple(\n date=datetime.datetime.strptime(start_time, \"%Y-%m-%d %H:%M:%S\"),\n open=None,\n close=None,\n low=None,\n high=None,\n volume=None,\n ),\n )\n end_time = intraday_trades[-1].date.strftime(\"%Y-%m-%d\") + \" 18:00:00\"\n intraday_trades.append(\n trade_tuple(\n date=datetime.datetime.strptime(end_time, \"%Y-%m-%d %H:%M:%S\"),\n open=initial_open,\n close=last_close,\n low=min_low,\n high=max_high,\n volume=None,\n )\n )\n\n else:\n # if not succeeded try with fallback site on marketstack\n intraday_trades = get_intraday_marketstack(stock_symbol)\n\n # or (last resort) try alpha vantage\n if not intraday_trades:\n intraday_trades = get_intraday_alpha_vantage(stock_symbol)\n\n return intraday_trades", "title": "" }, { "docid": "9b15585cfcbd7596b0c6004bfdc5b4a4", "score": "0.554499", "text": "def get_portfolio(self, account_id):\n method = \"GET\"\n url = self.base_url + f\"accounts/{account_id}/portfolio\"\n return self.get_essential_details(method, url)", "title": "" }, { "docid": "756a9a525609c129fe44b11e6194e959", "score": "0.5542699", "text": "def get_dates(decks: QuerySet) -> List[date]:\n return list(decks.values_list(\"date_created\", flat=True).distinct())", "title": "" }, { "docid": "06418ae4eb2a68849b5bb9bc3e6112ff", "score": "0.5538388", "text": "def download_historical_prices(symbol):\r\n auth_token = 'g1CWzGxxg2WxNVbV5n9y'\r\n\r\n # add exchange prefix to symbol name\r\n futures_info = get_futures_info()\r\n prefix = futures_info['Exchange'].loc[futures_info['Symbol'] == symbol[:-5]].values[0] # strip off month and year\r\n full_name = prefix + '/' + symbol\r\n\r\n prices = pd.DataFrame()\r\n try:\r\n # download prices from quandl using full_name\r\n prices = quandl.get(full_name, authtoken=auth_token)\r\n prices = prices['Settle']\r\n # add contract_sort in order to sort by year then by month using contract name\r\n prices = pd.DataFrame({'Settle': pd.Series(prices),\r\n 'Contract': symbol,\r\n 'Contract_Sort': symbol[-4:] + symbol[-5:-4] + symbol[:-5]})\r\n except:\r\n pass\r\n return prices", "title": "" }, { "docid": "304fb98b8b143ffb97af4897eee9d648", "score": "0.55348414", "text": "def list_price_date_by_sectors(query_params):\n\n price_date = split_date(query_params.get(\"price_date\"))\n option_search = query_params.get(\"option_search\")\n\n try:\n price_year = int(price_date[0])\n price_month = int(price_date[1])\n price_day = int(price_date[2])\n if option_search == \"default\":\n last_date = PriceList.objects.order_by(\"-price_date\")[:1][0]\n s_date = last_date.price_date\n else:\n s_date = datetime(\n year=price_year,\n month=price_month,\n day=price_day,\n hour=0,\n minute=0,\n second=0,\n ).replace(tzinfo=pytz.UTC)\n\n except Exception:\n raise APIException(detail=\"Provide proper date\")\n\n main_sector_list = MainSector.objects.all()\n date_sector_list = []\n id = 0\n for main_sector in main_sector_list:\n sub_sector_list = SubSector.objects.filter(main_sector_id=main_sector.id)\n for sub_sector in sub_sector_list:\n date_sector = {}\n\n stocks_involved = Stock.objects.filter(\n sub_sector_id=sub_sector.id\n ).values_list(\"stock_code\", flat=True)\n stocks_involved_str = \"','\".join(stocks_involved)\n\n query_set = (\n f\"select * from stock_maintain_pricelist where price_date='{s_date}' \"\n f\"and trim(sec_code) in ('{stocks_involved_str}') order by sec_code\"\n )\n price_list_objects = PriceList.objects.raw(query_set)\n if price_list_objects:\n id += 1\n date_sector[\"id\"] = id\n 
date_sector[\"current_date\"] = s_date\n date_sector[\"sub_sector\"] = sub_sector\n date_sector[\"sub_sector_name\"] = sub_sector.name\n date_sector[\"main_sector_name\"] = main_sector.name\n date_sector[\"main_sector\"] = main_sector\n date_sector[\"price_list\"] = price_list_objects\n\n date_sector_list.append(date_sector)\n\n return date_sector_list", "title": "" }, { "docid": "f9258cb77d8ef88c7325c3eb78408b5f", "score": "0.55339605", "text": "def get_stock_history(i_stock,\n date_start,\n date_end):\n df = yf.download(i_stock, start=date_start, end=date_end, actions=True)\n df['record_date'] = df.index\n df['record_date'] = pd.to_datetime(df['record_date']).dt.date\n df.rename(columns={'Close':'close_price', 'Dividends':'dividends', 'Stock Splits':'stock_split'},\n inplace=True)\n return df", "title": "" }, { "docid": "4e8848d9d5ef9c9b2338a6e9b1f8cc89", "score": "0.5528521", "text": "def backtest_portfolio(self):\n\n self.positions = pd.DataFrame(index=self.signals.index).fillna(0.0)\n self.positions['positioninrs'] = self.stocks_per_trade*self.signals['signal'] # each position has stocks equal to stocks per trade specified\n self.portfolio = self.positions.multiply(self.close_price, axis=0) # multiply positions with stock price on that day to get value\n self.pos_diff = self.positions.diff()\n self.portfolio['holdings'] = (self.positions.multiply(self.close_price,axis=0)).sum(axis=1) # total amount in holdings\n self.portfolio['cash'] = self.capital - (self.pos_diff.multiply(self.close_price,axis=0)).sum(axis=1).cumsum() # total in cash\n self.portfolio['total'] = self.portfolio['cash'] + self.portfolio['holdings'] # total value\n self.portfolio['returns'] = self.portfolio['total'].pct_change() # returns as percentage change\n del self.portfolio['positioninrs']", "title": "" }, { "docid": "3f1088ac132d4177ddb94a8da3494788", "score": "0.55226076", "text": "def _find_raw_quarterly_reports_until_date(self, stock_symbol, date, cik=None):\n # @todo: wait until elements are available instead of hard wait times\n if cik is None:\n self._search_company_ticker(stock_symbol)\n\n time.sleep(5)\n cik = self._get_cik_number(self._find_quarterly_financial_reports_on_docs_page()[\"info\"])\n\n self._search_financial_forms_by_cik(cik, \"10-Q\")\n time.sleep(5)\n\n company_info_and_docs = None\n \n while True:\n company_info_on_this_page = self._find_quarterly_financial_reports_on_docs_page()\n time.sleep(5)\n \n if company_info_and_docs is None:\n company_info_and_docs = {\"info\": company_info_on_this_page[\"info\"],\n \"docs\": company_info_on_this_page[\"forms_10Q\"]}\n \n else:\n company_info_and_docs[\"docs\"].extend(company_info_on_this_page[\"forms_10Q\"])\n\n # break if passed the date\n if len(company_info_and_docs[\"docs\"]) > 0:\n last_date = company_info_and_docs[\"docs\"][-1][\"date\"]\n if datetime.date.fromisoformat(last_date) < date:\n break\n \n # move to the next page\n last_page = not self._click_next_page()\n time.sleep(5)\n \n if last_page:\n break\n \n return company_info_and_docs", "title": "" }, { "docid": "469ff2d0d2753ef1620d2d426269f0ce", "score": "0.5514048", "text": "def get_stock_data(lista_tickers, creds, previous_date=False):\n end_date = '{}-{}-{}'.format(now.year, now.month, now.day)\n for ticker in lista_tickers:\n if previous_date:\n start_date = get_last_date(ticker.replace('.','_').replace('-','_'), creds)\n logging.warning(\"start_date: %s end_date: %s\" % (start_date, end_date))\n else:\n start_date = end_date\n\n try:\n panel_data = data.DataReader(ticker, 
'yahoo', start_date, end_date)\n table_name = ticker.replace('.','_').replace('-','_')\n create_new_stock_table(table_name, creds)\n update_in_db(panel_data, table_name, creds)\n\n except Exception as error:\n logging.error(\"Error al tratar de obtener datos de %s: %s\" % (ticker,error))", "title": "" }, { "docid": "635cff647ab1d521081e89f97e20af3e", "score": "0.5490171", "text": "def market_analysis_stock(query_params):\n dict_result = {}\n try:\n sec_code = query_params.get(\"sec_code\")\n if sec_code is not None:\n rs = PriceAnalysisTemp.objects.filter(sec_code=sec_code).first()\n price_data = PriceList.objects.filter(sec_code=sec_code).order_by(\n \"-price_date\"\n )[:1][0]\n\n if rs:\n\n f52_week_monday = price_data.price_date - timedelta(\n days=price_data.price_date.weekday(), weeks=52\n )\n # f52_week_friday = (\n # price_data.price_date\n # - timedelta(days=price_data.price_date.weekday())\n # - timedelta(days=-4, weeks=52)\n # )\n price_group = PriceList.objects.filter(\n sec_code=sec_code,\n price_date__gte=f52_week_monday,\n price_date__lte=price_data.price_date,\n )\n first_day_of_year = date(date.today().year, 1, 1)\n year_to_date = get_first_working_day_of_month(first_day_of_year)\n price_year_to_date = get_price_data_period(sec_code, year_to_date)\n year_change = price_data.price - price_year_to_date\n if len(price_group) != 0:\n dict_52_week_max = price_group.aggregate(Max(\"price\"))\n dict_52_week_min = price_group.aggregate(Min(\"price\"))\n max_52_week = dict_52_week_max[\"price__max\"]\n min_52_week = dict_52_week_min[\"price__min\"]\n else:\n max_52_week = 0.0\n min_52_week = 0.0\n # if rs.price_year_to_date_cent is None:\n rs.price_year_to_date_cent = round(\n (year_change / price_year_to_date) * 100, 2\n )\n if rs.price_one_year is None:\n date_returned = get_x_days_ago(price_data.price_date, 365)\n rs.price_one_year = get_price_data_period(sec_code, date_returned)\n rs.one_year_cent = round(\n ((rs.price_one_year - rs.price) / rs.price) * 100, 2\n )\n if rs.price_six_months is None:\n date_returned = get_x_days_ago(price_data.price_date, 183)\n rs.price_six_months = get_price_data_period(sec_code, date_returned)\n rs.six_months_cent = round(\n ((rs.price_six_months - rs.price) / rs.price) * 100, 2\n )\n\n if rs.price_three_months is None:\n date_returned = get_x_days_ago(price_data.price_date, 92)\n rs.price_three_months = get_price_data_period(\n sec_code, date_returned\n )\n rs.three_months_cent = round(\n ((rs.price_three_months - rs.price) / rs.price) * 100, 2\n )\n\n if rs.price_one_week is None:\n date_returned = get_x_days_ago(price_data.price_date, 7)\n rs.price_one_week = get_price_data_period(sec_code, date_returned)\n rs.one_week_cent = round(\n ((rs.price_one_week - rs.price) / rs.price) * 100, 2\n )\n\n dict_result = {\n \"id\": rs.id,\n \"sec_code\": rs.sec_code,\n \"price\": rs.price,\n \"min_year\": rs.min_year,\n \"max_year\": rs.max_year,\n \"min_six_months\": rs.min_six_months,\n \"max_six_months\": rs.max_six_months,\n \"min_three_months\": rs.min_three_months,\n \"max_three_months\": rs.max_three_months,\n \"min_one_week\": rs.min_one_week,\n \"max_one_week\": rs.max_one_week,\n \"price_one_week\": rs.price_one_week,\n \"price_three_months\": rs.price_three_months,\n \"price_six_months\": rs.price_six_months,\n \"price_one_year\": rs.price_one_year,\n \"one_week_cent\": rs.one_week_cent,\n \"three_months_cent\": rs.three_months_cent,\n \"six_months_cent\": rs.six_months_cent,\n \"one_year_cent\": rs.one_year_cent,\n 
\"price_year_to_date_cent\": rs.price_year_to_date_cent,\n \"previous_price\": price_data.price,\n \"current_price\": price_data.price_close,\n \"today_change\": float(f\"{price_data.x_change:.2f}\"),\n \"year_change\": float(f\"{year_change:.2f}\"),\n \"today_sign\": price_data.offer_bid_sign,\n \"today_volume\": price_data.volume,\n \"today_day_range\": f\"{price_data.x_low} - {price_data.x_high}\",\n \"today_52_week_range\": f\"{min_52_week} - {max_52_week}\",\n }\n\n # data_set['results'] = dict_result\n # data_set['date'] = date_price\n\n except Exception:\n raise APIException(detail=\"Provide proper date or sec code\")\n\n return dict_result", "title": "" }, { "docid": "a780abc73233736c53e2dbd6d292ffea", "score": "0.54869044", "text": "def get_historical_prices(self,start,end,frequency):\n equity_financials = YahooFinancials(self.ticker)\n equity_data = equity_financials.get_historical_price_data(start, end, frequency)[self.ticker]['prices']\n\n df = pd.DataFrame(columns=['date','high','low','open','close','adjclose'])\n \n for data in equity_data:\n df = df.append({\n 'date': data['formatted_date'],\n 'high': data['high'],\n 'low': data['low'],\n 'open': data['open'],\n 'close': data['close'],\n 'adjclose': data['adjclose']\n }, ignore_index = True\n )\n df = df.set_index('date')\n return df", "title": "" }, { "docid": "a780abc73233736c53e2dbd6d292ffea", "score": "0.54869044", "text": "def get_historical_prices(self,start,end,frequency):\n equity_financials = YahooFinancials(self.ticker)\n equity_data = equity_financials.get_historical_price_data(start, end, frequency)[self.ticker]['prices']\n\n df = pd.DataFrame(columns=['date','high','low','open','close','adjclose'])\n \n for data in equity_data:\n df = df.append({\n 'date': data['formatted_date'],\n 'high': data['high'],\n 'low': data['low'],\n 'open': data['open'],\n 'close': data['close'],\n 'adjclose': data['adjclose']\n }, ignore_index = True\n )\n df = df.set_index('date')\n return df", "title": "" }, { "docid": "f8f838d7e6cbb1fcc8eaab83cdb4e250", "score": "0.5480303", "text": "def list_price_range(query_params):\n date_start = query_params.get(\"start_date\").split(\"-\")\n date_end = query_params.get(\"end_date\").split(\"-\")\n stock = int(query_params.get(\"stock\"))\n try:\n s_year = int(date_start[0])\n s_month = int(date_start[1])\n s_day = int(date_start[2])\n e_year = int(date_end[0])\n e_month = int(date_end[1])\n e_day = int(date_end[2])\n s_date = datetime(\n year=s_year, month=s_month, day=s_day, hour=0, minute=0, second=0\n ).replace(tzinfo=pytz.UTC)\n # s_date = datetime(year=2019, month=1, day=22, hour=0, minute=0, second=0)\n e_date = datetime(\n year=e_year,\n month=e_month,\n day=e_day,\n hour=0,\n minute=0,\n second=0,\n tzinfo=pytz.UTC,\n ).replace(tzinfo=pytz.UTC)\n\n except Exception:\n raise APIException(detail=\"Provide proper dates\")\n return PriceList.objects.filter(\n price_date__gte=s_date, price_date__lt=e_date, stock_id=stock\n )", "title": "" }, { "docid": "8b349175b8a8ae5ed45b35bd303c0979", "score": "0.54419833", "text": "def get_profit_for_ticker_in_range(ticker_name,from_date,to_date, accumulated=False):\r\n data_close_ticker= get_data_for_ticker_in_range(ticker_name,from_date,to_date,['close'])\r\n data_close_ticker= data_close_ticker.sort_index(ascending=True)\r\n lst_close= list(data_close_ticker['close'])\r\n \r\n lst_daily_profit = [0]\r\n for i in range(len(lst_close)-1):\r\n lst_daily_profit.append(lst_close[i+1]/lst_close[i]-1)\r\n lst_cumulative_profit = lst_daily_profit[:2]\r\n 
last = lst_cumulative_profit[-1]\r\n for i in range (2,len(lst_daily_profit)):\r\n lst_cumulative_profit.append(((1+last)*(1+lst_daily_profit[i])-1))\r\n last= lst_cumulative_profit[-1]\r\n data_close_ticker['daily_profit']= lst_daily_profit\r\n data_close_ticker['cumulative_profit']= lst_cumulative_profit\r\n\r\n if accumulated:\r\n return data_close_ticker[['ticker_name','cumulative_profit']]\r\n return data_close_ticker[['ticker_name','daily_profit']]", "title": "" }, { "docid": "580fabeb778a048d610e7a6c7f9592d4", "score": "0.5432853", "text": "def get_stock_data(tickers, start_date, end_date):\n\n # Get stock data as pandas dataframe\n res = {}\n for ticker in tickers:\n # covert the dataframe to a dict\n res[ticker] = web.DataReader(ticker,'yahoo', start_date, end_date).head(1).to_dict('list')\n \n return res", "title": "" }, { "docid": "e7532b73f530acdbd37b9d8b588d562c", "score": "0.5432611", "text": "async def get_stock_data(\n symbol: str = Depends(check_symbol), db: Session = Depends(get_db), days: int = 90\n) -> List[schemas.TimeSeriesAPIout]:\n return crud.stock.get_time_series(db=db, symbol=symbol, days=days)", "title": "" }, { "docid": "e7ed61cfa0c72bd8ac94ead27838942a", "score": "0.54316235", "text": "def compute_portvals( \t\t \t \t\t \t \t\t\t \t\t \t\t\t \t \t \t\t \t\t \t\n df_trades_in,\n symbol=\"AAPL\", \t\t \t \t\t \t \t\t\t \t\t \t\t\t \t \t \t\t \t\t \t\n start_val=100000, \t\t \t \t\t \t \t\t\t \t\t \t\t\t \t \t \t\t \t\t \t\n commission=9.95, \t\t \t \t\t \t \t\t\t \t\t \t\t\t \t \t \t\t \t\t \t\n impact=0.005, \t\t \t \t\t \t \t\t\t \t\t \t\t\t \t \t \t\t \t\t \t\n): \t\t \t \t\t \t \t\t\t \t\t \t\t\t \t \t \t\t \t\t \t\n # this is the function the autograder will call to test your code \t\t \t \t\t \t \t\t\t \t\t \t\t\t \t \t \t\t \t\t \t\n # NOTE: orders_file may be a string, or it may be a file object. 
Your \t\t \t \t\t \t \t\t\t \t\t \t\t\t \t \t \t\t \t\t \t\n # code should work correctly with either input \t\t \t \t\t \t \t\t\t \t\t \t\t\t \t \t \t\t \t\t \t\n # TODO: Your code here \t\t \t \t\t \t \t\t\t \t\t \t\t\t \t \t \t\t \t\t \t\n \t\t \t \t\t \t \t\t\t \t\t \t\t\t \t \t \t\t \t\t \t\n # In the template, instead of computing the value of the portfolio, we just \t\t \t \t\t \t \t\t\t \t\t \t\t\t \t \t \t\t \t\t \t\n # read in the value of IBM over 6 months \t\t \t \t\t \t \t\t\t \t\t \t\t\t \t \t \t\t \t\t \t\n\n df_orders = df_trades_in.copy()\n df_orders.index.name = 'Date'\n\n # print(df_orders)\n\n # sort oders df and find start and end dates\n df_orders = df_orders.sort_values(by='Date',ascending=True)\n # df_orders = df_orders.sort_values(by=index,ascending=True)\n start_date = df_orders.index[0]\n end_date = df_orders.index[-1]\n\n # create prices dataframe\n stocks = [symbol]\n alldates = pd.date_range(start_date, end_date)\n df_prices = get_data(stocks, alldates, addSPY=True)\n df_prices['Cash'] = 1.0\n del df_prices['SPY']\n\n # create trades dataframe\n df_trades = df_prices.copy()\n df_trades[:] = 0\n\n # create trades dataframe\n for index, row in df_orders.iterrows():\n\n shares = row['Trades']\n price = df_prices.loc[index][symbol]\n\n # if row['Order'] == 'BUY':\n # cash = -1 * price * shares * (1 + impact) - commission\n # else:\n # cash = price * shares * (1 - impact) - commission\n # shares *= -1\n\n if np.abs(shares) > 0:\n cash = -1 * price * shares * (1 - impact) - commission\n else:\n cash = 0\n # if shares > 0:\n # shares *= -1\n\n df_trades.loc[index][symbol] += shares\n df_trades.loc[index]['Cash'] += cash\n\n\n # create holdings dataframe\n df_holdings = df_trades.copy()\n df_holdings.loc[start_date]['Cash'] += start_val\n df_holdings = df_holdings.cumsum()\n # print(df_holdings)\n\n\n # create values dataframe and portvals array\n df_values = df_prices * df_holdings\n portvals = df_values.sum(axis=1)\n \n return portvals", "title": "" }, { "docid": "9abd929e0744c5a2461e561de1b373c8", "score": "0.54121494", "text": "def getDailyDataOfStock(stockfilename, dayslength):\r\n filepath = findDataDir()\r\n fullpathname = filepath + \"\\\\\" + stockfilename\r\n msele = daysDataOfStock(fullpathname, dayslength)\r\n nptmpArray = msele.datumLast\r\n #print nptmpArray.T\r\n closeList = nptmpArray.T[4]\r\n closeList = np.vstack([closeList, msele.getTA(6) / 9000.0]) #volumn\r\n dayslst = closeList.tolist()", "title": "" }, { "docid": "cdf008a463c0ce234441ca6e88cc577b", "score": "0.5398998", "text": "def get_dates(decks: QuerySet) -> List[date]:\n dates = list(decks.values_list(\"date_created\", flat=True).distinct())\n return sorted(dates)", "title": "" }, { "docid": "8326419bfff9d3bbf1b105aee9bd6a78", "score": "0.5389918", "text": "def get_forecast_inputs(symbol, start_year=2015, end_year=dt.date.today().year):\r\n futures_info = get_futures_info()\r\n price_source = futures_info.loc[futures_info['Symbol'] == symbol]['PriceSource'].values[0]\r\n if price_source != 'csv':\r\n full_prices = compile_historical_prices(symbol, start_year, end_year)\r\n # get data for active contract\r\n df = get_active_prices(symbol, full_prices)\r\n # add month and year to data frame\r\n df['Month'] = df['Contract'].str[-5]\r\n df['Year'] = pd.to_numeric(df['Contract'].str[-4:])\r\n\r\n # add previous month\r\n futures_info = get_futures_info()\r\n months = futures_info['ExpMonths'].loc[futures_info['Symbol'] == symbol].values[0]\r\n for i, month in enumerate(months):\r\n if i == 0: # 
first month\r\n df.loc[df['Month'] == month, 'PrevMonth'] = months[-1]\r\n df.loc[df['Month'] == month, 'Year'] = df['Year']-1\r\n else:\r\n df.loc[df['Month'] == month, 'PrevMonth'] = months[i-1]\r\n\r\n # add data for prev contract\r\n df['PrevContract'] = df['Symbol'] + df['PrevMonth'] + df['Year'].astype(str)\r\n # add raw settle prices for prev contract\r\n for contract in df['PrevContract'].unique()[::-1]:\r\n prices = full_prices[full_prices['Contract'] == contract]\r\n df.loc[df['PrevContract'] == contract, 'PrevSettleRaw'] = prices['Settle']\r\n # remove NaN rows as some contracts have longer histories than others\r\n df = df[pd.notnull(df['PrevSettleRaw'])]\r\n else:\r\n df = get_active_prices_csv(symbol)\r\n\r\n # add data for return volatility based on raw price data\r\n df['ReturnDay'] = df['Settle'] - df['Settle'].shift(1)\r\n df['ReturnDayPct'] = df['Settle'] / df['Settle'].shift(1) - 1.0\r\n df = df[1:] # drop first day without return\r\n df['ReturnDaySq'] = df['ReturnDay'] ** 2\r\n df['Variance'] = pd.ewma(df['ReturnDaySq'], span=36)\r\n df['PriceVolatility'] = df['Variance'] ** 0.5\r\n df['PriceVolatilityPct'] = df['PriceVolatility'] / df['SettleRaw']\r\n return df", "title": "" }, { "docid": "f246c5ca08b54fb8bd19ee53da2d28bf", "score": "0.5389121", "text": "def __getDateList(self):\n\t\tstart = datetime.date(2016, 01, 01)\n\t\tend = datetime.date(2016, 04, 30)\n\t\tdaysRange = (end - start).days\n\t\treturn [str(start + datetime.timedelta(days = i)) for i in range(daysRange + 1)]", "title": "" }, { "docid": "ab55134b521510acbfecede5a1cf70b0", "score": "0.53687", "text": "def get_stock_data_current(tick, days=50):\n _data = yahoo_finance.Share(tick)\n\n # today_date = datetime.today() - timedelta(days=1)\n today_date = datetime.today()\n today_date = str(today_date.date())\n # print today_date\n previous_date = datetime.today() - timedelta(days=days)\n previous_date = str(previous_date.date())\n # print previous_date\n\n df = _data.get_historical(start_date=previous_date, end_date=today_date)\n\n df = pd.DataFrame(df)\n df['Date'] = pd.to_datetime(df['Date'], errors='coerce')\n df.index = df['Date']\n df.sort_index(inplace=True)\n\n # df = df[['Date', 'Open', 'High', 'Low', 'Adj_Close', 'Volume']]\n df = df[['Open', 'High', 'Low', 'Adj_Close', 'Volume']]\n # Changing the data types of columns to numeric\n for cols in df:\n df[cols] = pd.to_numeric(df[cols], errors='coerce')\n\n return df", "title": "" }, { "docid": "62ad6f89f38f47625b788e9ac1fa33f9", "score": "0.5363225", "text": "def get_historical_data(self, start_date):\n\n historical_data = {}\n for stock_data in self.history_collection.find({\"date\": {\"$gt\": start_date}}, {\"date\": 1, \"adjClose\": 1, \"symbol\": 1, \"_id\": 0}).sort('date', pymongo.ASCENDING):\n if stock_data['symbol'] not in historical_data:\n historical_data[stock_data['symbol']] = {'X': [], 'Y': []}\n historical_data[stock_data['symbol']]['X'].append([stock_data['date'].toordinal()])\n historical_data[stock_data['symbol']]['Y'].append(stock_data['adjClose'])\n return historical_data", "title": "" }, { "docid": "89be3e5abee7d91333b6cff7c311c6bc", "score": "0.53356737", "text": "def getStock(stockName):\n\n\turl = \"https://www.alphavantage.co/query?\"\n\t\n\tpayload = {\"function\": \"TIME_SERIES_DAILY_ADJUSTED\", 'symbol': stockName, \\\n 'apikey': API_KEY}\n\t\n\tresponse = requests.get(url, params=payload)\n\n\tdaily = response.json()\n\t\n\treturn daily", "title": "" }, { "docid": "c0c8e4ab4074bf2f418b14db3b21cbcc", "score": "0.5316937", 
"text": "def get_stock_data_for_period(code: str, start: str, end: str) -> pd.DataFrame:\n try:\n return pandas_datareader.get_data_yahoo(code, start, end)\n except Exception as e:\n raise_retrieval_error(code, e)", "title": "" }, { "docid": "854ca7feb32b4338e4c5e8e4bc88a121", "score": "0.53168464", "text": "def get_portfolio_historic_returns(self):\n shares = self._shrs\n positions = shares.keys()\n periods = self._hld_per \n \n returns = {}\n for position in positions:\n returns[position] = self._get_historic_returns(position, periods[position]['start'], periods[position]['end'])\n \n return pandas.DataFrame(returns)", "title": "" }, { "docid": "11ecc34a3a9b4002244ffd971dcea207", "score": "0.5308173", "text": "def get_portfolio_historic_values(self, shares=None):\n if shares is None:\n shares = self._shrs\n \n positions = shares.keys()\n periods = self._hld_per \n \n values = {}\n for position in positions:\n prices = self._get_historic_data(position, periods[position]['start'], periods[position]['end'])\n values[position] = prices['adjustedClose'] * shares[position]\n \n portfolio = pandas.DataFrame(values).sum(axis=1)\n \n return pandas.Series(portfolio)", "title": "" }, { "docid": "b771c3cfcfeffb61484511d3a9b22b8f", "score": "0.5292888", "text": "def getstock(abbr, write_csv, time):\n abbr = abbr.upper()\n output_filename = abbr+'.csv'\n poc_name = abbr+'_POC'\n if time=='before':\n url = 'https://query1.finance.yahoo.com/v7/finance/download/' + abbr + \\\n '?period1=1569024000&period2=1578009600&interval=1d&events=history'\n else:\n url = 'https://query1.finance.yahoo.com/v7/finance/download/' + abbr + \\\n '?period1=1579564800&period2=1588896000&interval=1d&events=history'\n # read the url into dataframe\n df = pd.read_csv(url)\n # trim the year in Date column\n df['Date'] = df['Date'].astype(str).str[5:]\n df[poc_name] = df['Close'].diff()\n output = df[['Date', 'Close', poc_name]]\n # rename columns for further steps\n output.columns = ['date', abbr, poc_name]\n if write_csv:\n df.to_csv(output_filename, index=False)\n return(output)", "title": "" }, { "docid": "982bfc1a0398ba1e437565a03eec7059", "score": "0.5283076", "text": "def get_price_df(symbol, duration=1):\n end = datetime.today()\n #end = '02-26-2020'\n #end = datetime.strptime(end, '%m-%d-%Y').date()\n start = end-timedelta(days=duration) \n return web.DataReader(symbol,'yahoo',start,end)['Close'].pct_change()[1]", "title": "" }, { "docid": "9737ebfe6c068db2483fd34806768df0", "score": "0.52777445", "text": "def get_dates_season(dates, l_season):\r\n if len(l_season) != 366:\r\n raise \"number of l_season must be 366\"\r\n return [l_season[date.replace(year=2016).timetuple().tm_yday - 1] for date in dates]", "title": "" }, { "docid": "8a21f7ead75454064821b3cdf0085d54", "score": "0.52696246", "text": "def getChartHistory(self,scrip,start_date=None,end_date=None):\n\n scripID = [security for security in self.securities if security['symbol']==scrip.upper()][0]['securityId']\n resp = requests.post(self.host+f'nots/market/graphdata/{scripID}', headers=self.headers, json = {'id': 820}).json()\n if not resp:\n data = {'id': self.fetchPayload()}\n resp= requests.post(self.host+f'nots/market/graphdata/{scripID}', headers=self.headers, json=data).json()\n if start_date:\n start_date = self.dateFilter(start_date,resp)\n start_index = next((index for (index, d) in enumerate(resp) if d[\"businessDate\"] == start_date), None)\n resp = resp[start_index:]\n if end_date:\n \n end_date = self.dateFilter(end_date,resp)\n end_index 
=next((index for (index, d) in enumerate(resp) if d[\"businessDate\"] == end_date), None)+1\n if start_date and end_date:\n if end_index == start_index:\n end_index =-1\n resp = resp[:end_index]\n return resp", "title": "" }, { "docid": "907b934c53a5468e99f222887ae94c66", "score": "0.5260362", "text": "def make_price_df(stock, start, end=None, interval=30, ignore_missing_data=False, dirpath='./'):\n if 60 % interval != 0:\n raise ValueError(f'Interval of {interval} not a divisor of 60.')\n\n end = end if end else start\n dirpath = dirpath if dirpath[-1] == '/' else dirpath + '/'\n data = []\n for date in trading_daterange(start, end):\n filename = dirpath + 'xetra/' + f'{stock}-{date}.feather' # We always assume that the file is in ./xetra\n if not ignore_missing_data:\n if not os.path.isfile(filename):\n # If data does not yet exist, we download it first.\n download(date, 'xetra', open('apikey', 'r').readline().strip(), dirpath, stock)\n if os.path.isfile(filename):\n df = pandas.read_feather(filename, columns=data_columns['xetra'])\n data.extend(df.values.tolist())\n buckets = OrderedDict()\n for row in data:\n date = row[col_index('Date')]\n hour, minutes = row[col_index('Time')].split(':')\n if int(hour) < 7: # Price adjustements are recorded as pre-open trades. We omit them from analysis.\n continue\n price = row[col_index('EndPrice')]\n volume = row[col_index('NumberOfTrades')]\n minute_val = interval * (int(minutes) // interval)\n minute_val = str(minute_val) if minute_val >= 10 else '0' + str(minute_val)\n key = f'{date} {hour}:{minute_val}'\n # by using a dictionary we let the data structure do the bucketing for us.\n if key not in buckets:\n buckets[key] = [(volume, price)]\n else:\n buckets[key].append((volume, price))\n price_list = []\n price_items = list(buckets.items())\n yesterday = price_items[0][0].split(' ')[0] # first date in the sequence.\n for index in range(0, len(buckets)):\n key, prices = price_items[index]\n date, time = key.split(' ')\n if date != yesterday: # We omit prices from inter-day trading.\n yesterday = date\n continue\n avg_price = get_weighted_avg(prices)\n price_list.append([stock, date, time, avg_price])\n price_df = pandas.DataFrame(price_list, columns=['Mnemonic', 'Date', 'Time', 'Price'])\n return price_df", "title": "" }, { "docid": "a7060ff81b33fd704773f5ffb8a07ebb", "score": "0.5257072", "text": "def get_portfolio_object(self):\n raise NotImplementedError", "title": "" }, { "docid": "8ed718c476781236b6ad4030ed55d63f", "score": "0.52541125", "text": "def get_close_price(data, tickers):\n\n close = data[data['Name'] == tickers[0]][['Date']]\n for t in tickers:\n close[t] = data[data['Name'] == t]['Close'].values\n close.set_index(['Date'], inplace=True)\n close.index = pd.to_datetime(close.index)\n \n return close", "title": "" }, { "docid": "a0560caa1f04345bd952df1ff0a23ca3", "score": "0.5251322", "text": "def generate_season_date(season_tf, years):\r\n list_date = []\r\n # print season_tf\r\n for year in years:\r\n for from_date, to_date in reversed(season_tf):\r\n if (year % 4) != 0:\r\n if from_date == '02-29':\r\n from_date = '03-01'\r\n if to_date == '02-29':\r\n to_date = '02-28'\r\n begin = pd.datetime.strptime(\"{0}-{1}\".format(year, from_date), '%Y-%m-%d')\r\n end = pd.datetime.strptime(\"{0}-{1}\".format(year, to_date), '%Y-%m-%d')\r\n num_day = (end - begin).days + 1\r\n list_date += [begin + timedelta(days=i) for i in range(num_day)]\r\n return list_date", "title": "" }, { "docid": "c40f8ee2ac0825344f4050c4896bb788", "score": 
"0.5248201", "text": "def get_data(symbols, dates):\n df = pd.DataFrame(index = dates)\n if 'SPY' not in symbols: # add SPY for reference if absent\n symbols.insert(0, 'SPY') \n \n for symbol in symbols:\n #print(symbol)\n # Read and join data for each symbol\n df_temp = pd.read_csv(symbol_to_path(symbol), index_col ='Date',\n parse_dates = True,\n usecols = ['Date', 'Adj Close'], na_values = ['nan'])\n df_temp = df_temp.rename(columns = {'Adj Close': symbol}) \n \n df = df.join(df_temp)\n if symbol == 'SPY': # drop dates where SPY did not trade\n df = df.dropna(subset = [\"SPY\"])\n \n return df", "title": "" }, { "docid": "5d2abba4b0b67d19a43f3c284d150c74", "score": "0.5242208", "text": "def update_portfolio(self, portfolio):\n actual_value = portfolio.cash\n for asset, volume in portfolio.presentAssetDict.items():\n actual_value += asset.data[self.theDay] * volume\n portfolio.valueHistory.append(actual_value)\n\n # update position history value of the positions held by the portfolio\n for position in portfolio.openPositionList:\n position.valueHistory.append(position.openTrade.asset.data[self.theDay] * position.openTrade.volume)\n for position in portfolio.closePositionList:\n if position.closeTrade.day == self.theDay:\n position.valueHistory.append(position.openTrade.asset.data[self.theDay] * position.openTrade.volume)", "title": "" }, { "docid": "77225990084726ac2e9a3da3efc4e306", "score": "0.5241742", "text": "def stock_today(date, \n table='table table-condensed table-hover', \n headers = True):\n \n url = ('http://www.nepalstock.com/todaysprice?'\n +'startDate={}&stock-symbol=&_limit=500'.format(date))\n npse = get_soup(url)\n pages = npse.find_all(text=re.compile('Page.*')) #'Page' is only present if page has data\n if pages:\n table = npse.find('table', {'class':table})\n\n #get title \n title = table.find_all('tr')[1].find_all('td') #title of table\n title = get_text(title)\n title = clean_names(title)\n title = ['date'] + title #added date to data\n\n #tr in table are nested, so we only need top tr\n body = table.find_all('tr')[2].find_all('td')\n body = list(chunks(get_text(body), 10)) #get all data and make chuck of rows\n body = [[date] + i for i in body] #added date to data\n body_list = [title] + body\n if headers:\n return body_list[:-1] #last coloum is total, so ignored\n else:\n return body_list[1:-1]\n else:\n print(\"No data on stock exchange, was that day public holiday!!\")\n pass", "title": "" }, { "docid": "a5d33e939d50dd6b7678431d35a91b7b", "score": "0.5240899", "text": "def get_portfolio(portfolio_name):\n return jsonify(print_portfolio(portfolio_name))", "title": "" }, { "docid": "1764dbdfc7bd69cd6d51ad49a31a5925", "score": "0.52299947", "text": "def get_raw_data(price_data, stocks_list, api_key, n_period):\n for stock in stocks_list:\n url = f'https://api.tdameritrade.com/v1/marketdata/{stock}/pricehistory?apikey={api_key}&periodType=month&period={n_period}&frequencyType=daily&frequency=1'\n raw_data = requests.get(url).json()\n raw_data = pd.json_normalize(raw_data, record_path=['candles'])\n raw_data.rename(columns = {'datetime': 'date'}, inplace=True)\n raw_data['date'] = pd.to_datetime(raw_data['date'], unit='ms')\n raw_data['date'] = [raw_data['date'][i].date() for i in range(len(raw_data['date']))]\n raw_data['stock'] = [stock for x in range(len(raw_data))]\n\n # Calc returns\n raw_data['1d-logreturn'] = np.log(raw_data['close'] / raw_data['close'].shift(1))\n raw_data['2d-logreturn'] = np.log(raw_data['close'] / raw_data['close'].shift(2))\n 
raw_data['5d-logreturn'] = np.log(raw_data['close'] / raw_data['close'].shift(5))\n\n # Determine direction of return\n raw_data['1d-direction'] = [1 if x > 0 else -1 if x < 0 else 0 for x in raw_data['1d-logreturn']]\n raw_data['2d-direction'] = [1 if x > 0 else -1 if x < 0 else 0 for x in raw_data['2d-logreturn']]\n raw_data['5d-direction'] = [1 if x > 0 else -1 if x < 0 else 0 for x in raw_data['5d-logreturn']]\n\n # Concat dataframes\n price_data = pd.concat([price_data, raw_data], ignore_index=True)\n price_data = price_data[['date',\n 'stock',\n 'close',\n '1d-logreturn',\n '1d-direction',\n '2d-logreturn',\n '2d-direction',\n '5d-logreturn',\n '5d-direction']]\n\n return price_data", "title": "" }, { "docid": "0e72c96de26984c035817b1a531b852e", "score": "0.5229147", "text": "def get_data_for_ticker_in_range(ticker_name,from_date,to_date, data_type):\r\n try:\r\n from_date=datetime.datetime.strptime(from_date,\"%Y-%m-%d\")\r\n to_date=datetime.datetime.strptime(to_date,\"%Y-%m-%d\")\r\n except:\r\n raise ValueError(\"The format of parameters from_date or to_date is not'%Y-%m-%d'.\") \r\n \r\n \r\n if to_date < from_date:\r\n raise ValueError(\"The date range you entered is in reversed order.\")\r\n lst_col_data_ticker = ['open','high','low','close','volume']\r\n for i in data_type: \r\n if i not in lst_col_data_ticker:\r\n raise ValueError(\"The list of variable types you entered includes a non-existent type in the database.\")\r\n \r\n if os.path.exists(\"./data\")==False:\r\n os.makedirs(\"./data\")\r\n if os.path.exists(\"./data/{}.csv\".format(ticker_name)):\r\n data_ticker = pd.read_csv(\"./data/{}.csv\".format(ticker_name), sep=',', encoding='utf8', parse_dates=['timestamp'], index_col='timestamp')\r\n else:\r\n fetch_ticker(ticker_name, \"full\")\r\n data_ticker = pd.read_csv(\"./data/{}.csv\".format(ticker_name),sep=',', encoding='utf8', parse_dates=['timestamp'], index_col='timestamp')\r\n \r\n ready_data_ticker = data_ticker[to_date:from_date]\r\n ready_data_ticker = ready_data_ticker[data_type]\r\n if ready_data_ticker.empty:\r\n raise ValueError(\"The date range you entered is not included in the database existing in your download file.\")\r\n ready_data_ticker['ticker_name'] = ticker_name\r\n return(ready_data_ticker)", "title": "" }, { "docid": "08532632968867137b0f8c80b4f9ade5", "score": "0.52268094", "text": "def get_stocks_data_for_period(codes: typing.List[str], start: str, end: str) -> pd.DataFrame:\n try:\n return pandas_datareader.get_data_yahoo(codes, start, end)\n except Exception as e:\n raise_retrieval_errors(codes, e)", "title": "" }, { "docid": "f015303f452a92acbc92d0dcc305a70d", "score": "0.5219683", "text": "def get_stock_history_info(cls, stock_symbol: str, period=None) -> list:\n if stock_symbol.upper() in cls.cash_stocks:\n return []\n\n history_url = cls.history_url + stock_symbol\n params = {\"apikey\": cls.api_token}\n\n _daily_trades = []\n try:\n res = requests.get(history_url, params=params)\n if res:\n _daily_trades = res.json().get(\"historical\", [])\n else:\n pass\n\n except requests.exceptions.ConnectionError:\n logger.info(f\"connection error: {history_url} {stock_symbol} {params}\")\n\n # if there is history info, convert date string and provide date and\n # create list of trade_info tuples\n trade_tuple = namedtuple(\"trade_tuple\", \"date open close low high volume\")\n daily_trades = []\n if _daily_trades:\n for trade in _daily_trades:\n daily_trades.append(\n trade_tuple(\n date=datetime.datetime.strptime(trade.get(\"date\"), 
\"%Y-%m-%d\"),\n open=trade.get(\"open\"),\n close=trade.get(\"close\"),\n low=trade.get(\"low\"),\n high=trade.get(\"high\"),\n volume=trade.get(\"volume\"),\n )\n )\n\n else:\n # if not succeeded try with fallback site on marketstack\n daily_trades = get_history_marketstack(stock_symbol, period)\n\n # or (last resort) try alpha vantage\n if not daily_trades:\n daily_trades = get_history_alpha_vantage(stock_symbol)\n\n return daily_trades", "title": "" }, { "docid": "f57c36465b4e9621ca1ab187594fc873", "score": "0.521621", "text": "def PortfolioBacktestingRolling(portfolio):\n \n return None", "title": "" }, { "docid": "651071e8b469ce01fe82f65816119854", "score": "0.5215719", "text": "def trading_dates(self):\n return self._price.index", "title": "" }, { "docid": "67e00859a0bae7aacdcfaaa5b35adb3f", "score": "0.52153206", "text": "def extract_daily_return(self):\r\n df = self.portfolio\r\n daily_return = df['daily_return'].groupby(df.index).first()\r\n return daily_return", "title": "" }, { "docid": "5f060e123f395698b8798c448d355e61", "score": "0.5211756", "text": "def returns(assets, start, end, periods=1, field='close'):\n df = prices(assets, start, end, field, periods)\n df = df.sort_index().pct_change(1).iloc[1:]\n return df", "title": "" }, { "docid": "e31b5d165353b863613b9343a5bd1aff", "score": "0.52079546", "text": "def get_sp() :\n df = pdr.data.DataReader(\"^gspc\",\n start='2010-07-19',\n data_source='yahoo')\n df.reset_index(inplace = True)\n df['Date'] = pd.to_datetime(df['Date'])\n\n price = [df['Close'][0],df['Close'][0], df['Close'][0]]\n for i in range(1,len(df)) :\n if (df['Date'][i] - df['Date'][i-1]).days == 1 :\n price.append(df['Close'][i])\n elif (df['Date'][i] - df['Date'][i-1]).days == 2 :\n price.append(df['Close'][i-1])\n price.append(df['Close'][i])\n elif (df['Date'][i] - df['Date'][i-1]).days == 3 :\n price.append(df['Close'][i-1])\n price.append(df['Close'][i-1])\n price.append(df['Close'][i])\n elif (df['Date'][i] - df['Date'][i-1]).days == 4 :\n price.append(df['Close'][i-1])\n price.append(df['Close'][i-1])\n price.append(df['Close'][i-1])\n price.append(df['Close'][i])\n elif (df['Date'][i] - df['Date'][i-1]).days == 5 :\n price.append(df['Close'][i-1])\n price.append(df['Close'][i-1])\n price.append(df['Close'][i-1])\n price.append(df['Close'][i-1])\n price.append(df['Close'][i])\n price.append(df['Close'].iloc[-1])\n #price.append(df['Close'].iloc[-1])\n #price.append(df['Close'].iloc[-1])\n df_sp = df_.copy()\n df_sp['sp'] = price\n\n pct = [0]\n for x in range(1,len(df_sp)) :\n mouv = (df_sp['sp'].iloc[x] - df_sp['sp'].iloc[x-1])/ df_sp['sp'].iloc[x-1]\n pct.append(mouv)\n\n df_sp['sp'] = pct\n\n return df_sp['sp']", "title": "" }, { "docid": "6a4012856922dd0e033f506102a7601c", "score": "0.5205811", "text": "def init(asset_list):\n myport = Portfolio()\n for contract in asset_list:\n print(contract)\n mT.get_one_option(contract, myport)\n mT.get_one_stock(contract, myport)\n\n return myport", "title": "" }, { "docid": "71d3f13122f9d57a8844c51379a7767b", "score": "0.5192063", "text": "def get_todays_stats(stock_name):\n\n latest_date = get_latest_date_for_stock(stock_name)\n if latest_date:\n return get_stock_stats(stock_name, latest_date)", "title": "" }, { "docid": "158be8294f7eaafbee52c3557d5ae6ad", "score": "0.51919585", "text": "def fetchPriceHistory(self, dialog):\n\t\tstockstr = self.m_symbolinput.Value.encode('ascii')\n\t\tstocks = stockstr.split(\",\")\n\t\tstocks = [s.strip() for s in stocks]\n\t\t\n\t\t\"\"\"Get the historical prices 
and\n\t\tcreate a new Asset object for each stock symbol\"\"\"\n\t\tcurSymbols = [a.symbol for a in self.portfolio.assets]\n\t\tinc = 100/len(stocks)\n\t\ti = 0\n\t\tfor s in stocks:\n\t\t\tif s not in curSymbols:\n\t\t\t\twx.CallAfter(dialog.Update, i*inc, \"Fetching %s historical data\"%(s))\n\t\t\t\ti = i+1\n\t\t\t\ttry:\n\t\t\t\t\tprices = u.getHistoricalPrices(s)\n\t\t\t\t\tasset = fin.Asset(s, prices)\n\t\t\t\t\tself.portfolio.addAsset(asset)\n\t\t\t\texcept:\n\t\t\t\t\tmessage = \"Could not find data for %s \\n Perhaps it's not a real stock symbol?\"%(s)\t\t\t\n\t\t\t\t\twx.MessageBox(message, 'Error', wx.OK | wx.ICON_INFORMATION)\n\t\t\t\t\tcontinue\n\t\t\t\t\n\t\tself.m_symbolinput.SetValue(\"\")\n\t\tself.updateGridSymbols()\n\t\twx.CallAfter(dialog.Destroy)", "title": "" }, { "docid": "1db0dec44bcb1f68d3b2c270deaef82c", "score": "0.5190075", "text": "def get_split_dates(self, ticker: str):\n end_point = f\"https://api.polygon.io/v2/reference/splits/{ticker}?apiKey={self.API_KEY}\"\n content = requests.get(end_point)\n data = content.json()[\"results\"]\n return pd.DataFrame(data)", "title": "" }, { "docid": "13ef4827af8bc239b68402ce7fb28287", "score": "0.5188483", "text": "def main():\n\n\tstocks=[\"NKE\",\"EXPE\"]\n\tpresent_day='2020-09-18'\n\n\tstock_data_daily(stocks,present_day)", "title": "" }, { "docid": "cf740f5798ba6db17317b29c7878953e", "score": "0.5183204", "text": "def list_price_date(query_params):\n\n price_date = split_date(query_params.get(\"price_date\"))\n\n try:\n price_year = int(price_date[0])\n price_month = int(price_date[1])\n price_day = int(price_date[2])\n\n s_date = datetime(\n year=price_year,\n month=price_month,\n day=price_day,\n hour=0,\n minute=0,\n second=0,\n ).replace(tzinfo=pytz.UTC)\n\n except Exception:\n raise APIException(detail=\"Provide proper date\")\n\n return PriceList.objects.filter(price_date=s_date)", "title": "" }, { "docid": "97e5f9a1afb978c278197506ff963b8e", "score": "0.5181273", "text": "def compile_historical_prices(symbol, start_year=2015, end_year=dt.date.today().year):\r\n symbol_list = construct_futures_symbols(symbol, start_year, end_year)\r\n prices = download_historical_prices(symbol_list[0])\r\n for sym in symbol_list[1:]:\r\n next_prices = download_historical_prices(sym)\r\n prices = next_prices.append(prices)\r\n return prices", "title": "" }, { "docid": "3301db418c38533c3c0e567af836ee61", "score": "0.5180864", "text": "def extract_stock_data():\r\n print 'Extracting stock data from database'\r\n stocks_db = extract_stocks()\r\n\r\n # stock_list = list of stock objects\r\n stock_list = []\r\n\r\n # populate stock_list with data \r\n for stock in stocks_db:\r\n symbol = list(stock)[0].strip()\r\n name = list(stock)[1].strip()\r\n\r\n prices = {}\r\n prices_tuple = extract_pricehistory(symbol)\r\n for dt in prices_tuple:\r\n daily_price_details = {}\r\n daily_price_details['open'] = list(dt)[2]\r\n daily_price_details['close'] = list(dt)[3]\r\n daily_price_details['high'] = list(dt)[4]\r\n daily_price_details['low'] = list(dt)[5]\r\n daily_price_details['volume'] = list(dt)[6]\r\n\r\n prices[list(dt)[1]] = daily_price_details\r\n stock_list.append(Stock(name,symbol,prices))\r\n\r\n return stock_list", "title": "" }, { "docid": "26aba09409e975d04ac2e207bd0b0810", "score": "0.51757616", "text": "def make_return_df(stock, start, end=None, interval=30, dirpath='./', ignore_missing_data=False, difference=False):\n if 60 % interval != 0:\n raise ValueError(f'Interval of {interval} not a divisor of 60.')\n\n end = end if 
end else start\n dirpath = dirpath if dirpath[-1] == '/' else dirpath + '/'\n data = []\n for date in trading_daterange(start, end):\n filename = dirpath + 'xetra/' + f'{stock}-{date}.feather' # We always assume that the file is in ./xetra\n if not ignore_missing_data:\n if not os.path.isfile(filename):\n # If data does not yet exist, we download it first.\n download(date, 'xetra', open('apikey', 'r').readline().strip(), dirpath, stock)\n if os.path.isfile(filename):\n df = pandas.read_feather(filename, columns=data_columns['xetra'])\n data.extend(df.values.tolist())\n buckets = OrderedDict()\n for row in data:\n date = row[col_index('Date')]\n hour, minutes = row[col_index('Time')].split(':')\n if int(hour) < 7: # Price adjustements are recorded as pre-open trades. We omit them from analysis.\n continue\n price = row[col_index('EndPrice')]\n minute_val = interval * (int(minutes) // interval)\n minute_val = str(minute_val) if minute_val >= 10 else '0' + str(minute_val)\n key = f'{date} {hour}:{minute_val}'\n # by using a dictionary we let the data structure do the bucketing for us.\n if key not in buckets:\n buckets[key] = (int(minutes), price)\n else:\n # Here we are only keeping the first data point in the window, in order to calculate the return later.\n continue\n return_list = []\n price_items = list(buckets.items())\n yesterday = price_items[0][0].split(' ')[0] # first date in the sequence.\n for index in range(1, len(buckets)):\n old_price = price_items[index-1][1]\n key, price = price_items[index]\n date, time = key.split(' ')\n if date != yesterday: # We omit returns from inter-day trading.\n yesterday = date\n continue\n if difference:\n ret = price[1] - float(old_price[1])\n else:\n ret = (price[1] - float(old_price[1])) / float(old_price[1])\n return_list.append([stock, date, time, ret])\n return_df = pandas.DataFrame(return_list, columns=['Mnemonic', 'Date', 'Time', 'Return'])\n return return_df", "title": "" }, { "docid": "4593af1575ee8295bb5d443971e534d1", "score": "0.5175149", "text": "def get_data(symbols, dates):\n df = pd.DataFrame(index=dates,)\n if ('SPY' not in symbols): # add SPY for reference, if absent\n symbols.insert(0, 'SPY')\n\n for symbol in symbols:\n\n df_temp = pd.read_csv(symbol_to_path(symbol,base_dir=base_dir), index_col='Date',\n parse_dates=True, usecols=['Date', 'Adj Close','Volume'], na_values=['nan'])\n\n\n df = df.join(df_temp)\n if symbol == 'SPY': # drop dates SPY did not trade\n\n df = df.rename(columns={'Adj Close': 'BasePair_Close','Volume':'BasePair_Volume'})\n df = df.dropna(subset=[\"BasePair_Volume\"])\n else:\n df = df.rename(columns={'Adj Close': 'Close'})\n\n return df", "title": "" }, { "docid": "f84f9d5c9e3940f122d79c0891ad9c45", "score": "0.51728326", "text": "def stocks():", "title": "" }, { "docid": "222505714a147cac08ab6e7bd3ac0cc8", "score": "0.51610863", "text": "def get_previous_day_stats(stock_name, date = get_nth_date()):\n\n sql_result = get_historical_data(stock_name, date)\n\n return get_values(sql_result)", "title": "" }, { "docid": "44bcb7b26b88f2d5f03f8944d168a61d", "score": "0.51578224", "text": "def get_portfolio_historic_position_values(self, shares=None):\n if shares is None:\n shares = self._shrs\n \n positions = shares.keys()\n periods = self._hld_per \n\n prices = {}; portfolio = {}\n for position 
in positions:\n frame = self._get_historic_data(position, periods[position]['start'], periods[position]['end'])\n prices[position] = frame['adjustedClose'] * shares[position]\n \n return pandas.DataFrame(prices)", "title": "" }, { "docid": "b0f67d27e408c5f662cb7a0065ec13cd", "score": "0.51474005", "text": "def get_daily(symbol, api_name):\r\n if api_name == 'alphavantage':\r\n parameters = {\r\n 'function': 'TIME_SERIES_DAILY',\r\n 'symbol': symbol,\r\n 'outputsize': 'full',\r\n 'datatype': 'csv',\r\n 'apikey': apikey.API_KEY\r\n }\r\n data = get_data_alphavantage(parameters, 'date')\r\n elif api_name == 'tdameritrade':\r\n parameters = {\r\n 'period_type':client.Client.PriceHistory.PeriodType.YEAR,\r\n 'period': client.Client.PriceHistory.Period.TWENTY_YEARS,\r\n 'frequency_type': client.Client.PriceHistory.FrequencyType.DAILY,\r\n 'frequency': client.Client.PriceHistory.Frequency.DAILY,\r\n 'apikey': tdam_keys.CLIENT_ID,\r\n 'symbol': symbol.upper(),\r\n 'need_extended_hours_data': False\r\n }\r\n data = get_data_tdameritrade(parameters)\r\n\r\n elif api_name == 'yfinance':\r\n parameters = {\r\n 'symbols': symbol,\r\n 'period': 'max',\r\n 'interval': '1d',\r\n 'group_by': 'ticker',\r\n 'auto_adjust': True,\r\n 'prepost': True,\r\n 'threads': True,\r\n 'proxy': None,\r\n 'actions': True,\r\n }\r\n data = get_data_yfinance(parameters)\r\n elif api_name == 'alpaca':\r\n parameters = {\r\n 'symbol': symbol.upper(),\r\n 'multiplier': 1,\r\n 'timespan': 'day',\r\n '_from': '1900-01-01',\r\n 'to': '2100-01-01',\r\n 'timestamp_type': 'date'\r\n }\r\n data = get_data_alpaca(parameters)\r\n return data", "title": "" }, { "docid": "e87dc4c36f1535ac96a41a99e2b671fc", "score": "0.51448876", "text": "def get_daily_open_close(self, ticker, date=datetime.now()):\n end_point = f\"https://api.polygon.io/v1/open-close/{ticker}/\" \\\n f\"{date.strftime('%Y-%m-%d')}?apiKey={self.API_KEY}\"\n print(end_point)\n content = requests.get(end_point)\n data = content.json()\n return data", "title": "" }, { "docid": "25aa7bd5ac4d1536c935cf2c4304f549", "score": "0.51352066", "text": "def construct_futures_symbols(symbol, start_year=2015, end_year=dt.date.today().year):\r\n futures = []\r\n # append expiration month code to symbol name\r\n futures_info = get_futures_info()\r\n months = futures_info['ExpMonths'].loc[futures_info['Symbol'] == symbol].values[0]\r\n if futures_info.loc[futures_info['Symbol'] == symbol]['YearLimit'].values[0] > start_year:\r\n start_year = int(futures_info.loc[futures_info['Symbol'] == symbol]['YearLimit'].values[0])\r\n end_year += math.ceil(futures_info.loc[futures_info['Symbol'] == symbol]['TradedContract'].values[0] / len(months))\r\n\r\n for y in range(start_year, end_year + 1):\r\n for m in months:\r\n futures.append(\"%s%s%s\" % (symbol, m, y))\r\n return futures", "title": "" }, { "docid": "bea7bb7ff7a1dc9e65397d1e1288bb7c", "score": "0.51350594", "text": "def get_search_list(start_date, end_date, min_date, max_date, \n prev_month_length, num_days, num_years):\n # import necessary modules\n import numpy as np\n import datetime as dt\n \n # create empty lists\n search_dates = []\n sorted_search_dates = []\n \n # iterate through dates and return complete list in dt format\n for i in np.arange(num_years):\n for j in np.arange(num_days + 1):\n if end_date.day - j <= 0:\n search_dates.append((dt.date(max_date.year - i, \n end_date.month - 1,\n prev_month_length - (abs(end_date.day - j)))))\n else:\n search_dates.append((dt.date(max_date.year - i, \n end_date.month,\n end_date.day - 
j)))\n \n # iterate through list and return list of lists with dt objects by day\n for i in np.arange(num_days + 1):\n temp_list = []\n for j in np.arange(num_years):\n if j == 0:\n temp_list.append(search_dates[i])\n else:\n temp_list.append(search_dates[i + (j * 7)])\n \n sorted_search_dates.append(temp_list)\n \n return sorted_search_dates;", "title": "" }, { "docid": "102003ea31d7eb572a9839cf7dc4a5b6", "score": "0.5134799", "text": "def loadPortfolio(self):\n\n # load the db from file\n db = TinyDB(self.portfolioDBFile)\n\n\n # for each asset in the db\n for asset in db:\n\n\n if asset['assetType'] == 'COMMON':\n\n # create the asset\n newAsset = st.CommonStock(asset['assetID'],\n asset['purchaseDate'],\n asset['purchasePrice'],\n asset['saleDate'],\n asset['salePrice'],\n asset['volume'],\n asset['percentOwnership'],\n asset['priceFeedRef'],\n asset['priceFeedType'])\n\n elif asset['assetType'] == 'PREFFERED':\n\n # create the asset\n newAsset = st.PreferredStock(asset['assetID'],\n asset['purchaseDate'],\n asset['purchasePrice'],\n asset['saleDate'],\n asset['salePrice'],\n asset['volume'],\n asset['percentOwnership'],\n asset['priceFeedRef'],\n asset['priceFeedType'])\n\n\n # append the nes asset to the list of assets in the portfolio\n self.assets.append(newAsset)", "title": "" } ]
0733abbd7642aa08c3140ea6d494350f
Creates an AXIC card
[ { "docid": "ba0cb07a746135e2e85079c710aade0b", "score": "0.5038087", "text": "def add_axic(self, nharmonics, comment='') -> AXIC:\n axic = AXIC(nharmonics, comment=comment)\n self._add_methods._add_axic_object(axic)\n return axic", "title": "" } ]
[ { "docid": "33301934c51f8083554de3d42b4d471c", "score": "0.6408132", "text": "def test_basicCardCreation():\n\tc = Card(\"A\",\"Diamond\")\n\tassert c.getName() == \"A\"\n\tassert c.getValue() == [1,11]\n\tassert c.getSuit() == \"Diamond\"", "title": "" }, { "docid": "d93da30fcd080c31d68bff466b490a3a", "score": "0.6281857", "text": "def create_account():\n print(\"Please choose one kind of card\\n\"\n \"[1]: Card\\n\"\n \"[2]: CreditCard\\n\")\n kind = input()\n if 1 == int(kind):\n card = Card()\n elif 2 == int(kind):\n card = CreditCard()\n print(\"Please set credit limit(between 10,000 to 100,000):\")\n card.set_creditLimit(int(input()))\n else:\n print(\"Please input true value\")\n return\n print(\"Please enter your name:\\n\")\n card.set_owner(str(input()))\n print(\"Please enter your person ID:\\n\")\n card.set_personID(str(input()))\n print(\"Please enter your phone number:\\n\")\n card.set_phoneNumber(str(input()))\n print(\"Please set your password:\\n\")\n card.set_password(str(input()))\n add_card(card)", "title": "" }, { "docid": "80926e90e7c3b5cbcfe5b1aba07efa18", "score": "0.6219326", "text": "def createCard(self, name):\n self.cards[name] = Card(name)", "title": "" }, { "docid": "96d34581047bb1362558c01055ac7532", "score": "0.6019334", "text": "def add_card(cls, card, comment=''):\n eid = integer(card, 1, 'eid')\n pid = integer_or_blank(card, 2, 'pid', eid)\n ga = integer_or_blank(card, 3, 'ga')\n gb = integer_or_blank(card, 4, 'gb')\n x1_g0 = integer_double_or_blank(card, 5, 'x1_g0')\n cid = integer_or_blank(card, 8, 'cid')\n if isinstance(x1_g0, integer_types):\n g0 = x1_g0\n x = None\n elif isinstance(x1_g0, float):\n g0 = None\n x1 = x1_g0\n x2 = double_or_blank(card, 6, 'x2', 0.0)\n x3 = double_or_blank(card, 7, 'x3', 0.0)\n x = [x1, x2, x3]\n else:\n #raise RuntimeError('invalid CGAP...x1/g0 = %r' %(x1_g0))\n g0 = None\n x = [None, None, None]\n assert len(card) <= 9, f'len(CGAP card) = {len(card):d}\\ncard={card}'\n return CGAP(eid, pid, [ga, gb], x, g0, cid=cid, comment=comment)", "title": "" }, { "docid": "f327795957215f3d4e04074dd0526d27", "score": "0.5925844", "text": "def create_card(self, title=None, subtitle=None, content=None, card_type=\"Simple\"):\n card = {\"type\": card_type}\n if title: card[\"title\"] = title\n if subtitle: card[\"subtitle\"] = subtitle\n if content: card[\"content\"] = content\n return card", "title": "" }, { "docid": "8e8447fa3b429b6c849b02cb6a441bd9", "score": "0.5854015", "text": "def add_card(cls, card, icard=0, comment=''):\n sid = integer(card, 1, 'sid')\n eid = integer(card, 2, 'eid')\n\n t = []\n ifield = 3\n for var in ['A', 'B', 'C']:\n ti = double_or_blank(card, ifield, 'T(%s)' % var, default=0.0)\n t.append(ti)\n ifield += 1\n\n tpy = []\n tpz = []\n for var in ['A', 'B', 'C']:\n tpyi = double_or_blank(card, ifield, 'TPY(%s)' % var, default=0.0)\n tpzi = double_or_blank(card, ifield + 1, 'TPZ(%s)' % var, default=0.0)\n tpy.append(tpyi)\n tpz.append(tpzi)\n ifield += 2\n tc = []\n td = []\n te = []\n tf = []\n for var in ['A', 'B', 'C']:\n tci = double_or_blank(card, ifield, 'TC(%s)' % var, default=0.0)\n tdi = double_or_blank(card, ifield + 1, 'TD(%s)' % var, default=0.0)\n tei = double_or_blank(card, ifield + 2, 'TE(%s)' % var, default=0.0)\n tfi = double_or_blank(card, ifield + 3, 'TF(%s)' % var, default=0.0)\n tc.append(tci)\n td.append(tdi)\n te.append(tei)\n tf.append(tfi)\n ifield += 4\n ifield += 1 # skip the None\n\n list_fields = card[ifield:]\n eids = expand_thru_by(list_fields, set_fields=True, 
sort_fields=True,\n require_int=True, allow_blanks=False)\n return TEMPB3(sid, eid, t, tpy, tpz, tc, td, te, tf, eids, comment=comment)", "title": "" }, { "docid": "b1bf0d5731ec5397853bb466cd199917", "score": "0.5833742", "text": "def card_factory(rank, suit):\n if rank ==1: return AceCard(\"A\", suit)\n elif 2 <= rank < 11 : return NumberCard(str(rank), suit)\n elif 11<= rank < 14 : \n name = { 11: 'J', 12: 'Q', 13: 'K' }[rank]\n return FaceCard(name, suit)\n else:\n raise Exception(\"Rank out of range\")", "title": "" }, { "docid": "c804f8ab2d66c393c0c80ef51a24190a", "score": "0.5795089", "text": "def create_base():\n mac = Dot15d4FCS() / Dot15d4Data()\n mac.fcf_panidcompress = 1\n mac.fcf_ackreq = 1\n mac.fcf_srcaddrmode = \"Short\"\n\n nwk = ZigbeeNWK()\n nwk.discover_route = 1\n nwk.flags = \"security\"\n nwk.radius = 30\n\n sec = ZigbeeSecurityHeader()\n sec.key_type = 1\n\n return mac / nwk / sec", "title": "" }, { "docid": "f7a13f6ddcca4c26247027ba28b6011b", "score": "0.5766956", "text": "def add_card(cls, card, comment=''):\n eid = integer(card, 1, 'eid')\n pid = integer(card, 2, 'pid')\n nids = [\n integer(card, 3, 'n1'), integer(card, 4, 'n2'),\n integer(card, 5, 'n3'), integer(card, 6, 'n4'),\n integer(card, 7, 'n5'), integer(card, 8, 'n6'),\n integer(card, 9, 'n7'), integer(card, 10, 'n8'),\n integer(card, 11, 'n9'), integer(card, 12, 'n10'),\n integer_or_blank(card, 13, 'n11'),\n integer_or_blank(card, 14, 'n12'),\n integer_or_blank(card, 15, 'n13'),\n integer_or_blank(card, 16, 'n14'),\n integer_or_blank(card, 17, 'n15'),\n integer_or_blank(card, 18, 'n16'),\n integer_or_blank(card, 19, 'n17'),\n integer_or_blank(card, 20, 'n18')\n ]\n assert len(card) <= 21, f'len(CRAC2D card) = {len(card):d}\\ncard={card}'\n return CRAC2D(eid, pid, nids, comment=comment)", "title": "" }, { "docid": "680bfce3efa1288bc11fd71d94dd1083", "score": "0.5712112", "text": "def make_card_image(rank, suit):\n # Open a blank card corresponding to the given suit\n card = get_blank_card(suit)\n\n # Get the symbol corresponding to the rank\n rank_symbol = get_rank_symbol(rank)\n # Generate the cords based off the size of the rank symbol and the card\n top_cords, bottom_cords = get_rank_cords(card.size, rank_symbol.size)\n # Add the symbol to the top of the card\n add_symbol(card, rank_symbol, top_cords)\n # Flip the symbol\n rank_symbol = rank_symbol.rotate(180)\n # Add the symbol to the bottom of the card\n add_symbol(card, rank_symbol, bottom_cords)\n\n # Get the symbol corresponding to the suit\n suit_symbol = get_suit_symbol(suit)\n # Generate the cords based off the size of the suit symbol and the card\n top_cords, bottom_cords = get_suit_cords(card.size, suit_symbol.size)\n # Add the symbol to the top of the card\n add_symbol(card, suit_symbol, top_cords)\n # Flip the symbol\n suit_symbol = suit_symbol.rotate(180)\n # Add the symbol to the bottom of the card\n add_symbol(card, suit_symbol, bottom_cords)\n card = prepare_for_tkinter(card)\n return card", "title": "" }, { "docid": "5a3868002c2c57d7072c922d5c41f6c3", "score": "0.5669748", "text": "def add_card(cls, card, comment=''):\n eid = integer(card, 1, 'eid')\n pid = integer_or_blank(card, 2, 'pid', eid)\n Type = string(card, 3, 'Type')\n ida = integer(card, 4, 'ida')\n idb = integer(card, 5, 'idb')\n gs = integer_or_blank(card, 6, 'gs')\n ga = integer_or_blank(card, 7, 'ga')\n gb = integer_or_blank(card, 8, 'gb')\n xs = double_or_blank(card, 9, 'xs')\n ys = double_or_blank(card, 10, 'ys')\n zs = double_or_blank(card, 11, 'zs')\n assert 
len(card) <= 12, f'len(CFAST card) = {len(card):d}\\ncard={card}'\n #if self.Type=='PROP': # PSHELL/PCOMP ida & idb\n return CFAST(eid, pid, Type, ida, idb, gs=gs, ga=ga, gb=gb,\n xs=xs, ys=ys, zs=zs, comment=comment)", "title": "" }, { "docid": "dfe494b5d5d1de6d651e20fd289a7397", "score": "0.56686795", "text": "def add_card(cls, card, comment=''):\n sid = integer(card, 1, 'sid')\n qflux = double(card, 2, 'qflux')\n eids = []\n j = 1\n for i in range(3, len(card)):\n eid = integer_or_string(card, i, 'eid%i' % j)\n eids.append(eid)\n j += 1\n return QBDY1(sid, qflux, eids, comment=comment)", "title": "" }, { "docid": "1147eece851917597acd68b6e5fb5c2f", "score": "0.5625343", "text": "def add_card(cls, card, icard: int, comment=''):\n offset = icard * 4\n eid = integer(card, 1+offset, 'eid')\n nodes = [\n integer(card, 2+offset, 'g1'),\n integer(card, 3+offset, 'g2'),\n ]\n #assert len(card) <= 4, f'len(PLOTEL card) = {len(card):d}\\ncard={card}'\n assert len(card) <= 8, f'len(PLOTEL card) = {len(card):d}\\ncard={card}'\n return PLOTEL(eid, nodes, comment=comment)", "title": "" }, { "docid": "9705aaef9ff23980545548d04ab7e7fd", "score": "0.56251705", "text": "def getXML(self):\n card = self.createXmlElem(self.device, None, ['address'])\n sourceAttrs = {'mode': self.specParams['mode']}\n if sourceAttrs['mode'] != 'host':\n sourceAttrs['type'] = self.specParams['type']\n card.setAttrs(**sourceAttrs)\n return card", "title": "" }, { "docid": "725eefedbfa2fd24bb1b2c1da5b75714", "score": "0.5551582", "text": "def issueNewCard(self):\r\n \r\n # Generates a random 16 digit number using the random module\r\n self.cardNum = str(random.randint(1000000000000000,9999999999999999))\r\n\r\n # Generates todays date using the datetime module\r\n today = datetime.datetime.now()\r\n\r\n # Adds 3 years to todays date\r\n cardExpDate = today + datetime.timedelta(days=3*365)\r\n\r\n # Accesses the month and the last two digits of the year in the tuple cardExp\r\n self.cardExp = (cardExpDate.month,cardExpDate.strftime(\"%y\"))", "title": "" }, { "docid": "7c26a664cf987904d2d15b558d32b182", "score": "0.5516958", "text": "def add_card(cls, card, comment=''):\n eid = integer(card, 1, 'eid')\n pid = integer(card, 2, 'pid')\n # required 1-10, 19-28\n # optional 11-18, 29-36, 37-64\n # all/none 37-46\n nids = fields(integer_or_blank, card, 'nid', 3, 67) # cap at +3 = 67\n assert len(card) <= 67, f'len(CRAC3D card) = {len(card):d}\\ncard={card}'\n return CRAC3D(eid, pid, nids, comment=comment)", "title": "" }, { "docid": "61a47ddfaab5762686a403cddc485c15", "score": "0.54782665", "text": "def test_add_cards(setup_hand_with_ace):\n assert setup_hand_with_ace.cards == ['A', '6']\n setup_hand_with_ace.add_card(card='K')\n assert setup_hand_with_ace.cards == ['A', '6', 'K']", "title": "" }, { "docid": "c094d93d6c74dbe160fbce284bcba3a9", "score": "0.5456914", "text": "def add_card(cls, card, icard=0, comment=''):\n nfields = len(card) - 1\n assert nfields % 2 == 0, 'card=%s' % card\n i = 2 * icard\n sid = integer(card, i + 1, 'sid')\n temperature = double(card, i + 2, 'temp')\n return TEMPD(sid, temperature, comment=comment)", "title": "" }, { "docid": "3f1b0c1ccfabade252ce0ba9ea168c4d", "score": "0.54451716", "text": "def build(self):\r\n for s in ['spades', 'hearts', 'clubs', 'diamonds']:\r\n for v in range(1, 14):\r\n self.cards.append(Card(s, v))", "title": "" }, { "docid": "c57949d53ee22cfc7fef4b94e63dac33", "score": "0.5433112", "text": "def add_card(cls, card, comment=''):\n sid = integer(card, 1, 'sid')\n q0 = 
double(card, 2, 'q0')\n cntrlnd = integer_or_blank(card, 3, 'cntrlnd', 0)\n\n nfields = card.nfields\n eids = fields(integer_or_string, card, 'eid', i=4, j=nfields)\n return QBDY3(sid, q0, cntrlnd, eids, comment=comment)", "title": "" }, { "docid": "a236e4f9e9b61aaf709429be930fd3ee", "score": "0.543065", "text": "def create():\n\n\tdeck = ['deck', []]\n\tfor i in range(1, 27):\n\t\tif i < 14:\n\t\t\tsuit = 1\n\t\telif i >= 14:\n\t\t\ti -= 13\n\t\t\tsuit = 2\n\t\tdeck[1].append(card_mod.create(i, suit))\n\tdeck[1].append(card_mod.create(27, 1))\n\tdeck[1].append(card_mod.create(27, 2))\n\treturn deck", "title": "" }, { "docid": "026fa40aaaefc70e62232d1276cbacb4", "score": "0.54225516", "text": "def do_chassis_create(cc, args):\n field_list = ['description', 'extra']\n fields = dict((k, v) for (k, v) in vars(args).items()\n if k in field_list and not (v is None))\n fields = utils.args_array_to_dict(fields, 'extra')\n chassis = cc.chassis.create(**fields)\n\n field_list.append('uuid')\n data = dict([(f, getattr(chassis, f, '')) for f in field_list])\n cliutils.print_dict(data, wrap=72)", "title": "" }, { "docid": "3c718d4c241e14c5ea14cdf67aae21d3", "score": "0.53867716", "text": "def build(self) -> None:\n for suit in [\"Spades\", \"Hearts\", \"Clubs\", \"Diamonds\"]:\n for value in range(1, 14):\n self.cards.append(Card(suit, value))", "title": "" }, { "docid": "a34a82225aa85b374d8e4cf905d29936", "score": "0.53508824", "text": "async def xcard(self, ctx):\n await send(ctx, \"X-Card - Let's change things up...\")\n await send(ctx, file=discord.File('./images/xcard.png'))", "title": "" }, { "docid": "d07c59b5f55437d5b7b72ff8a00bf5f8", "score": "0.53265655", "text": "def create_cards():\n return [ suit + rank for suit in \"CDHS\" for rank in \"A23456789TJQK\" ]", "title": "" }, { "docid": "cae51747ba8e2a2b18e70cbde8a6ac14", "score": "0.5292166", "text": "async def card(self, ctx, card: str, count=1):\n card_conv = {\n 'standard' : StandardCard,\n 'shadow' : ShadowCard,\n 'tarot' : TarotCard,\n 'uno' : UnoCard\n }\n\n if len(card) > 0:\n card_type = card\n else:\n card_type = 'standard'\n\n cards = card_conv[card_type]\n deck = Deck(cards)\n deck.create()\n deck.shuffle()\n hand = deck.deal(count)\n if type(hand) is list:\n title = '🎴 Card Hand ' + card_type[0].upper() + card_type[1:]\n embed = make_embed(title, hand)\n await self.bot.send_message(ctx.message.channel, embed=embed)\n else:\n await self.bot.say(\"Error parsing cards.\")", "title": "" }, { "docid": "e3b5d605cf3b3fc90efc806d41cf17ea", "score": "0.52821326", "text": "def create(self):\r\n settings = dict()\r\n settings['card'] = None\r\n settings['method'] = 'post'\r\n self.render('card/config.html', **settings)", "title": "" }, { "docid": "2f0c463f8be1d53cd43ac9330a5eca8a", "score": "0.52737725", "text": "def test_default_card_creation():\n test_card = Card()\n ace_spades = Card(1, 'Spade')\n nose.assert_equal(test_card, ace_spades)", "title": "" }, { "docid": "2563592f8c45417a75a3ff2ec8a8830b", "score": "0.5265753", "text": "def format(self,card):\n pass", "title": "" }, { "docid": "7da34da8e11085e023611ba70c83ce2b", "score": "0.5264288", "text": "def test_card_is_ace(self):\n card = Card(\"A\", \"♡\")\n self.assertTrue(card.ace())\n card = Card(\"8\", \"♡\")\n self.assertFalse(card.ace())", "title": "" }, { "docid": "5bea620e2bd496858082132512fee131", "score": "0.526386", "text": "def create_deck(self):\r\n for suit in self.suits:\r\n for rank in self.ranks:\r\n self.deck.append(Card(suit=suit, rank=rank[0], value=rank[1]))\r\n 
for joker in self.jokers:\r\n self.deck.append(Joker(size=joker[0], value=joker[1]))", "title": "" }, { "docid": "e21049d56632c02831d1888e09578ef7", "score": "0.5256451", "text": "def __init__(self):\n self.deck = []\n for color in ['red', 'blue', 'green', 'yellow']:\n self.deck.append(UnoCard('0', color, 'standard')) # one 0 of each color\n for i in range(2):\n # other action couples\n self.deck.append(UnoCard('skip', color, 'action'))\n self.deck.append(UnoCard('reverse', color, 'action'))\n self.deck.append(UnoCard('draw2', color, 'action'))\n for n in range(1, 10): # two of each of 1-9 of each color\n self.deck.append(UnoCard(str(n), color, 'standard'))\n for i in range(8):\n self.deck.append(UnoCard(None, None, 'wild'))\n self.deck.append(UnoCard(None, None, 'wild4'))\n random.shuffle(self.deck) # shuffle the deck\n self.draw = False", "title": "" }, { "docid": "7af6e34b305ead2abb72b0c8efc1a9f4", "score": "0.52318764", "text": "def SCardIntroduceCardType(hcontext, cardname, primaryprovider, providerlist, atr, mask):\n return _scard.SCardIntroduceCardType(hcontext, cardname, primaryprovider, providerlist, atr, mask)", "title": "" }, { "docid": "bfe8bfee943df85a30e1dc2d529394ac", "score": "0.5205885", "text": "def add_card(cls, card, comment=''):\n sid = integer(card, 1, 'eid')\n flag = string(card, 2, 'flag')\n\n q0 = double(card, 3, 'q0')\n af = double_or_blank(card, 4, 'af')\n nnodes_required, nnodes_max = cls.flag_to_nnodes[flag]\n\n grids = []\n if nnodes_required == nnodes_max:\n for i in range(nnodes_required):\n grid = integer(card, 5 + i, 'grid%i' % (i + 1))\n grids.append(grid)\n else:\n int_node_count = 0\n for i in range(nnodes_max):\n grid = integer_or_blank(card, 5 + i, 'grid%i' % (i + 1))\n if grid is not None:\n int_node_count += 1\n grids.append(grid)\n if int_node_count < nnodes_required:\n msg = 'int_node_count=%s nnodes_required=%s' % (int_node_count, nnodes_required)\n raise RuntimeError(msg)\n return QHBDY(sid, flag, q0, grids, af=af, comment=comment)", "title": "" }, { "docid": "ceb7162588c01280dcab59d86c8f5c98", "score": "0.5202678", "text": "def __init__(self, name, description, updated, notes, card_type,\n card_number, expiry_date, ccv, pin):\n Account.__init__(self, name, description, updated, notes)\n self.card_type = card_type\n self.card_number = card_number\n self.expiry_date = expiry_date\n self.ccv = ccv\n self.pin = pin", "title": "" }, { "docid": "db07d4ad7ba60547629deb406c342adb", "score": "0.519682", "text": "def admin_create_account(self, account_name, card_id, amount):\n\n return self.modify('INSERT INTO cards(account_name, card_id, balance) \\\n values (?, ?, ?);', (account_name, card_id, amount,))", "title": "" }, { "docid": "5fa23f4500e599c45b297894715c96ec", "score": "0.51877475", "text": "def add_card(self, card, comment=''):\n eid = integer(card, 1, 'eid')\n pid = integer_or_blank(card, 2, 'pid', eid)\n nids = [integer_or_blank(card, 3, 'n1'),\n integer_or_blank(card, 4, 'n2'),\n integer_or_blank(card, 5, 'n3'),\n integer_or_blank(card, 6, 'n4')]\n assert len(card) <= 7, 'len(CSHEAR card) = %i\\ncard=%s' % (len(card), card)\n self.add(eid, pid, nids)", "title": "" }, { "docid": "fab276fc5571ea48577588b354e3f123", "score": "0.5169482", "text": "def make_bic_code(owner_code, serial):\n return iso6346.create(owner_code, serial=str(serial).zfill(6))", "title": "" }, { "docid": "d9f59710cddf2078da57d8b8a6865616", "score": "0.51626337", "text": "def show_card(self, card):\n pass", "title": "" }, { "docid": "4b28cf6c855b32d221185aea0fffef58", 
"score": "0.51583165", "text": "def card(self, credit_card, address=None, email=None):\n return AuthorizeCreditCard(self, credit_card, address=address,\n email=email)", "title": "" }, { "docid": "340f56561fd27c0ee41b065ff00ed3f5", "score": "0.51559776", "text": "def create_account(accountname, accountemail, accountrole, access_to_billing, scp, root_id, accountbilling):\n account_id = 'None'\n client = get_client('organizations')\n try:\n create_account_response = client.create_account(Email=accountemail, AccountName=accountname,\n RoleName=accountrole,\n IamUserAccessToBilling=access_to_billing,\n Tags=[\n {\n \"Key\": \"AccountBilling\",\n \"Value\": accountbilling\n }\n ])\n except botocore.exceptions.ClientError as exception:\n print(exception)\n sys.exit(1)\n # time.sleep(30)\n create_account_status_response = client.describe_create_account_status(\n CreateAccountRequestId=create_account_response.get('CreateAccountStatus').get('Id'))\n account_id = create_account_status_response.get('CreateAccountStatus').get('AccountId')\n\n while account_id is None:\n create_account_status_response = client.describe_create_account_status(\n CreateAccountRequestId=create_account_response.get('CreateAccountStatus').get('Id'))\n account_id = create_account_status_response.get('CreateAccountStatus').get('AccountId')\n return (create_account_response, account_id)", "title": "" }, { "docid": "790b11408d6cbabc8564c97fb1fe53ed", "score": "0.51519084", "text": "def make_bic_code(owner_code, serial):\n return iso6346.create(owner_code,\n serial=str(serial).zfill(6),\n category=\"R\")", "title": "" }, { "docid": "b6d49b1f7a7af5d13b7f9d8090fcf48b", "score": "0.5145252", "text": "def create_awg_object(slot_number, chassis_number=0):\n if isinstance(slot_number, int) and slot_number in range(2, 11):\n awg = keySD.SD_AOU()\n open_error_code = awg.openWithSlot(AWG_TYPE, chassis_number,\n slot_number)\n if open_error_code < 0:\n raise Exception(\"While opening the AWG in slot number %i,\"\n % slot_number + \" error code %i was encountered.\"\n % open_error_code + \" See the SD_Error() class in\"\n \" location: \" + KEYSIGHT_LIBRARY_PATH +\n \" for more details.\")\n return awg\n else:\n raise Exception(\"slot_number must be an integer from 2 to 10.\")", "title": "" }, { "docid": "d61806916c9556d9fd05e38073a79589", "score": "0.51427764", "text": "def _create_deck(self):\n # We are useing 2 decks so %13+1 will keep the cards in range\n for i in range(0, (13 * self.deck_count)):\n card_value = (i % 13) + 1\n self.deck.append(Spade(card_value))\n self.deck.append(Heart(card_value))\n self.deck.append(Diamond(card_value))\n self.deck.append(Club(card_value))", "title": "" }, { "docid": "ab4005ec6f2911326b7bf46c9fe2ba7c", "score": "0.513912", "text": "def _create_card(self, board_name, list_name):\n trello_board_id = self._find_board_id_by_name(board_name)\n trello_list_id = self._find_list_id_by_name(trello_board_id, list_name)\n api_url = \"https://api.trello.com/1/cards\"\n query_string = {\"idList\": trello_list_id, \"name\": \"bug1\", \"desc\": \"this card add auto\", \"keepFromSource\": \"all\"}\n query_string.update(self._get_queryparameters_keys())\n response_data = requests.request(\"POST\", api_url, params=query_string)\n card_data = json.loads(response_data.text)\n return card_data['id']", "title": "" }, { "docid": "cd42c27c08679b4a8bafc46dccf1d625", "score": "0.5121647", "text": "def tacacsplus_provider_create(handle, name, order=\"lowest-available\",\n key=None, port=\"49\", timeout=\"5\", retries=\"1\",\n 
enc_key=None, descr=None, **kwargs):\n\n from ucscsdk.mometa.aaa.AaaTacacsPlusProvider import \\\n AaaTacacsPlusProvider\n\n mo = AaaTacacsPlusProvider(\n parent_mo_or_dn=ucsc_base_dn + \"/tacacs-ext\",\n name=name,\n order=order,\n key=key,\n port=port,\n timeout=timeout,\n retries=retries,\n enc_key=enc_key,\n descr=descr)\n\n mo.set_prop_multiple(**kwargs)\n\n handle.add_mo(mo, True)\n handle.commit()\n return mo", "title": "" }, { "docid": "b2b34ca36c7bd14dc26208df6be0a4c8", "score": "0.51209587", "text": "def add_card(handler_input, response):\n # type: (HandlerInput, Response) -> None\n response.card = SimpleCard(\n title=skill_name,\n content=convert_speech_to_text(response.output_speech.ssml))", "title": "" }, { "docid": "ee282d154a9562788dd4fa045cef7077", "score": "0.5119999", "text": "def test_card_validity(self):\n card = Card(\"3\",\"♡\")\n with self.assertRaises(AssertionError):\n card = Card(\"13\",\"♡\")\n with self.assertRaises(AssertionError):\n card = Card(\"A\", \"x\")", "title": "" }, { "docid": "7c0bc0098164069f503723600d80a5f9", "score": "0.51156795", "text": "def encode_card(_c):\n assert(len(_c) == 2)\n _temp = np.zeroes((17, 17))\n # Note, ideally a matrix of shape (4, 13) should suffice\n # But zero-padding to 17x17 helps with convolutions and max pooling\n # (According to the paper, and also the referenced work by Clark and Storkey 2014)\n _temp[self.suits.index(_c[1]), self.cards.index(_c[0])] = 1\n return np.array(_temp)", "title": "" }, { "docid": "85bd741d265484ec73ca6b30de462fd9", "score": "0.51075107", "text": "def make_vcard(self) -> VCardTemp:\n return VCardTemp()", "title": "" }, { "docid": "58bee400ba84ab3ccb681f8e68becf66", "score": "0.5101189", "text": "def create_charge(self, card, **kwargs):\n stripe.api_key = self.get_api_key()\n capture = 'false'\n if kwargs.get('capture'):\n if not kwargs.get('capture', 'x') == 'false':\n capture = 'true'\n print \"Trying to create order capture = %s\" % capture\n if self.get_price() > 0:\n amount = self.to_cents()\n if not amount:\n raise Exception(\"Invalid Amount %s\" % amount)\n try:\n stripe.api_key = self.get_api_key()\n if not self.stripe_customer:\n self.stripe_customer = stripe.Customer.create(\n description='Temporal customer',\n card=card\n )\n self.stripe_object = stripe.Charge.create(\n currency=\"usd\",\n amount=amount,\n capture=capture,\n customer=self.stripe_customer.id\n )\n except stripe.CardError, e:\n raise Exception(\"Charge Card Error: %s\" % e)\n except Exception, e:\n raise Exception(\"Charge Exception: %s\" % e)\n self.stripe_id = self.stripe_object.__dict__.get('id')\n stripe.api_key = None\n return self.stripe_id", "title": "" }, { "docid": "86d684e3ce4e4e6f996d5d12afa76511", "score": "0.50954676", "text": "def create_account(self, account, name):\n\n raise PrivilegeError(\"Action not allowed in mode atm\")", "title": "" }, { "docid": "783fe26f6d0f7f4cef7671e3e0aa85c2", "score": "0.5080931", "text": "def __init__(self, card_suit, card_value):\n self.suit = card_suit\n self.value = card_value", "title": "" }, { "docid": "38eb73a1d1fb86602b3fc1b748a45526", "score": "0.50732064", "text": "def add_card(self, card: Card):\n super().add_card(card)", "title": "" }, { "docid": "22dfe76de4b651e918b9bec37f6823d1", "score": "0.5070166", "text": "def create_account(self, account_name, amount):\n try:\n amount = int(amount)\n except ValueError:\n logging.info('amount must be a integer')\n return False\n\n card_id = str(uuid.uuid4())\n\n if self.db_obj.admin_create_account(account_name, card_id, 
amount):\n                logging.info('admin create account success')\n                r = os.urandom(32)\n                rand_key = os.urandom(32)\n\n                return xmlrpclib.Binary(r + rand_key + card_id)\n\n        logging.info('admin create account failed')\n        return False", "title": "" }, { "docid": "ab7a437796c7c9ff5636cd5f816b51f3", "score": "0.5061067", "text": "def add_card(cls, card, comment=''):\n        sid = integer(card, 1, 'sid')\n        eid = integer(card, 2, 'eid')\n\n        qfluxs = []\n        j = 1\n        for i in range(3, len(card)):\n            q = double_or_blank(card, i, 'qFlux%i' % j)\n            qfluxs.append(q)\n            j += 1\n\n        assert len(qfluxs) > 0, qfluxs\n        qfluxs = wipe_empty_fields(qfluxs)\n        return QBDY2(sid, eid, qfluxs, comment=comment)", "title": "" }, { "docid": "8b9d30bdc2d277f41f715da9537dd632", "score": "0.5058855", "text": "def test_create_eip(self):\n        # billing = Billing('Postpaid', 'ByBandwidth', None, None)\n        self.the_client.create_eip(1, 'Test')", "title": "" }, { "docid": "6008e9562e4a9e27cc639c96817910bd", "score": "0.5055296", "text": "def create_deck():\n\n    return [PlayingCard(num, suit) for num in range(1, 14) for suit in SUITS]", "title": "" }, { "docid": "99ae44a6f41c301489e9d48cf166e0f9", "score": "0.5051738", "text": "def add_card(cls, card, comment=''):\n        sid = integer(card, 1, 'sid')\n        q0 = double(card, 2, 'q0')\n        t_source = double_or_blank(card, 3, 't_source')\n        ce = integer_or_blank(card, 4, 'ce', 0)\n        vector_tableds = [\n            integer_double_or_blank(card, 5, 'e1_tabled1', 0.0),\n            integer_double_or_blank(card, 6, 'e2_tabled2', 0.0),\n            integer_double_or_blank(card, 7, 'e3_tabled3', 0.0),\n        ]\n        control_id = integer_or_blank(card, 8, 'control_id', 0)\n\n        i = 1\n        eids = []\n        for ifield in range(9, len(card)):\n            eid = integer_or_string(card, ifield, 'eid_%i' % i)\n            eids.append(eid)\n            assert eid != 0, card\n            i += 1\n        elements = expand_thru_by(eids)\n        return QVECT(sid, q0, elements, t_source=t_source,\n                     ce=ce, vector_tableds=vector_tableds, control_id=control_id,\n                     comment=comment)", "title": "" }, { "docid": "2c63268efddf2bcd42b40837160ba52c", "score": "0.5042334", "text": "def __init__(self):\n        self.cards = []\n        for suit in range(4):\n            for rank in range(1, 14):\n                card = Card(suit, rank)\n                self.cards.append(card)", "title": "" }, { "docid": "3b1018ba6b83b3b4b1391526d7363d62", "score": "0.5031029", "text": "def create_cards(file_path: str) -> None:\n    subprocess.call(\n        [\n            \"python2.7\",\n            \"mtgencode/decode.py\",\n            \"-e\",\n            \"rfields\",\n            \"-g\",\n            \"encoded.txt\",\n            \"card.txt\",\n        ]\n    )\n    subprocess.call(\n        [\n            \"python2.7\",\n            \"mtgencode/decode.py\",\n            \"-e\",\n            \"rfields\",\n            \"-mse\",\n            \"encoded.txt\",\n            \"MSE/card\",\n        ]\n    )", "title": "" }, { "docid": "3cc369d3785b87ec6568380b195eb8c0", "score": "0.5021049", "text": "def create_customer_card(self,\n                             customer_id,\n                             body):\n\n        return super().new_api_call_builder.request(\n            RequestBuilder().server('default')\n            .path('/v2/customers/{customer_id}/cards')\n            .http_method(HttpMethodEnum.POST)\n            .template_param(Parameter()\n                            .key('customer_id')\n                            .value(customer_id)\n                            .should_encode(True))\n            .header_param(Parameter()\n                          .key('Content-Type')\n                          .value('application/json'))\n            .body_param(Parameter()\n                        .value(body))\n            .header_param(Parameter()\n                          .key('accept')\n                          .value('application/json'))\n            .body_serializer(APIHelper.json_serialize)\n            .auth(Single('global'))\n        ).response(\n            
ResponseHandler()\n .deserializer(APIHelper.json_deserialize)\n .is_api_response(True)\n .convertor(ApiResponse.create)\n ).execute()", "title": "" }, { "docid": "c97551e0d76e3f2b65ef8f3bc82bae0f", "score": "0.50027335", "text": "def add_inital_deck():\n attack1 = card.new(\"Baseball Bat\", card.IMAGE_DEFAULT, card.effect.Attack(0, 1))\n attack2 = card.new(\"Sword\", card.IMAGE_DEFAULT, card.effect.Attack(0, 2))\n attack3 = card.new(\"Axe\", card.IMAGE_DEFAULT, card.effect.Attack(0, 5))\n damage1 = card.new(\"Smack\", card.IMAGE_DEFAULT, card.effect.Damage(0, 2))\n damage2 = card.new(\"punch\", card.IMAGE_DEFAULT, card.effect.Damage(0, 3))\n damage3 = card.new(\"Kick\", card.IMAGE_DEFAULT, card.effect.Damage(0, 4))\n shield1 = card.new(\"Wooden Shield\", card.IMAGE_DEFAULT, card.effect.Sheild(0, 2))\n shield2 = card.new(\"Chainmail\", card.IMAGE_DEFAULT, card.effect.Sheild(0, 3))\n shield3 = card.new(\"Half Plate\", card.IMAGE_DEFAULT, card.effect.Sheild(0, 4))\n steal = card.new(\"Pickpocket\", card.IMAGE_DEFAULT, card.effect.Steal(0, 0))\n\n DECK.add_card(attack1.copy()) # attack cards\n DECK.add_card(attack1.copy())\n DECK.add_card(attack1.copy())\n DECK.add_card(attack1.copy())\n DECK.add_card(attack2.copy())\n DECK.add_card(attack2.copy())\n DECK.add_card(attack2.copy())\n DECK.add_card(attack2.copy())\n DECK.add_card(attack3.copy()) # more powerful cards are rarer\n DECK.add_card(attack3.copy())\n\n DECK.add_card(damage1.copy()) # damage cards\n DECK.add_card(damage1.copy())\n DECK.add_card(damage1.copy())\n DECK.add_card(damage1.copy())\n DECK.add_card(damage2.copy())\n DECK.add_card(damage2.copy())\n DECK.add_card(damage2.copy())\n DECK.add_card(damage2.copy())\n DECK.add_card(damage3.copy())\n DECK.add_card(damage3.copy())\n\n DECK.add_card(shield1.copy()) # sheild cards\n DECK.add_card(shield1.copy())\n DECK.add_card(shield1.copy())\n DECK.add_card(shield1.copy())\n DECK.add_card(shield2.copy())\n DECK.add_card(shield2.copy())\n DECK.add_card(shield2.copy())\n DECK.add_card(shield2.copy())\n DECK.add_card(shield3.copy())\n DECK.add_card(shield3.copy())\n\n DECK.add_card(steal.copy()) # Add a bunch of steal cards\n DECK.add_card(steal.copy())\n DECK.add_card(steal.copy())\n DECK.add_card(steal.copy())\n DECK.add_card(steal.copy())\n DECK.add_card(steal.copy())\n DECK.add_card(steal.copy())\n DECK.add_card(steal.copy())\n DECK.add_card(steal.copy())\n DECK.add_card(steal.copy())", "title": "" }, { "docid": "ee9cf7e3e57e03e878e5b50bd17a40e1", "score": "0.49981585", "text": "def create_adaptive_card(body: list, actions: list = None) -> dict:\n adaptive_card: dict = {\n 'contentType': 'application/vnd.microsoft.card.adaptive',\n 'content': {\n '$schema': 'http://adaptivecards.io/schemas/adaptive-card.json',\n 'version': '1.0',\n 'type': 'AdaptiveCard',\n 'msteams': {\n 'width': 'Full'\n },\n 'body': body\n }\n }\n if actions:\n adaptive_card['content']['actions'] = actions\n return adaptive_card", "title": "" }, { "docid": "0c1dba4c96bb8b1c2bd90c01a9b70eaa", "score": "0.49932575", "text": "def cmd_select_card(self, rca=0):\n cmd = sdio_utils.init_cmd(cmd_num=7)\n cmd[39:24] = rca\n yield self.phy.acquire_cmd_lock()\n yield self.phy.send_cmd(cmd)\n self.log.debug(\"CMD select card: 'b%s\" %cmd.binstr)\n response = yield self.get_cmd_response(cmd)\n self.phy.release_cmd_lock()\n self.log.debug(\"Response: 'b%s\" %response.binstr)\n raise ReturnValue(response)", "title": "" }, { "docid": "c3dd743b73c9f092069fd09c0c2435d9", "score": "0.49775198", "text": "def main():\n h = Hand()\n 
h.addCard(5)\n print(h)", "title": "" }, { "docid": "5bc66a062d2e00772961620368549270", "score": "0.49721953", "text": "def add_asic(self, ker_obj, acc_id, area_ratio, tech_model):\n area = self.sys_area * area_ratio\n kid = ker_obj.name\n\n if kid not in self.asic_dict:\n # add a new ASIC accelerator\n self.asic_dict[kid] = dict()\n\n if acc_id not in self.asic_dict[kid]:\n if self.thru_core_area < area:\n raise MPSoCError('not enough area for this ASIC {0}'.format(acc_id))\n self.thru_core_area = self.thru_core_area - area\n asic_ucore = Accelerator(acc_id=acc_id, ker_obj=ker_obj, area=area,\n tech=self.tech, tech_model=tech_model)\n self.asic_dict[kid][acc_id] = asic_ucore\n else:\n raise MPSoCError('Accelerator {0} for kernel {1} already exist'.format(\n acc_id, kid))\n\n # need to update dim_perf later\n self.dim_perf = None", "title": "" }, { "docid": "822a284627df5706bee3d965b4e670ea", "score": "0.49608168", "text": "def simple_card(self, title=None, content=None):\n self.response.card = _Card(type='Simple', title=title, content=content)\n return self", "title": "" }, { "docid": "9aa101c4f6c23cf096b86dbb227b6c53", "score": "0.495708", "text": "def generate_creative(generate_advertiser, generate_concept, api):\n advertiser = generate_advertiser\n concept = generate_concept\n creative_props = {\n \"ad_server_type\": 'TERMINALONE',\n \"advertiser_id\": 147517,\n \"concept_id\": 965193,\n \"file_type\": 'jpg',\n \"height\": 250,\n \"name\": 'bulk-edit-creative test',\n \"width\": 250,\n \"tpas_ad_tag_name\": \"tag name\",\n \"external_identifier\": \"q908t97fuv\",\n \"status\": 1\n }\n creative = api.new(\"atomic_creative\", properties=creative_props)\n creative.save()\n return creative", "title": "" }, { "docid": "1089e5fb53b0f1d08c55864076414e7a", "score": "0.49464613", "text": "def is_ace(card):\r\n ####################################################\r\n if card == \"A\":\r\n return(True)\r\n else:\r\n return(False)", "title": "" }, { "docid": "58d32b926c829b8f757f6dc73777e074", "score": "0.4941972", "text": "def test_create_account():\n params = sp.CreateAccountParams(\n from_pubkey=Account().public_key(),\n new_account_pubkey=Account().public_key(),\n lamports=123,\n space=1,\n program_id=PublicKey(1),\n )\n assert sp.decode_create_account(sp.create_account(params)) == params", "title": "" }, { "docid": "a71ff6a69dc0e4bf733372985ed14209", "score": "0.4938751", "text": "def build_deck():\n suits = {\"H\", \"S\", \"C\", \"D\"}\n return [Card(rank, suit) for rank in range(2,15) for suit in suits]", "title": "" }, { "docid": "37683bbe88799d1af79e3c94ed7b27b0", "score": "0.49335334", "text": "def test_is_soft_with_ace(setup_hand_with_ace):\n assert setup_hand_with_ace.is_soft()\n setup_hand_with_ace.add_card(card='A')\n assert setup_hand_with_ace.is_soft()\n setup_hand_with_ace.add_card(card='9')\n assert not setup_hand_with_ace.is_soft()", "title": "" }, { "docid": "26f66b4fc9737f554ed3225f5e23dd70", "score": "0.4931371", "text": "def __init__(self, card=card.Card):\n self.cards = []\n suits = ['s', 'h', 'd', 'c']\n ranks = ['A', '2', '3', '4', '5', '6', '7', '8', '9', 'T', 'J', 'Q',\n 'K']\n\n for suit in suits:\n for rank in ranks:\n self.cards.append(card(rank,suit))", "title": "" }, { "docid": "5495422192dd8884378273d990456147", "score": "0.49303305", "text": "def SCardControl(hcard, dwControlCode, inbuffer):\n return _scard.SCardControl(hcard, dwControlCode, inbuffer)", "title": "" }, { "docid": "a47414725a9fdd62119fc88aca7b9b4e", "score": "0.49139902", "text": "def 
test_cards(setup_hand_with_ace):\n assert setup_hand_with_ace.cards == ['A', '6']", "title": "" }, { "docid": "6e40c03556469f135e40b916ee34687f", "score": "0.49117264", "text": "def __init__(self):\n self.cards = []\n for suit in SUITS:\n for face in FACES:\n self.cards.append(Card(face, suit))\n random.shuffle(self.cards) # shuffle로 랜덤 제공", "title": "" }, { "docid": "a56114b07baf7a70e6ca7dcd1b4406be", "score": "0.49017635", "text": "def createDeck(self, deck):\n wr, r = self.send_as_json(action = \"createDeck\", params = dict(\n deck = deck\n ))\n self._check(wr, r, 'Error creating deck \"%s\"', deck)\n return r", "title": "" }, { "docid": "6955fd59e6a017c3eccd9c681fdd3cd8", "score": "0.48957136", "text": "def declare_card(self, id, card):\n self.cards[id] = card\n self.nb_cards += 1\n self.stack_len += 1\n self.sorted_card_id.append(id)", "title": "" }, { "docid": "4f33cedd1eacacdcf91f994507b1506c", "score": "0.4894115", "text": "def create_cards():\n\n cards = list(range(2, 15)) * 4\n random.shuffle(cards)\n return cards", "title": "" }, { "docid": "820a78cc999419e6c6b8cd99485e5cac", "score": "0.48936287", "text": "def create(self, request):\n aad = Aad()\n aad.dom = request.data[\"dom\"]\n aad.manufacturer = request.data[\"manufacturer\"]\n aad.model = request.data[\"model\"]\n aad.serialNumber = request.data[\"serialNumber\"]\n aad.notes = request.data[\"notes\"]\n\n\n try:\n aad.save()\n serializer = AadSerializer(aad, context={'request': request})\n return Response(serializer.data)\n except ValidationError as ex:\n return Response({\"reason\": ex.message}, status=status.HTTP_400_BAD_REQUEST)", "title": "" }, { "docid": "71645559d96aaa93f5326fe9905b1454", "score": "0.48896414", "text": "def add_card(cls, card, comment=''):\n sid = integer(card, 1, 'sid')\n qvol = double(card, 2, 'qvol')\n control_point = integer_or_blank(card, 3, 'control_id', 0)\n\n i = 1\n eids = []\n for ifield in range(4, len(card)):\n eid = integer_or_string(card, ifield, 'eid_%i' % i)\n eids.append(eid)\n i += 1\n elements = expand_thru_by(eids)\n return QVOL(sid, qvol, control_point, elements, comment=comment)", "title": "" }, { "docid": "7423e414c3d39b38ce874e3d5324556e", "score": "0.48809987", "text": "def dai_examples() -> ui.FormCard:\n\n card = ui.form_card(\n box=ui.box(zone='dai_examples', height='565px'),\n items=[\n ui.separator(label='Driverless AI'),\n ui.text(content='<center><i>Will be added soon</i></center>')\n ]\n )\n\n return card", "title": "" }, { "docid": "31db5546d59a01c42196e797234c4f96", "score": "0.48798275", "text": "def makeBarcode(barcode):\n lines = []\n lines.append(\"^XA\") # start of label\n # download and store format, name of format,\n # end of field data (FS = field stop)\n lines.append(\"^DFFORMAT^FS\")\n lines.append(\"^LH0,0\") # label home position (label home = LH)\n # AF = assign font F, field number 1 (FN1),\n # print text at position field origin (FO) rel. 
to home\n lines.append(\"^FO40,20^AFN 78,39^FN1^FS\")\n # BC=barcode 128, field number 2, Normal orientation,\n # height 70, no interpreation line.\n lines.append(\"^XZ\") # end format\n\n lines.append(\"^XA\") # start of label format\n lines.append(\"^XFFORMAT^FS\") # label home position\n lines.append(\"^FN1^FD{0}^FS\".format(barcode)) # this is the printed barcode\n lines.append(\"^XZ\")\n return lines", "title": "" }, { "docid": "4e2e1a9fd5503ded7d4de6a0a6e519c2", "score": "0.48777467", "text": "def create_asset(domain, asset_name, asset_precision):\n tx = iroha_config.IROHA_ADMIN.transaction(\n [iroha_config.IROHA_ADMIN.command('CreateAsset',\n asset_name=asset_name,\n domain_id=domain,\n precision=asset_precision)])\n IrohaCrypto.sign_transaction(tx, iroha_config.ADMIN_PRIVATE_KEY)\n send_transaction_and_print_status(tx)", "title": "" }, { "docid": "9c84955c867aa810007d4dbc2f45e5ba", "score": "0.48758438", "text": "def __init__(self, name, deck):\n self.name = name\n self.hand = [deck.deal_card() for i in range(7)]", "title": "" }, { "docid": "71e44a48b1783c223c4076017a1b4adc", "score": "0.48744822", "text": "def create_account_with_assets(domain, name, public_key, asset_name, asset_qty):\n asset_id = asset_name + '#' + domain\n # 1. Create account\n tx = iroha_config.IROHA_ADMIN.transaction(\n [iroha_config.IROHA_ADMIN.command('CreateAccount',\n account_name=name,\n domain_id=domain,\n public_key=public_key)])\n IrohaCrypto.sign_transaction(tx, iroha_config.ADMIN_PRIVATE_KEY)\n send_transaction_and_print_status(tx)\n\n # 2. Create credit for the user\n tx = iroha_config.IROHA_ADMIN.transaction([iroha_config.IROHA_ADMIN.command('AddAssetQuantity',\n asset_id=asset_id,\n amount=asset_qty)])\n IrohaCrypto.sign_transaction(tx, iroha_config.ADMIN_PRIVATE_KEY)\n send_transaction_and_print_status(tx)\n\n # 3. 
Transfer credit to the user\n dest_account_id = name + '@' + domain\n tx = iroha_config.IROHA_ADMIN.transaction([\n iroha_config.IROHA_ADMIN.command('TransferAsset',\n src_account_id='admin@test',\n dest_account_id=dest_account_id,\n asset_id=asset_id,\n description='initial credit',\n amount=asset_qty)])\n IrohaCrypto.sign_transaction(tx, iroha_config.ADMIN_PRIVATE_KEY)\n send_transaction_and_print_status(tx)", "title": "" }, { "docid": "fe6afbab1661df52d763e404f38c8ede", "score": "0.48736462", "text": "def __init__(self, cardConnectionComponent):\n self.component = cardConnectionComponent", "title": "" }, { "docid": "02ed8275e16e1c309aae3c54c630e43f", "score": "0.4864674", "text": "def create_cards(num_cards):\n\n cards = []\n\n for i in range(num_cards):\n color = fake.safe_color_name()\n # ensure color is never white or yellow so the font is legible \n while color == 'white' or color == 'yellow':\n color = fake.safe_color_name()\n word = fake.unique.word()\n cards.extend([{'id': str(i) + 'a',\n 'word': word,\n 'color': color},\n {'id': str(i) + 'b',\n 'word': word,\n 'color': color}])\n return cards", "title": "" }, { "docid": "781ed2b558f62620d21a8ace4440264d", "score": "0.48641574", "text": "def make_txt_card(self):\n self.process_lines = self._get_process_lines()\n self.n_systematics, self.systematics_lines = self._get_systematics_lines()\n\n txt_card = \"\"\"\n Datacard for event category: {cat}\n {card_header}\n\n ------------------------------------------------------------\n imax 1 number of bins\n jmax {jmax} number of processes minus 1\n kmax {kmax} number of nuisance parameters\n ------------------------------------------------------------\n {shapes_line}\n ------------------------------------------------------------\n bin cat_{cat}\n observation {n_observed}\n ------------------------------------------------------------\n bin {process_cat}\n process {process_name}\n process {process_number}\n rate {process_rate}\n ------------------------------------------------------------\n \"\"\".format(cat = self.d_input['category'],\n jmax = (len(self.process_list)-1),\n kmax = self.n_systematics,\n shapes_line = self._get_shapes_line(),\n n_observed = self._get_observed_rate(),\n process_cat = self.process_lines['bin'],\n process_name = self.process_lines['name'],\n process_number = self.process_lines['number'],\n process_rate = self.process_lines['rate'],\n card_header = self.card_header\n )\n\n txt_card = textwrap.dedent(txt_card)\n txt_card+= textwrap.dedent(self.systematics_lines)\n print txt_card\n\n file_datacard_name = os.path.join(self.out_dir,self.datacard_name+'.txt')\n if self.lumi_scaling != 1.0:\n file_datacard_name = file_datacard_name.replace('.txt',\n '.lumi_scale_{0:3.2f}.txt'\n .format(self.lumi_scaling))\n\n with open(file_datacard_name, 'w') as file_datacard:\n file_datacard.write(textwrap.dedent(txt_card))\n #file_datacard.write(textwrap.dedent(self.systematics_lines))\n self.log.info('Datacard text saved: {0}'.format(file_datacard_name))", "title": "" }, { "docid": "367598baec3d221f9d0e2723ceadb1bc", "score": "0.4863732", "text": "def __init__(self, provider, account, address):\n super().__init__(provider, account, address, SANITY_RATES_CODE.abi)", "title": "" }, { "docid": "31da87a5ac2cd218396650c48129b7c9", "score": "0.4862362", "text": "def create_cards():\n #This is the request that gets all the cards from the API\n server_cards3 = requests.get('https://rws-cards-api.herokuapp.com/api/v1/cards/')\n #this turns the json into a dictionary\n api_cards = 
server_cards3.json()\n for x in range(78):\n api_cardsx = api_cards['cards'][x]\n card_name = api_cardsx['name']\n #card_name_short = api_cardsx['name_short']\n card_number = api_cardsx['value_int'] \n card_desc = api_cardsx['desc']\n card_meaning_up = api_cardsx['meaning_up']\n card_meaning_reversed = api_cardsx['meaning_rev']\n card_type = api_cardsx['type'] \n if card_type == 'minor':\n card_suit = api_cardsx['suit']\n else: \n card_suit = None\n value = str(api_cards['cards'][x]['value_int'])\n \n \"\"\"this figures out if the card's value integer is one digit or two,\n if the card type is major or minor, the suit, and then creates the correct\n naming convention to pull the correct image from the server\n \"\"\"\n if len(value) == 1:\n value = \"0\" + value\n if card_type == \"major\":\n card_image = \"/static/cards/m\" + value + \".jpg\"\n elif card_type == \"minor\":\n suit_char = card_suit[0]\n card_image = \"/static/cards/\" + suit_char + value + \".jpg\" \n \n print(card_name, card_number, card_desc, card_image)\n \n card = Card(card_name=card_name, \n card_number=card_number, \n card_meaning_up=card_meaning_up, \n card_meaning_reversed=card_meaning_reversed,\n card_desc=card_desc, \n card_suit=card_suit,\n card_type=card_type,\n card_image=card_image\n )\n db.session.add(card)\n db.session.commit()", "title": "" }, { "docid": "3faeb931d4bb73123571eb3bff649c5d", "score": "0.48601422", "text": "def create(self, **params):\n if not 'card' in params and 'name' in params:\n params = {'card': params}\n return Token(self.__client, self.__client.post('/tokens', params))", "title": "" }, { "docid": "49aba9e99809d3a553fa164f8f23ce92", "score": "0.48599455", "text": "def __init__(self, card, parent=None):\n self.card = card\n self.parent = parent\n self.reset()", "title": "" }, { "docid": "ad9dfe3a3179647e421d0dce802d1219", "score": "0.4856285", "text": "def add_flash_card():\n card_name = input(\"Enter Card Name: \")\n definition = input(\"Enter Card Definition: \")\n save_to_file = input(\"Save To File (Y/N)?: \").lower()\n\n if save_to_file == 'y':\n save_to_file = True\n else:\n save_to_file = False\n\n update_flash_cards(card_name, definition, save_to_file)", "title": "" }, { "docid": "15a948471601ae76af0e470009926c58", "score": "0.48497504", "text": "def __init__(self, cards):\n\n if type(cards) is str:\n cards = cards.split()\n\n self.cards = [c if type(c) is Card else Card(c) for c in cards]\n \n assert len(self.cards) == 5, \"Hands must have 5 cards.\"", "title": "" }, { "docid": "b8d6a33e3d64805370b864283cbbb647", "score": "0.48472223", "text": "def create_charge_credit_card_xml(data: dict) -> str:\n data = credit_card_xml_string.format(**data)\n return remove_none_tags(data)", "title": "" }, { "docid": "36ff0ff93f275d9332207722abda013a", "score": "0.48457614", "text": "def place_card(self, card):\n\t\tself.cards.append(card)", "title": "" }, { "docid": "cf5a72bb83da5f1e15de00a4d79a480b", "score": "0.4839856", "text": "def create_barcode_opp(trello_db, barcode, desc=''):\n print \"Creating learning opportunity for barcode {}.\".format(unicode(barcode))\n opp = {\n 'type': 'barcode',\n 'opp_id': generate_opp_id(),\n 'barcode': barcode,\n 'desc': desc,\n 'created_dt': datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n }\n\n trello_db.insert('learning_opportunities', opp)\n return opp", "title": "" } ]
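Many of the negative passages above (the CGAP, TEMPB3, CFAST, QBDY1/QBDY2/QBDY3, CRAC2D/CRAC3D, TEMPD, QVECT, QVOL, QHBDY, CSHEAR, and PLOTEL snippets) share one parser shape: a classmethod add_card that reads typed, positional fields out of a parsed card and constructs the element. A condensed, self-contained sketch of that pattern follows; the CFOO card and the two field readers below are illustrative stand-ins, not the real helpers those passages import:

# Condensed sketch of the add_card classmethod pattern seen above.
# CFOO and these minimal field readers are illustrative stand-ins.
def integer(card, i, name):
    return int(card[i])

def integer_or_blank(card, i, name, default=None):
    blank = i >= len(card) or card[i] in (None, '')
    return default if blank else int(card[i])

class CFOO:
    def __init__(self, eid, pid, nids, comment=''):
        self.eid, self.pid, self.nids, self.comment = eid, pid, nids, comment

    @classmethod
    def add_card(cls, card, comment=''):
        eid = integer(card, 1, 'eid')
        pid = integer_or_blank(card, 2, 'pid', eid)  # pid defaults to eid when blank
        nids = [integer(card, 3, 'n1'), integer(card, 4, 'n2')]
        assert len(card) <= 5, f'len(CFOO card) = {len(card):d}\ncard={card}'
        return cls(eid, pid, nids, comment=comment)

elem = CFOO.add_card(['CFOO', '10', '', '1', '2'])
assert (elem.eid, elem.pid, elem.nids) == (10, 10, [1, 2])

Returning cls(...) rather than the concrete class name keeps the sketch subclass-friendly, which is presumably why the passages define add_card as a classmethod in the first place.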
d9bcdbc72f4aa3fd4e0f5c9765f49d60
An array of string values. If the operator is In or NotIn, the values array must be nonempty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
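The per-operator constraints stated in this query translate directly into a small validation routine. A hedged sketch of that mapping in Python; the function name and error messages are illustrative, while the rules themselves are the ones quoted above:

# Illustrative check of the operator/values contract described above.
def validate_values(operator: str, values: list) -> None:
    if operator in ('In', 'NotIn'):
        if not values:
            raise ValueError(f'{operator} requires a non-empty values array')
    elif operator in ('Exists', 'DoesNotExist'):
        if values:
            raise ValueError(f'{operator} requires an empty values array')
    elif operator in ('Gt', 'Lt'):
        if len(values) != 1:
            raise ValueError(f'{operator} takes a single value')
        int(values[0])  # the single element is interpreted as an integer
    else:
        raise ValueError(f'unknown operator: {operator!r}')

validate_values('In', ['ssd', 'nvme'])  # ok: non-empty array
validate_values('Exists', [])           # ok: empty array
validate_values('Gt', ['4'])            # ok: single value, read as the integer 4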
[ { "docid": "83bedbf18fa8b31ed46d6fdafb101161", "score": "0.0", "text": "def values(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"values\")", "title": "" } ]
[ { "docid": "44858bb949c773b36d20a908517608f3", "score": "0.58816344", "text": "def getOperatorQuery(values:list):\n if(values[0]=='>'):\n return {'$gt':float(values[1])}\n elif(values[0]=='>='):\n return {'$gte':float(values[1])}\n elif(values[0]=='<'):\n return {'$lt':float(values[1])}\n elif(values[0]=='<='):\n return {'$lte':float(values[1])}\n elif(values[0]=='='):\n return {'$eq':float(values[1])}\n elif(values[0]=='!='):\n return {'$ne':float(values[1])}", "title": "" }, { "docid": "ee269c804e32b283130c142ad0d603e1", "score": "0.5820587", "text": "def operator_unify(operator):\n unified_operator = []\n for term in operator:\n const = term[0]\n vars_set = term[1]\n power = term[2]\n if type(power) is list:\n unified_operator.append([const, vars_set, power])\n else:\n unified_operator.append([const, [vars_set], [power]])\n return unified_operator", "title": "" }, { "docid": "c7fdbef61bdfc825be4ae096626650dc", "score": "0.57979757", "text": "def filter_vals(self):\n if any([self.filter.value, self.filter.operator]):\n keys = [\n \"operator\",\n \"value\",\n \"ignore_case_flag\",\n \"not_flag\",\n \"all_values_flag\",\n \"max_age_seconds\",\n \"value_type\",\n ]\n vals = [\"{}: {!r}\".format(k, getattr(self.filter, k)) for k in keys]\n else:\n vals = []\n return vals", "title": "" }, { "docid": "dfe2b84d57f50757a169efbb2e3d1893", "score": "0.5611642", "text": "def operators(self) -> Set[str]:\n return self.set_creator(self.OP_SET_OPTION)\n # Task 1.3", "title": "" }, { "docid": "704d75a67b6a042c55ed0a5954302cac", "score": "0.53698593", "text": "def operator_names():\n return ExtensionManager('cosmic_ray.operators').names()", "title": "" }, { "docid": "cc6532fc64fba015a10846fb5a946108", "score": "0.52851593", "text": "def get_operators(self):\n return self.SELECTION_OPERATORS + self.VALID_OPERATORS", "title": "" }, { "docid": "cc6532fc64fba015a10846fb5a946108", "score": "0.52851593", "text": "def get_operators(self):\n return self.SELECTION_OPERATORS + self.VALID_OPERATORS", "title": "" }, { "docid": "e4f5cdd58baf4fcad4b944eb490fc412", "score": "0.52644604", "text": "def get_operators(self):\n raise NotImplementedError", "title": "" }, { "docid": "c79a0dcc9735fd7fde3c6c5cb8632fe0", "score": "0.5223429", "text": "def plainFlag(self,values):\n #TODO do we keep this input argument as an array?\n elementary_flags = []\n for i in range(len(self.criteria)):\n current_values = []\n for j in range(len(self.inputsForCriterion[i])):\n current_values.append(values[self.inputsForCriterion[i][j]])\n new_elementary_flag = self.criteria[i].flag(*current_values)\n elementary_flags.append(new_elementary_flag)\n return self._interpreter(elementary_flags)", "title": "" }, { "docid": "97a68cfb37d95ecacdce1dd5d814662f", "score": "0.52177423", "text": "def set_op_values(transformer, ops, op_values):\n for op in ops:\n set_op_value(transformer, op, op_values[op.uuid.bytes])", "title": "" }, { "docid": "1a3253fdb32fe09076082a9505ab72d4", "score": "0.5203258", "text": "def check_operation(data):\n if not set(data.keys()).issubset(['in', 'eq', 'not_in']):\n raise ValidationError(\"Its operation should be `eq`, `in` or `not_in`.\")", "title": "" }, { "docid": "2b814d61fa47e7238ab09b248337751f", "score": "0.51989794", "text": "def _get_predicate(field, operator, values=None):\n return {'field': field, 'operator': operator, 'values': values}", "title": "" }, { "docid": "1fcda78a6afa8c268dcd76fcd905ffdd", "score": "0.51647455", "text": "def in_list(field, values):\n choices = \", \".join([\"'%s'\" % escape(x) for x 
in values])\n return \"%s IN (%s)\" % (sql_field, choices)", "title": "" }, { "docid": "dca478bbbd0af994e0604647036bd498", "score": "0.5159862", "text": "def test_series_operators_comp_str_scalar(self):\n S = pd.Series(['aa', 'aa', '', '', 'b', 'b', 'cccc', None, 'dd', 'ddd', None])\n\n scalar_values = ['a', 'aa', 'ab', 'ba', '']\n comparison_binops = ('<', '>', '<=', '>=', '!=', '==')\n for operator in comparison_binops:\n test_impl = _make_func_use_binop1(operator)\n hpat_func = self.jit(test_impl)\n for scalar in scalar_values:\n with self.subTest(left=S, right=scalar, operator=operator):\n pd.testing.assert_series_equal(hpat_func(S, scalar), test_impl(S, scalar))", "title": "" }, { "docid": "d79b1c929e0d55fdb2361ccb1f2bd4bc", "score": "0.5097348", "text": "def _render_single_unary_expr(self, schema_fields, operator, value):\n right_hand = value.str_value\n if right_hand[0] != u'\"':\n right_hand = u'\"{}\"'.format(right_hand)\n if value.stem:\n right_hand = right_hand + u'~'\n\n if schema_fields == self.GLOBAL_SEARCH:\n # We need to add all text, atom and html fields to query_fields list\n self.has_string_values = True\n if value.has_number_value:\n # We need to add all number fields to query_fields list\n self.has_number_values = True\n if value.has_date_value:\n # We need to add all date fields to query_fields list\n self.has_date_values = True\n return u'NOT {}'.format(right_hand) if value.not_ else right_hand\n\n # Connect by OR rendered statement for each field type\n statements = []\n for schema_field in schema_fields:\n # Skip if value is not applicable for field type.\n if schema_field.type == _SOLR_TYPE.TEXT_FIELD:\n if operator != parser.EQUALS:\n # Can't compare using > < >= <=\n continue\n elif schema_field.type == _SOLR_TYPE.ATOM_FIELD:\n if operator != parser.EQUALS or value.stem:\n # Can't stem atom field or compare using > < >= <=\n continue\n elif schema_field.type == _SOLR_TYPE.NUMBER_FIELD:\n if not value.has_number_value or value.stem:\n # Can't search text against number field\n continue\n elif schema_field.type == _SOLR_TYPE.DATE_FIELD:\n if not value.has_date_value or value.stem:\n # Can't search text against date field\n continue\n elif schema_field.type == _SOLR_TYPE.GEO_FIELD:\n logger.warning('Geo location queries are not supported yet.')\n continue\n else: # schema_field.type is not queryable\n continue\n rendered = self._render_field_operator_value(\n schema_field, operator, right_hand\n )\n statements.append(u'NOT {}'.format(rendered) if value.not_ else rendered)\n\n if not statements:\n # e.g.: searching \"word\" against filed with ONLY date type.\n raise self.NotApplicableValue()\n\n if len(statements) == 1:\n return statements[0]\n\n # e.g.: searching \"1999-10-20\" against field with type date and text.\n # Any match should be enough.\n return u'({})'.format(u' OR '.join(statements))", "title": "" }, { "docid": "32857fccb67d48eae284c37f6a40ff82", "score": "0.50827897", "text": "def parse(self, data):\n val = data.get(self.fname, missing)\n if not isinstance(val, dict):\n return (self.operators['$eq'], self.field.deserialize(val)),\n\n return tuple(\n (\n self.operators[op],\n (self.field.deserialize(val)) if op not in self.list_ops else [\n self.field.deserialize(v) for v in val])\n for (op, val) in val.items() if op in self.operators\n )", "title": "" }, { "docid": "de8e8cf35483fde2644e902e4a86701e", "score": "0.5074726", "text": "def ops(self):\n return [self.op(i) for i in range(self.countOperand)]", "title": "" }, { "docid": 
"0843acb5441a943ee896025a2461d1fc", "score": "0.50740886", "text": "def _is_operator(self, value):\n is_operator = False\n for op_list in self.pemdas:\n is_operator = True if value in op_list else is_operator\n return is_operator", "title": "" }, { "docid": "4a9f3118f23b0372e317413017a945b8", "score": "0.50346273", "text": "def operator(args):\n try:\n check = False # Check if the operator is \"==\"\n for i in range(len(args)):\n if args[i] == \"=\" :\n if args[i+1] == \"=\":\n check = True \n j = i # The index of the \"==\" operator\n break\n\n else:\n # Reassignment '=' operator\n create_BF(args)\n return \n\n\n if check: equal(args, j) # equality operator, a special case, is called\n else:\n commands = args.split()\n\n for i in range(len(commands)):\n if commands[i] in _workspace:\n # This is a BF\n exp = \"(\" + _workspace[commands[i]].expression() + \")\"\n # Repacing the name with the actual expression\n commands[i] = exp\n\n new_exp = \"\"\n for i in commands:\n # Form the new BF\n new_exp += i + \" \"\n\n create_BF(new_exp)\n\n except:\n printc(\"Please check the syntax.\", fail)", "title": "" }, { "docid": "294f9c1dd81d4c154895a9d557304e74", "score": "0.5021817", "text": "def get_operators(self):\n return self.SELECTION_OPERATORS", "title": "" }, { "docid": "63d9112da12c28960573b8170d418408", "score": "0.50189644", "text": "def logic_subset(self, op=None):\n if op is None:\n return self.logic\n else:\n return set(x for x in self.logic if x.op in op)", "title": "" }, { "docid": "2cdb58ef44e05737f157dacd44b40c50", "score": "0.49929684", "text": "def _append_query_filter(filterList, operator, left, right):\n if operator == '!=' or operator == '<>':\n filterList.append(left != right)\n elif operator == '==':\n filterList.append(left == right)\n elif operator == '>':\n filterList.append(left > right)\n elif operator == '<':\n filterList.append(left < right)\n elif operator == '>=':\n filterList.append(left >= right)\n elif operator == '<=':\n filterList.append(left <= right)\n\n return filterList", "title": "" }, { "docid": "db8b8412449b61826ff1f94e39601408", "score": "0.4946166", "text": "def __prepare_rest_search(self, values, not_values):\n to_return = []\n if values is not None:\n if isinstance(values, list):\n to_return += values\n else:\n to_return.append(values)\n if not_values is not None:\n if isinstance(not_values, list):\n to_return += ['!{}'.format(v) for v in not_values]\n else:\n to_return.append('!{}'.format(not_values))\n return to_return", "title": "" }, { "docid": "dd50fe9c0c663b44c9b9d4e7420c5b10", "score": "0.4944279", "text": "def operands(self):\n return tuple(set([\n call[argid]\n for call in self.calls\n if isinstance(call, signature.Call)\n for argid in call.sig.dataargs()\n if isinstance(call[argid], str)\n ]))", "title": "" }, { "docid": "d1a509481df24e78fce370c1da9369ab", "score": "0.49435666", "text": "def getbinaryoperators(self):\n bin_ops = []\n for tt in TokenType:\n if self.getbinaryoperatorprecedence(tt) > 0:\n bin_ops.append(tt)\n return bin_ops", "title": "" }, { "docid": "31c491f10ded744c3d5c6a88787c4d62", "score": "0.49261737", "text": "def __get_sql_filter_equivalent(self, field, operation, value):\n result = \"\"\n if operation in (\"not in\", \"in\"):\n prepared_value = self.__prepare_filter_string(str(value))\n if type(value) in (str, unicode):\n if prepared_value == value:\n ## It was just a regular string, need to add quotes so it's not evaluated to a variable\n prepared_value = '\"' + prepared_value + '\"'\n prepared_value = '[' + 
prepared_value + ']'\n if operation == \"not in\":\n result = result + \"not_(\" + field + \".in_(\" + prepared_value + \"))\"\n elif operation == \"in\":\n result = result + field + \".in_(\" + prepared_value + \")\"\n elif operation == \"like\":\n result = result + field + \".ilike('\"\n filter_value = self.__prepare_filter_string(str(value).lstrip().rstrip())\n if '%' not in filter_value:\n filter_value = '%' + filter_value + '%'\n result = result + filter_value + \"')\"\n else:\n result = result + field\n result = result + operation\n prepared_value = self.__prepare_filter_string(str(value))\n if type(value) in (str, unicode, datetime.datetime):\n if prepared_value == str(value):\n ## It was just a regular string, need to add quotes so it's not evaluated to a variable\n prepared_value = '\"' + prepared_value + '\"'\n result = result + prepared_value\n return result", "title": "" }, { "docid": "0e3f864c5be976fee1d2ed006904848a", "score": "0.4919128", "text": "def queryIN(field, list, valuesAreText):\n q = field + \" IN (\"\n\n first = True\n\n for val in list:\n if not first: q += \", \"\n\n if valuesAreText:\n q += \"'{}'\".format(val)\n else:\n q += \"{}\".format(val)\n\n first = False\n q += \")\"\n\n return q", "title": "" }, { "docid": "3a489a7d28658287b72432344843cfbc", "score": "0.4917467", "text": "def operands_list(self):\n return [self]", "title": "" }, { "docid": "77b6bcf81ae0fa38863da98e5db12682", "score": "0.49143612", "text": "def visit_BinOp(self, node):\n if node.op.type == OR:\n # TODO: list operation\n left_arr = self.visit(node.left)\n right_arr = self.visit(node.right)\n return left_arr + list(set(right_arr) - set(left_arr))\n elif node.op.type == AND:\n # TODO:\n left_arr = self.visit(node.left)\n right_arr = self.visit(node.right)\n return list(set(left_arr) & set(right_arr))\n elif node.op.type == XOR:\n # TODO:\n left_arr = self.visit(node.left)\n right_arr = self.visit(node.right)\n return list(set(left_arr) ^ set(right_arr))", "title": "" }, { "docid": "70c269e478f45f72a9f1065440e3fd99", "score": "0.49006182", "text": "def _render_unary_exprs(self, schema_fields, operator, value_or_values_group):\n if isinstance(value_or_values_group, parser.Value):\n value = value_or_values_group\n return self._render_single_unary_expr(schema_fields, operator, value)\n\n # Process nested tree.\n values_group = value_or_values_group\n nested_unary_exprs = []\n for element in values_group.elements:\n try:\n rendered = self._render_unary_exprs(schema_fields, operator, element)\n nested_unary_exprs.append(rendered)\n except self.NotApplicableValue:\n # e.g.: searching \"word\" against filed with date type.\n if operator == parser.AND:\n # There's no sense to continue.\n raise\n continue\n\n if not nested_unary_exprs:\n # e.g.: searching ONLY text against filed with date type.\n raise self.NotApplicableValue()\n\n if len(nested_unary_exprs) == 1:\n return nested_unary_exprs[0]\n\n operator = u' AND ' if values_group.operator == parser.AND else u' OR '\n return u'({})'.format(operator.join(nested_unary_exprs))", "title": "" }, { "docid": "73a5ab5863c3333544f4b53272dc6add", "score": "0.48751187", "text": "def _create_operation_expression(self, op, args):\n # Check interval operator\n if op is _OPER_INTERVAL:\n return tuple(args)\n # Check unary operations on constant value\n if (op is Oper_minus) and (len(args) == 1) and is_number(args[0]):\n return -args[0]\n if (op is Oper_plus) and (len(args) == 1) and is_number(args[0]):\n return args[0]\n\n try:\n return _create_operation(op, 
args)\n except Exception as e:\n lastex = Exception(\"No valid operation found for {}: {}\".format(op.cpo_name, e))\n self._raise_exception(str(lastex))", "title": "" }, { "docid": "9a97d25eceb659185c4666f026bb292f", "score": "0.48351872", "text": "def convertOperStr(operStr):\n import operator as op\n\n filterOperStr = None\n ops = {'!=':op.ne, '<=':op.le, '>=':op.ge, '>':op.gt, '<':op.lt,\n '==':op.eq, '=':op.eq}\n\n if type(operStr) is str:\n for op in ops:\n if op in operStr:\n if filterOperStr is None:\n filterOperStr = op\n elif len(op) > len(filterOperStr):\n # Pick longer match\n filterOperStr = op\n else:\n for k, v in iteritems(ops):\n if v == operStr:\n filterOperStr = k\n if filterOperStr is None:\n return None, None\n\n return ops[filterOperStr], filterOperStr", "title": "" }, { "docid": "b5c955f218bc50c887031829c88a1c86", "score": "0.48271897", "text": "def isoperator(self, val: str) -> bool:\n return val in self.operators", "title": "" }, { "docid": "47409f8f5d66dcaa67ad5ba891705ae6", "score": "0.48067355", "text": "def invalid_comparison(left, right, op):\n if op is operator.eq:\n res_values = np.zeros(left.shape, dtype=bool)\n elif op is operator.ne:\n res_values = np.ones(left.shape, dtype=bool)\n else:\n raise TypeError(\"Invalid comparison between dtype={dtype} and {typ}\"\n .format(dtype=left.dtype, typ=type(right).__name__))\n return res_values", "title": "" }, { "docid": "d0c0a453c7243f34189576be5b0b0843", "score": "0.47818208", "text": "def _set_operator_specified_filters(self, operator):\n filters = QueryFilterCollection()\n composed_filter = Q()\n for filter_key in self.SUPPORTED_FILTERS:\n operator_key = operator + \":\" + filter_key\n filter_value = self.parameters.get_filter(operator_key)\n logical_operator = operator\n if filter_value and len(filter_value) < 2:\n logical_operator = \"or\"\n if filter_value and not TagQueryHandler.has_wildcard(filter_value):\n filter_obj = self.filter_map.get(filter_key)\n if isinstance(filter_obj, list):\n for _filt in filter_obj:\n filt_filters = QueryFilterCollection()\n for item in filter_value:\n q_filter = QueryFilter(parameter=item, logical_operator=logical_operator, **_filt)\n filt_filters.add(q_filter)\n composed_filter = composed_filter | filt_filters.compose()\n else:\n for item in filter_value:\n q_filter = QueryFilter(parameter=item, logical_operator=logical_operator, **filter_obj)\n filters.add(q_filter)\n if filters:\n composed_filter = composed_filter & filters.compose()\n\n return composed_filter", "title": "" }, { "docid": "79c65dfd05d57fc436447831e60a5b1b", "score": "0.47803184", "text": "def get_operators(self):\n return self.NUMBER_OPERATORS", "title": "" }, { "docid": "79c65dfd05d57fc436447831e60a5b1b", "score": "0.47803184", "text": "def get_operators(self):\n return self.NUMBER_OPERATORS", "title": "" }, { "docid": "79c65dfd05d57fc436447831e60a5b1b", "score": "0.47803184", "text": "def get_operators(self):\n return self.NUMBER_OPERATORS", "title": "" }, { "docid": "79c65dfd05d57fc436447831e60a5b1b", "score": "0.47803184", "text": "def get_operators(self):\n return self.NUMBER_OPERATORS", "title": "" }, { "docid": "79c65dfd05d57fc436447831e60a5b1b", "score": "0.47803184", "text": "def get_operators(self):\n return self.NUMBER_OPERATORS", "title": "" }, { "docid": "79c65dfd05d57fc436447831e60a5b1b", "score": "0.47803184", "text": "def get_operators(self):\n return self.NUMBER_OPERATORS", "title": "" }, { "docid": "b6d9529bb503998a9d761500bada4081", "score": "0.47729293", "text": "def operator(self, 
operator):\n allowed_values = [\"==\", \"!=\", \">\", \">=\", \"<\", \"<=\", \"!\"] # noqa: E501\n if operator not in allowed_values:\n raise ValueError(\n \"Invalid value for `operator` ({0}), must be one of {1}\" # noqa: E501\n .format(operator, allowed_values)\n )\n\n self._operator = operator", "title": "" }, { "docid": "a58fa98ed4b501a8d16992fb21fc99e1", "score": "0.47674966", "text": "def prepare_query_label_value(labels):\n if not labels:\n return None\n return [job_model.convert_to_label_chars(label) for label in labels]", "title": "" }, { "docid": "9a5182878022820f5beb0be02c98fc63", "score": "0.4762113", "text": "def operators(self) -> Operators:\n return self.__config.operators", "title": "" }, { "docid": "6a9dd3caeea87dad8653636b59bc3e82", "score": "0.4761453", "text": "def strip_multi_value_operators(string):\n # the search API source code lists many operators in the tokenNames\n # iterable, but it feels cleaner for now to explicitly name only the ones\n # we are interested in here\n if string:\n string = re.sub(r'^(OR|AND)', '', string)\n string = re.sub(r'(OR|AND)$', '', string)\n string = string.strip()\n return string", "title": "" }, { "docid": "c1e8e0e42095b9975d51f30e4e1de2f6", "score": "0.47081515", "text": "def test_compare_value(self):\n\n # initialize a dictionary, where key's are the comparison operators\n # and value is a tuple. The first element of the tuple is\n # the value to use in the query code, and the second element\n # of the tuple is the string that should appear in the results\n cases = {\n '<': (11, 'boxingday'),\n '<=': (10, 'boxingday'),\n '>': (100000, 'stpatricks'),\n '>=': (100000, 'independenceday'),\n '==': (10000, 'newyears'),\n '!=': (100001, 'boxingday')\n }\n\n # for each key and value pair\n for operator, (value, answer) in cases.items():\n # submit the operators_comparefields query\n # pass key as value for OPERATOR variable\n # pass first element of value tuple as VALUE variable\n o = str(quilt_test_core.call_quilt_script('quilt_submit.py', [\n '-y', 'operators_comparevalue', '-v', 'OPERATOR', operator,\n '-v', 'VALUE', str(value)]))\n # Assure proper execution, and get results from quilt_history\n o = self.check_query_and_get_results3(o)\n # check results contain one instance of second part of value\n # tuple\n self.contains(o, answer, 1)", "title": "" }, { "docid": "7d28976a59b82c2b0e98fcf71b661bf2", "score": "0.47022095", "text": "def clean_strings(value, ops):\n for function in ops:\n value = function(value)\n return value", "title": "" }, { "docid": "d684f8dc3b4aaad8d88b6a5459fe6f2e", "score": "0.46970624", "text": "def operands(self) -> tuple[BaseTerm, ...]:\n ...", "title": "" }, { "docid": "4af6a1746873ba69315ba0cdf7221cf0", "score": "0.46923366", "text": "def operator_or(*args):\n\n return ('or',) + args", "title": "" }, { "docid": "2f4fdeae6dba694e60bc74774b487ff6", "score": "0.4691151", "text": "def unary_operator_check(expr_list):\r\n if expr_list[0] in ('-', '+'):\r\n expr_list[0] += 'u'\r\n for index in range(1, len(expr_list)):\r\n if expr_list[index] in ('-', '+') and\\\r\n (expr_list[index - 1] in ('(', ',') or expr_list[index-1] in operation_dict):\r\n expr_list[index] += 'u'", "title": "" }, { "docid": "40a2fc944dd79b4b7ba3d5cadd52c7a1", "score": "0.4685797", "text": "def comparison_operator(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"comparison_operator\")", "title": "" }, { "docid": "c9b783d487ffeaf02a975a5d09d955bd", "score": "0.46847063", "text": "def apply_operations(value_and_ops, operations):\n value, 
current_ops = value_and_ops\n result = []\n for operation in operations:\n try:\n result.append((operation.apply(value), current_ops + [operation]))\n except ValueError:\n pass\n return result", "title": "" }, { "docid": "56de1d04b65dc4382224e5545d2ff604", "score": "0.46840078", "text": "def __unicode__(self):\n if self.bool_op == BooleanOperator.AND:\n op_str = \"AND \"\n elif self.bool_op == BooleanOperator.OR:\n op_str = \"OR \"\n elif self.bool_op == BooleanOperator.NOT:\n op_str = \"NOT \"\n elif self.bool_op == BooleanOperator.AND_NOT:\n op_str = \"AND NOT \"\n elif self.bool_op == BooleanOperator.OR_NOT:\n op_str = \"OR NOT \"\n else:\n op_str = \"\"\n return op_str + \"(\" + \" \".join([str(c) for c in self.conditions]) + \")\"", "title": "" }, { "docid": "628dbd2e6f1d53c6b4fa71096932bf3d", "score": "0.4682278", "text": "def _handle_is_operator(self, Op, expr):\n\n lhs = self._print(expr.lhs)\n rhs = self._print(expr.rhs)\n a = expr.args[0]\n b = expr.args[1]\n\n if Nil() in expr.args:\n lhs = ObjectAddress(expr.lhs) if isinstance(expr.lhs, Variable) else expr.lhs\n rhs = ObjectAddress(expr.rhs) if isinstance(expr.rhs, Variable) else expr.rhs\n\n lhs = self._print(lhs)\n rhs = self._print(rhs)\n return '{} {} {}'.format(lhs, Op, rhs)\n\n if (a.dtype is NativeBool() and b.dtype is NativeBool()):\n return '{} {} {}'.format(lhs, Op, rhs)\n else:\n errors.report(PYCCEL_RESTRICTION_IS_ISNOT,\n symbol=expr, severity='fatal')", "title": "" }, { "docid": "034453e5a7f6bd6400e428afd73b4ea8", "score": "0.46618357", "text": "def eval(self, values):\n str_value = self.expr\n for symbol, value in values.items():\n str_value = str_value.replace(symbol, str(value))\n return str_value", "title": "" }, { "docid": "ff44b753f3cd31f25408aafb706cea10", "score": "0.46609142", "text": "def _SimplifyLogical(op, left, right):\n inv = 'AND' if op == 'OR' else 'OR'\n if left[0] == op:\n if right[0] == inv:\n return left + [right]\n if right[0] == op:\n right = right[1:]\n return left + right\n if left[0] == inv:\n if right[0] in [op, inv]:\n return [op, left, right]\n return [op, left] + right\n if right[0] == inv:\n return [op] + left + [right]\n if right[0] == op:\n right = right[1:]\n return [op] + left + right", "title": "" }, { "docid": "dd3bca34262f5183ba137f026bf8ee8c", "score": "0.46579126", "text": "def sql_expression_generic_binary(\n self,\n expression,\n params,\n context,\n operator):\n query = []\n query.append(\n self.sql_expression(expression.operands[0], params, operator))\n query.append(' ')\n query.append(operator)\n query.append(' ')\n query.append(\n self.sql_expression(expression.operands[1], params, operator))\n return self.sql_bracket(''.join(query), context, operator)", "title": "" }, { "docid": "3fb2edde369a2aa504b5f63e212bbc1a", "score": "0.46565998", "text": "def _get_set_clause(self):\n statement_items, data = [], []\n for field, value in sorted(self._data.items()):\n\n if not isinstance(field, Field):\n field = self._model.get_field(field)\n if not field:\n continue\n elif field.model != self._model:\n continue\n\n statement = self._db.interpolation\n if value is None and not field._nullable:\n raise ValueError('[UpdateQuery._get_set_clause] ' +\n 'field \"%s\" is not nullable' % field.name)\n\n statement_items.append(self._db.operations.get(**{\n 'name': 'eq',\n 'column': field.column_name,\n 'value': statement}))\n data.append(field.db_value(value))\n\n comma = self._db.op_connectors.get(ExprConnector.Comma)\n return comma.join(statement_items), data", "title": "" }, { "docid": 
"b96dc5d9b9d6947dd3bfc58a67b5cad0", "score": "0.46564654", "text": "def parse_comparison_operator(op, exp, is_negative=False):\n items = exp.items()\n op = ops.get(op, op)\n op = inverted_ops.get(op, op) if is_negative else op\n if len(items) == 1:\n key, value = items[0]\n key = complex_format_field(key)\n return \"\\\"%s\\\"%s%s\" % (key, op, escape_value(value, key))\n else:\n raise ValueError(\"Expression should be a dict with size equals 1.\")", "title": "" }, { "docid": "982f7a49d2ef9f55450bdfef587cfb85", "score": "0.46559846", "text": "def _get_opstr(op, cls):\n # numexpr is available for non-sparse classes\n subtyp = getattr(cls, '_subtyp', '')\n use_numexpr = 'sparse' not in subtyp\n\n if not use_numexpr:\n # if we're not using numexpr, then don't pass a str_rep\n return None\n\n return {operator.add: '+',\n radd: '+',\n operator.mul: '*',\n rmul: '*',\n operator.sub: '-',\n rsub: '-',\n operator.truediv: '/',\n rtruediv: '/',\n operator.floordiv: '//',\n rfloordiv: '//',\n operator.mod: None, # TODO: Why None for mod but '%' for rmod?\n rmod: '%',\n operator.pow: '**',\n rpow: '**',\n operator.eq: '==',\n operator.ne: '!=',\n operator.le: '<=',\n operator.lt: '<',\n operator.ge: '>=',\n operator.gt: '>',\n operator.and_: '&',\n rand_: '&',\n operator.or_: '|',\n ror_: '|',\n operator.xor: '^',\n rxor: '^',\n divmod: None,\n rdivmod: None}[op]", "title": "" }, { "docid": "d32fab7d978c7d0637d70c01f62e0c08", "score": "0.4655145", "text": "def operator_pattern(operators):\n # Sort operators and reverse them so that operators sharing a prefix\n # always have the shortest forms last. Otherwise, the shorter forms\n # will match operators early, i.e. `<` will match `<=` and ignore\n # the `=`, causing a parse error.\n operators = sorted(operators, key=lambda op: op.symbol, reverse=True)\n operator_literals = ['\"{}\"'.format(op.symbol) for op in operators]\n return ' / '.join(operator_literals)", "title": "" }, { "docid": "03669678a9bfcda632244528fb6058b4", "score": "0.46514067", "text": "def build_mutations(ops, to_ops):\n return [\n (idx, to_op)\n for idx, from_op in enumerate(ops)\n # note: mutations to self are excluded.\n # None is a special mutation meaning to delete the operator!\n # 1) when to_op is None isinstance(from_op, None) will blow up because\n # the second parameter needs to be a class\n # 2) when to_op != None we do the isinstance() check to figure out\n # whether or not to include the operator in the list of possible mutations\n #\n # The `if to_op is None or isinstance(from_op, to_op)` expression handles both\n # scenarios very elegantly. First we handle 1) and if this is True the rest of\n # the expression is not evaluated and None is returned. 
Else we're in scenario 2)\n # where the left part of the expression is False so the right part is evaluated.\n # Since the left part of the expression has confirmed that to_op != None then\n # we're confident that the isinstance() method will always work.\n for to_op in to_ops(from_op) if to_op is None or not isinstance(from_op, to_op)\n ]", "title": "" }, { "docid": "70b73761d1d7f3318c748c504100c39a", "score": "0.4648952", "text": "def is_comparison_operator(o):\n if o in comparison_operators.values():\n return True\n return False", "title": "" }, { "docid": "29f6383ea32dcdfd1ced1f94a46b81dd", "score": "0.46406338", "text": "def get_operator(cls, operator_as_str):\n if operator_as_str.upper() == 'AND':\n return AND()\n elif operator_as_str.upper() == 'OR':\n return OR()\n elif operator_as_str.upper() == 'NOT':\n return NOT()\n elif operator_as_str.upper() == 'EQ':\n return EQ()\n elif operator_as_str.upper() == 'GT':\n return GT()\n elif operator_as_str.upper() == 'LT':\n return LT()\n elif operator_as_str.upper() == 'BETWEEN':\n return BETWEEN()\n elif operator_as_str.upper() == 'IN':\n return IN()\n else:\n raise Exception(\"Not a valid operator %s\" % operator_as_str)\n # Similarly more classes can be added", "title": "" }, { "docid": "4d18b08111836d85dc95b432819902da", "score": "0.46294394", "text": "def _make_opsargs(self, tokens):\n\n # a doubly linked list containing operands and their arguments.\n opsargs = dllist()\n # a list of the positions of the unary operators within (opsargs)\n unary_ops = []\n # a dict which maps precedences to pairs (assoc, posns), (posns) is\n # the positions of the binary operators which have that precedence\n # and (assoc) is their associativity.\n binary_ops = {}\n def add_unary(token):\n pos = opsargs.append(token)\n unary_ops.append(pos)\n def add_binary(token):\n info = Parsing.binary_ops[token.type]\n posn = opsargs.append(token)\n _, posns = binary_ops.setdefault(info.prec, (info.assoc, []))\n posns.append(posn)\n # The flag below is needed to determine if a product operator ought to\n # be inserted.\n last_is_expr_or_unop = False\n for token in tokens:\n category = Parsing.token_category(token)\n if category == 'primitive':\n pattern = self.primitive_to_Pattern(token)\n if last_is_expr_or_unop:\n add_binary(Token('product', None))\n opsargs.append(pattern)\n last_is_expr_or_unop = True\n elif category == 'operator':\n if Parsing.is_unary_op(token):\n add_unary(token)\n last_is_expr_or_unop = True\n else:\n add_binary(token)\n last_is_expr_or_unop = False\n else: # (token) is a parenthesis\n pattern = self._parse(tokens.subtokens())\n paren = token.type\n if paren == '(':\n self.context.numgrps += 1\n pattern._grpis.append(self.context.numgrps)\n elif paren == '(?:':\n pass\n elif paren in ('(?=', '(?!'):\n pattern = self.lookahead_to_Pattern(paren, pattern)\n elif paren == ')':\n raise ValueError(f'Superfluous closing parenthesis.')\n else:\n raise AssertionError(f'This should not happen: {token}')\n if last_is_expr_or_unop:\n add_binary(Token('product', None))\n opsargs.append(pattern)\n last_is_expr_or_unop = True\n\n binary_ops = [posn for precedence, posn in sorted(binary_ops.items(), reverse=True)]\n return opsargs, unary_ops, binary_ops", "title": "" }, { "docid": "623154b5df03c6bd76a25d3457d9e55b", "score": "0.4626317", "text": "def _validate_entries(entries):\n if not isinstance(entries, np.ndarray):\n raise TypeError(\"operator entries must be NumPy array\")\n if np.any(np.isnan(entries)):\n raise ValueError(\"operator entries must not 
be NaN\")\n elif np.any(np.isinf(entries)):\n raise ValueError(\"operator entries must not be Inf\")", "title": "" }, { "docid": "0da965129b8b763b152a072bce3ece53", "score": "0.46230605", "text": "def in2post_fix(cls, infix_tokens):\n opStack = Stack()\n postfixList = []\n\n for token in infix_tokens:\n if token == \"(\":\n opStack.push(token)\n elif token == \")\":\n topToken = opStack.pop()\n while topToken != \"(\":\n postfixList.append(topToken)\n topToken = opStack.pop()\n elif cls.isOperator(token):\n # On doit ajouter la condition == str sinon python ne veut pas tester l'appartenance à la chaine de caractère. \n while (not opStack.isEmpty()) and (cls.PRIORITY[opStack.peek()] >= cls.PRIORITY[token]):\n postfixList.append(opStack.pop())\n opStack.push(token)\n else:\n postfixList.append(token)\n\n while not opStack.isEmpty():\n postfixList.append(opStack.pop())\n\n return postfixList", "title": "" }, { "docid": "96fd1f79c0412048c9874d92464e7a06", "score": "0.46177408", "text": "def parse_binary_expr(self, operand_parser, operators):\n result = operand_parser()\n\n while True:\n operator = None\n second_token = None\n\n for i in operators:\n if type(i) == tuple:\n operator = self.consume_string(i[0])\n second_token = self.consume_string(i[1])\n if operator is None or second_token is None:\n break\n else:\n operator = self.consume_string(i)\n if operator is not None:\n break\n\n if operator is None:\n break\n\n rhs = operand_parser()\n\n if rhs is None:\n break\n\n result = BinaryExpr(result, Operator(operator, second_token), rhs)\n\n print(f\"\\n>>>> {result}\")\n\n return result", "title": "" }, { "docid": "f562144e52627f8267f2973c6459d50c", "score": "0.4595079", "text": "def stringify_conditions(conditions):\n\n stringified = []\n for cond in conditions:\n if isinstance(cond, basestring):\n stringified.append(cond)\n else:\n try:\n operator = '==' if cond['operator'] == '=' else cond['operator']\n stringified.append(' '.join([str(cond['left_operand']), operator, str(cond['right_operand'])]))\n except TypeError:\n stringified.extend(['(' + stringify_conditions(cond) + ')'])\n\n return ' '.join(stringified)", "title": "" }, { "docid": "d4c54e681d216fc476b938c21724b244", "score": "0.4589187", "text": "def __init__(__self__, *,\n name: str,\n operator: str,\n values: Sequence[str]):\n pulumi.set(__self__, \"name\", name)\n pulumi.set(__self__, \"operator\", operator)\n pulumi.set(__self__, \"values\", values)", "title": "" }, { "docid": "d4c54e681d216fc476b938c21724b244", "score": "0.4589187", "text": "def __init__(__self__, *,\n name: str,\n operator: str,\n values: Sequence[str]):\n pulumi.set(__self__, \"name\", name)\n pulumi.set(__self__, \"operator\", operator)\n pulumi.set(__self__, \"values\", values)", "title": "" }, { "docid": "ea46a17930fb28d33985925a2f8c4662", "score": "0.45871675", "text": "def dynamic_query(model, fields, values, operator):\n queries = []\n for (f, v) in zip(fields, values):\n # We only want to build a Q with a value\n if v != \"\":\n kwargs = {unicode(u'%s__icontains' % f) : unicode(u'%s' % v)}\n queries.append(Q(**kwargs))\n\n # Make sure we have a list of filters\n if len(queries) > 0:\n q = Q()\n # AND/OR awareness\n i = 0\n if operator == '':\n operator = ['or', 'or']\n\n operator.insert(0,'or')\n\n for query in queries:\n if operator[i] == 'and':\n q = q & query\n elif operator[i] == 'or':\n q = q | query\n else:\n q = None\n i = i+1\n\n if q:\n # We have a Q object, return the QuerySet\n return model.objects.filter(q)\n else:\n # Return an empty 
result\n return {}", "title": "" }, { "docid": "e9a25d76339659da4fc852dbeb4d2aae", "score": "0.4585303", "text": "def parse_bool_operator(op, expressions, is_negative=False):\n op = inverted_ops.get(op, op) if is_negative else op\n return \"(\" + \" {op} \".format(op=op).join(\n [parse_filter_expression(e, is_negative) for e in expressions]) + \")\"", "title": "" }, { "docid": "b7fbf7f17525fa3c44a186b9bfa9bf6c", "score": "0.45757428", "text": "def op (x, y, op = '+'):\n n = len(x)\n out = []\n for i in range(n):\n line = []\n for j in range(n):\n if op is '+':\n res = x[i][j] + y[i][j]\n elif op is '-':\n res = x[i][j] - y[i][j]\n line.append(res)\n out.append(line)\n return out", "title": "" }, { "docid": "3a6714795881c2efc137a350d32c4c67", "score": "0.45666215", "text": "def _process_binary(self, opsargs, binary_ops):\n\n def get_operand(posn):\n \"\"\"helper for the loop below.\"\"\"\n if posn is None:\n raise ValueError(f'Missing operand.')\n value = posn.value\n if not isinstance(value, Pattern):\n raise ValueError(f'Bad operand.')\n return value\n\n def squeeze_posn(binop_posn, pattern):\n left, right = binop_posn.prev, binop_posn.next\n binop_posn.value = pattern\n opsargs.remove(left)\n opsargs.remove(right)\n\n def optimize(binop_posns):\n \"\"\"Assumes all operators at (binop_posns) have the same precedence,\n and that this is currently the maximum precedence within the\n expression sequence.\"\"\"\n operator_type = binop_posns[0].value.type\n if operator_type == 'product':\n # For example, thanks to this optimization the regex string\n # r'ab' will be parsed to a Literal pattern 'ab' instead of a\n # Product with children 'a' and 'b'. For a more elaborate\n # example, r'abc|xyz' will be parsed to an Alternative with\n # Literal children 'abc' and 'xyz', whereas without the\n # optimization, the children will be Products each with three\n # single character leaves.\n squeezed_posns = [] # to be removed from (binop_posns) after the loop\n for binop_posn in binop_posns:\n operand1 = get_operand(binop_posn.prev)\n operand2 = get_operand(binop_posn.next)\n if (type(operand1) is type(operand2) is Literal\n and not (operand1._grpis or operand2._grpis)):\n new_literal = operand1._literal+operand2._literal\n pattern = Literal(new_literal, [], self.context)\n squeeze_posn(binop_posn, pattern)\n squeezed_posns.append(binop_posn)\n for posn in squeezed_posns:\n binop_posns.remove(posn)\n elif operator_type == '|':\n # Thanks to this optimization, the regex string r'[0-9]|[a-z]'\n # will compile to the CharClass r'[0-9a-z]', which is\n # semantically equivalent.\n squeezed_posns = [] # to be removed from (binop_posns) after the loop\n for binop_posn in binop_posns:\n operand1 = get_operand(binop_posn.prev)\n operand2 = get_operand(binop_posn.next)\n if (type(operand1) is type(operand2) is CharClass\n and not (operand1._grpis or operand2._grpis)):\n new_charset = operand1._chars | operand2._chars\n pattern = CharClass(new_charset, [], self.context)\n squeeze_posn(binop_posn, pattern)\n squeezed_posns.append(binop_posn)\n for posn in squeezed_posns:\n binop_posns.remove(posn)\n else:\n raise AssertionError('This should never happen.')\n\n for assoc, binop_posns in binary_ops:\n if assoc == 'right':\n binop_posns.reverse()\n optimize(binop_posns)\n for binop_posn in binop_posns:\n operand1 = get_operand(binop_posn.prev)\n operand2 = get_operand(binop_posn.next)\n pattern = self.binary_to_Pattern(binop_posn.value, operand1, operand2)\n squeeze_posn(binop_posn, pattern)", "title": "" }, { 
"docid": "91504a779fc019f611a3ff92abfa8337", "score": "0.45637816", "text": "def split_by_ops(self, line):\n \n op_items = []\n cur_op = \"\"\n # Split by whitespace \n items = line.split(\" \")\n # print items\n \n prev_item = items[0]\n for item in items:\n item = item.strip()\n item = item.strip(\"'\")\n item = item.strip('\"')\n if item == \"==\":\n cur_op = prev_item + \" == \"\n elif item == \"contains\":\n cur_op = prev_item + \" contains \"\n elif item == \"then\":\n # cur_op = cur_op + item\n op_items.append(cur_op)\n cur_op = \"\"\n elif not item == \"\":\n cur_op = cur_op + \" \" + item\n \n prev_item = item\n \n assignments = line.split(\"then\")[1]\n # print \"ASSIGNMENT\" + assignments\n # assignments = assignments.split(\"=\")\n \n return op_items, assignments", "title": "" }, { "docid": "ddd492fb849e13d1fea3e0bb8b23c837", "score": "0.4557089", "text": "def OR(args):\n standard_op(args, \"+\")", "title": "" }, { "docid": "d324d8b4cb6c04b83d7ea959bb0fb4d3", "score": "0.45567974", "text": "def gen_test_case_unknown_operators(self):\n cases = ['\\n\\n;; Unknown operators']\n\n for op in self.UNKNOWN_BINARY_OPS:\n cases.append(AssertMalformed.get_unknown_op_test(\n op, 'v128',\n SIMD.v128_const('0', self.LANE_TYPE),\n SIMD.v128_const('1', self.LANE_TYPE)\n ))\n if hasattr(self, 'UNKNOWN_UNARY_OPS'):\n for op in self.UNKNOWN_UNARY_OPS:\n cases.append(AssertMalformed.get_unknown_op_test(\n op, 'v128',\n SIMD.v128_const('-1', self.LANE_TYPE)\n ))\n return '\\n'.join(cases)", "title": "" }, { "docid": "ec95f800c9258bd9d983b0cbf73708b0", "score": "0.455571", "text": "def isin(self, values):\n\n return self._values.isin(values).values", "title": "" }, { "docid": "76c12729a2e7ca58fb32175d2c55a36c", "score": "0.4540446", "text": "def exec_op(self, op, input_values, *_,\n **__):\n if op.type == OpType.NONE or op.type == OpType.IDENTITY:\n return input_values\n\n op_kwargs, input_kwargs = get_full_kwargs(op)\n outputs = [\n hash((op.type, frozenset(op_kwargs.items()),\n frozenset(input_kwargs.items()), frozenset(input_values), idx))\n for idx in range(op.num_outputs)\n ]\n return outputs", "title": "" }, { "docid": "6e46fea2504b4df763a327ff09f15057", "score": "0.45396435", "text": "def parse_operands(self, opcode, op_array):\n\n # OP number and name\n op_no = opcode['opcode']\n\n # Treat jump instructions differently\n if 42 <= op_no and op_no <= 47:\n (op1, op2, result) = self.parse_jmp(opcode, op_array)\n\n else:\n op1 = self.parse_zval(opcode['op1'].val, opcode['op1_type'])\n op2 = self.parse_zval(opcode['op2'].val, opcode['op2_type'])\n result = self.parse_zval(opcode['result'].val, opcode['result_type'])\n\n return (op1, op2, result)", "title": "" }, { "docid": "04bc9a60795bbb3a08a4b599bacdb508", "score": "0.4533198", "text": "def operator_invert(ops): \n return([op.transpose([1,0,3,2]) for op in ops[::-1]])", "title": "" }, { "docid": "1a84f8cbfe08a0ed903df8a02121999d", "score": "0.45244703", "text": "def operations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"operations\")", "title": "" }, { "docid": "d33536cbf9b0f07ab7afeaab03b1ab0f", "score": "0.45175344", "text": "def _getOpQS(self, **args):\n\t\t# we don't use urllib.urlencode to not encode empty values like a=&b=val\n\t\tqString = \"&\".join(\"%s=%s\"%(k, urllib.quote(v)) \n\t\t\tfor k, v in args.iteritems() if v)\n\t\treturn \"%s\"%(qString)", "title": "" }, { "docid": "7de29a7ede1f603aa183daf3a1a1261b", "score": "0.451686", "text": "def test_compare_fields(self):\n\n # initialize 
a dictionary, where key's are the comparison operators\n # and value is a string that should appear in the results\n cases = {\n '<': 'boxingday',\n '<=': 'memorialday',\n '>': 'stpatricks',\n '>=': 'independenceday'\n }\n\n # for each key and value pair\n for key, value in cases.items():\n # submit the operators_comparefields query\n # pass key as value for operator variable\n o = str(quilt_test_core.call_quilt_script('quilt_submit.py', [\n '-y', 'operators_comparefields', '-v', 'OPERATOR', key]))\n\n # Assure proper execution, and get results from quilt_history\n o = self.check_query_and_get_results3(o)\n # check results contain one instance of \"value\"\n self.contains(o, value, 1)", "title": "" }, { "docid": "a99e82a6ae3e2ce7d9ede706f1859ed6", "score": "0.45153028", "text": "def operator(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"operator\")", "title": "" }, { "docid": "a99e82a6ae3e2ce7d9ede706f1859ed6", "score": "0.45153028", "text": "def operator(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"operator\")", "title": "" }, { "docid": "a99e82a6ae3e2ce7d9ede706f1859ed6", "score": "0.45153028", "text": "def operator(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"operator\")", "title": "" }, { "docid": "a99e82a6ae3e2ce7d9ede706f1859ed6", "score": "0.45153028", "text": "def operator(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"operator\")", "title": "" }, { "docid": "a99e82a6ae3e2ce7d9ede706f1859ed6", "score": "0.45153028", "text": "def operator(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"operator\")", "title": "" }, { "docid": "a99e82a6ae3e2ce7d9ede706f1859ed6", "score": "0.45153028", "text": "def operator(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"operator\")", "title": "" }, { "docid": "a99e82a6ae3e2ce7d9ede706f1859ed6", "score": "0.45153028", "text": "def operator(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"operator\")", "title": "" }, { "docid": "a99e82a6ae3e2ce7d9ede706f1859ed6", "score": "0.45153028", "text": "def operator(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"operator\")", "title": "" }, { "docid": "a99e82a6ae3e2ce7d9ede706f1859ed6", "score": "0.45153028", "text": "def operator(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"operator\")", "title": "" }, { "docid": "a99e82a6ae3e2ce7d9ede706f1859ed6", "score": "0.45153028", "text": "def operator(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"operator\")", "title": "" }, { "docid": "a99e82a6ae3e2ce7d9ede706f1859ed6", "score": "0.45153028", "text": "def operator(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"operator\")", "title": "" }, { "docid": "a99e82a6ae3e2ce7d9ede706f1859ed6", "score": "0.45153028", "text": "def operator(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"operator\")", "title": "" }, { "docid": "a99e82a6ae3e2ce7d9ede706f1859ed6", "score": "0.45153028", "text": "def operator(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"operator\")", "title": "" }, { "docid": "a99e82a6ae3e2ce7d9ede706f1859ed6", "score": "0.45153028", "text": "def operator(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"operator\")", "title": "" }, { "docid": "a99e82a6ae3e2ce7d9ede706f1859ed6", "score": "0.45153028", "text": "def operator(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"operator\")", "title": "" }, { "docid": "a99e82a6ae3e2ce7d9ede706f1859ed6", "score": "0.45153028", "text": "def operator(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"operator\")", "title": "" } ]
bcc66fe3430836eecca2a21f7952ef75
\remarks implements the AbstractSceneObject._nativeCaches method to return a list of the native caches that are applied to this object \param cacheType filter by the input cache type \return [ nativeCache, .. ]
[ { "docid": "bc30bd28eea3197355a87e3333426621", "score": "0.8371028", "text": "def _nativeCaches( self, cacheType = 0 ):\r\n\t\toutput = []\r\n\r\n\t\tfrom cross3d.constants import CacheType\r\n\r\n\t\t# store maxscript methods used\r\n\t\tclassof \t= mxs.classof\r\n\r\n\t\t# collect point cache modifiers\r\n\t\tif ( not cacheType or cacheType & CacheType.Point_Cache ):\r\n\t\t\tcls \t= mxs.Point_Cache\r\n\t\t\tfor modifier in self._nativePointer.modifiers:\r\n\t\t\t\tif ( classof(modifier) == cls ):\r\n\t\t\t\t\toutput.append(modifier)\r\n\r\n\t\t# collect transform cache controllers\r\n\t\tif ( not cacheType or cacheType & CacheType.Transform_Cache ):\r\n\t\t\tcls \t\t= mxs.Transform_Cache\r\n\t\t\tcontroller\t= self._nativePointer.controller\r\n\t\t\twhile ( classof( controller ) == cls ):\r\n\t\t\t\toutput.append( controller )\r\n\t\t\t\tcontroller = controller.basecontroller\r\n\r\n\t\treturn output", "title": "" } ]
[ { "docid": "5e8c0cf4aaac44379c07e7e302ae9f6a", "score": "0.5680656", "text": "def get_cache(self):", "title": "" }, { "docid": "8db0c5aae69f2fb6e30128c2aa022fab", "score": "0.55789125", "text": "def getCache(self, sType):\n dRet = self.ddCaches.get(sType, None);\n if dRet is None:\n dRet = dict();\n self.ddCaches[sType] = dRet;\n return dRet;", "title": "" }, { "docid": "450b216ca1aaae09cb51cab15ab4b856", "score": "0.54188037", "text": "def _get_obj_cache(self):\n if not hasattr(self._local_cache, 'objects'):\n self._local_cache.objects = {}\n for obj in self.container.get_objects(full_listing=True):\n self._local_cache.objects[obj.name] = obj\n return self._local_cache.objects", "title": "" }, { "docid": "90755e9b4581e8e8069bfcb36570c0b6", "score": "0.54123485", "text": "def _get_cache_per_layer(self, cache: List[mx.sym.Symbol]) -> List[Dict[str, Optional[mx.sym.Symbol]]]:\n if not cache: # first decoder step\n return [{'k': None, 'v': None} for _ in range(len(self.layers))]\n else:\n assert len(cache) == len(self.layers) * 2\n return [{'k': cache[2 * l + 0], 'v': cache[2 * l + 1]} for l in range(len(self.layers))]", "title": "" }, { "docid": "90755e9b4581e8e8069bfcb36570c0b6", "score": "0.54123485", "text": "def _get_cache_per_layer(self, cache: List[mx.sym.Symbol]) -> List[Dict[str, Optional[mx.sym.Symbol]]]:\n if not cache: # first decoder step\n return [{'k': None, 'v': None} for _ in range(len(self.layers))]\n else:\n assert len(cache) == len(self.layers) * 2\n return [{'k': cache[2 * l + 0], 'v': cache[2 * l + 1]} for l in range(len(self.layers))]", "title": "" }, { "docid": "dae324d3036d64d621e89cac86eff124", "score": "0.5405668", "text": "def cached_objs(self):\n return [i[:-4] for i in list(self.memory.keys()) if i.endswith('_obj')]", "title": "" }, { "docid": "b61e586794e69c44b1090f40c9ef9e69", "score": "0.5394986", "text": "def getCache(self):\n # Used e.g. 
by LSST\n\n return self.__cache", "title": "" }, { "docid": "0b0e6669410137cfeeeb5d589e0f0da4", "score": "0.5363616", "text": "def getCurrentCache(*args):\n return _coin.SoCacheElement_getCurrentCache(*args)", "title": "" }, { "docid": "aa1d1d9771b846fa74837c7da8eac141", "score": "0.5334388", "text": "def cachetype(self) :\n try :\n return self._cachetype\n except Exception as e:\n raise e", "title": "" }, { "docid": "b4dff0049cbca107c6dfeb7d100bb768", "score": "0.53256357", "text": "def SoCacheElement_getCurrentCache(*args):\n return _coin.SoCacheElement_getCurrentCache(*args)", "title": "" }, { "docid": "bdc874cdcbed4f4f2e27302d416045fd", "score": "0.52909166", "text": "def all_cached_geocodes(self):\n return self.cache", "title": "" }, { "docid": "993f329c72ae7265a773a1e1f6cfbd4c", "score": "0.5277034", "text": "def get_cache() -> list:\n return glob.glob(\"stickers/cache/*.png\")", "title": "" }, { "docid": "e70caef7a4160a98b653c14a32ebd3d1", "score": "0.52699375", "text": "def get_image_caches(self, audit_cache_size=False):\n\t\treturn self.app.api.network.call_remotes(self.app.api.network.image_interfaces.keys(), 'cache.admin_info', audit_cache_size)", "title": "" }, { "docid": "c9a48255dd674e04d5541a641336c8c1", "score": "0.52649987", "text": "def cached_objs():\n return [df.split('/')[-1][:-4] for df in\n sorted(glob(config.storage_path + '*' + '_obj'), key=os.path.getmtime)]", "title": "" }, { "docid": "c9a48255dd674e04d5541a641336c8c1", "score": "0.52649987", "text": "def cached_objs():\n return [df.split('/')[-1][:-4] for df in\n sorted(glob(config.storage_path + '*' + '_obj'), key=os.path.getmtime)]", "title": "" }, { "docid": "480d7c5b1c7225ddcbc413ca5cbf3d6f", "score": "0.5228628", "text": "def all(self):\n if self._cache is None:\n self._recache()\n return self._cache", "title": "" }, { "docid": "2980cb75e77a136cf8b05306797655b0", "score": "0.52134675", "text": "def cache_list(self, alg_list = None):\n all_algs = NIname.get_all_algs()\n if alg_list is None:\n alg_list = all_algs\n else:\n for alg in alg_list:\n if not alg in all_algs:\n self.loginfo(\"cache_list: Unknown algorithm name requested %s\" %\n alg)\n return None\n rslt = {}\n for alg in alg_list:\n mfd = \"%s%s%s\" % (self.storage_root, self.META_DIR, alg)\n cfd = \"%s%s%s\" % (self.storage_root, self.NDO_DIR, alg)\n entries = []\n\n # All cache entries are required to have metadata file,\n # and may have content file\n try:\n for dgst in os.listdir(mfd):\n ce = os.path.isfile(\"%s/%s\" % (cfd, dgst))\n entries.append( { \"dgst\": dgst, \"ce\": ce })\n except Exception, e:\n self.logerror(\"cache_list: error while listing for alg %s: %s\" %\n (alg, str(e)))\n return None\n rslt[alg] = entries\n\n return rslt", "title": "" }, { "docid": "9014c28b2869134489c3ae5d0993bb4f", "score": "0.5201501", "text": "def new_cache():\n return {\"anime\": []}", "title": "" }, { "docid": "fbfe3136514c7119959317aedb318db4", "score": "0.5195435", "text": "def cache(self):", "title": "" }, { "docid": "24cf65b8c1361db5059601c77e86f68e", "score": "0.51831317", "text": "def dynCache():\n pass", "title": "" }, { "docid": "a781c293a96667800bd376301be059ca", "score": "0.51740956", "text": "def make_cache(model, *args, **kwargs):\n return ListCache(DjangoBackingStore(model.objects), *args, **kwargs)", "title": "" }, { "docid": "ccb295cc88f4b1817a2c679875473ee9", "score": "0.50635993", "text": "def get_cache(self):\r\n return self._cache", "title": "" }, { "docid": "7c8727e27483adc8b5001daca0f99e02", "score": "0.50546545", "text": "def 
cache(self):\n if self._cache is None:\n self.read_cache()\n return self._cache", "title": "" }, { "docid": "e5a7b78cef53586b12baa4f0ff6e9a01", "score": "0.50522804", "text": "def cache_using(store: str):\n return {\n \"redis\": redis_cache_by_first_arg,\n \"memory\": memory_cache_by_first_arg,\n }[store.lower()]", "title": "" }, { "docid": "54d365325def5ece430ac5a01f47bb89", "score": "0.50422376", "text": "def caching(self):\n return self._caching", "title": "" }, { "docid": "cbb31bf334d8eddc5e5e8fc46cad702c", "score": "0.5042177", "text": "def cache_server(self):\n return create_cache(self, self.config['CACHE_SERVER'])", "title": "" }, { "docid": "9f8fd8c5679935af858639dcb726705c", "score": "0.50331175", "text": "def new_cache(self) :\r\n cache = np.zeros( (self.y, self.x, self.landcover), dtype=np.int16)\r\n idx = q.OrthoIndexer( [self.lat_idx,self.lon_idx, self.lc_idx])\r\n return q.UnitIndexNdArray(cache, idx)", "title": "" }, { "docid": "2c656e97efb5cec55f6066cb8ba6bbde", "score": "0.5018369", "text": "def cachefiles(self):\r\n cachefiles = set()\r\n for source in self:\r\n cachefiles.add(source.cachefile)\r\n return list(cachefiles)", "title": "" }, { "docid": "b445947d690ec1234f6cd859afe0fb92", "score": "0.5008611", "text": "def mergeCacheInfo(*args):\n return _coin.SoGLLazyElement_mergeCacheInfo(*args)", "title": "" }, { "docid": "63ef901b660a652823bd3447c180e4ce", "score": "0.50028527", "text": "def cachefiles(self):\r\n cachefiles = set(Collection.cachefiles.fget(self))\r\n if self.cachefile:\r\n cachefiles.add(self.cachefile)\r\n return list(cachefiles)", "title": "" }, { "docid": "bca59bca7ecf59bdae92c0021209b061", "score": "0.4998138", "text": "def cache(self):\n return self.__cache", "title": "" }, { "docid": "fe419c16049326ba41a976c860df488b", "score": "0.49969584", "text": "def allCached(Implementation, critic):\n # Don't catch KeyError here. Something is probably wrong if this\n # function is called when no objects of the type are cached.\n return critic._impl.lookup(Implementation)", "title": "" }, { "docid": "c2b95db8d6168ad5f3dec711bd690475", "score": "0.49858755", "text": "def _MakeCache(self, cache_manager, kind, max_size=None):\n return RamCache(cache_manager, kind, max_size=max_size)", "title": "" }, { "docid": "a20c9e0c8fea87a54776c409bbe188c7", "score": "0.49680647", "text": "def polyCacheMonitor(cacheValue=bool, nodeName=\"string\"):\n pass", "title": "" }, { "docid": "d2fa04a93e16823e85b7b61c2c2d11f3", "score": "0.49580044", "text": "def named_caches(self):\n return self._named_caches", "title": "" }, { "docid": "880d3990edda4f624ed6e5b954d9693b", "score": "0.49577844", "text": "def _get_cache(self, addr, name, rdtype, rdclass):\r\n return self._cache.get((name, rdtype, rdclass))", "title": "" }, { "docid": "1aa77c77c6948cbcfd8035a56ee68f67", "score": "0.49550202", "text": "def init_caches(self):\n\n # This cache idea was really clever I had no idea python had these collections\n # I mean keyed arrays as a language feature? 
That's stupid useful 10/10 well done\n self.font_cache = {}\n self.text_cache = {}\n self.image_cache = {}\n\n # Timeouts for caches so we don't hold on to variables we won't use\n self.CACHE_TIMEOUT = 3 * 60 * 60\n self.font_timeout = {}\n self.text_timeout = {}\n self.image_timeout = {}", "title": "" }, { "docid": "c32a006044c383e42eb871470a01024b", "score": "0.49479684", "text": "def cache(self) -> \"BlockMatrix\":\n self._java_matrix_wrapper.call(\"cache\")\n return self", "title": "" }, { "docid": "05dd9a0157a7106df30e4a315653f530", "score": "0.49433532", "text": "def cache(self):\n return self._cache", "title": "" }, { "docid": "1f29f703ceb49bb0e09ed2b62f141473", "score": "0.49264976", "text": "def get_all(self) -> List[T]:\n return self.__cache.values()", "title": "" }, { "docid": "2c7e483c597badede82fd9bce0101fda", "score": "0.491345", "text": "def cache_dict(self) -> dict:\n # TODO : add support for redis cache instead of using simple cache\n return {\n \"CACHE_TYPE\": \"simple\",\n \"CACHE_DEFAULT_TIMEOUT\": self.CACHE_DEFAULT_TIMEOUT,\n \"CACHE_KEY_PREFIX\": \"memberships_cache_\"\n }", "title": "" }, { "docid": "6a63727a9f5db09b87ebe6e7bf07ffdd", "score": "0.48720986", "text": "def getFileCache(self, *args):\r\n return _osgDB.Registry_getFileCache(self, *args)", "title": "" }, { "docid": "78ff47bab9144fc183ab2f09af5d1953", "score": "0.48718962", "text": "def get_cache(cls):\n return cls._cache", "title": "" }, { "docid": "4b74f30af20cf391e56ff412ec9ab5fa", "score": "0.48650697", "text": "def _combined_cache(self):\n result = {}\n result.update(self.cache)\n result.update(self.pickle_cache)\n return result", "title": "" }, { "docid": "ac3530d787d5c75edc00a176c7c50df7", "score": "0.48616445", "text": "def cache(self) -> Optional[pulumi.Input['FrontdoorRouteCacheArgs']]:\n return pulumi.get(self, \"cache\")", "title": "" }, { "docid": "ac3530d787d5c75edc00a176c7c50df7", "score": "0.48616445", "text": "def cache(self) -> Optional[pulumi.Input['FrontdoorRouteCacheArgs']]:\n return pulumi.get(self, \"cache\")", "title": "" }, { "docid": "434316a1a4a1981c09defd80a6b8003d", "score": "0.48395947", "text": "def cache(func):\n return _EternalCache(func).get_wrapper()", "title": "" }, { "docid": "2fd970b29843f024967dbeb89e13057a", "score": "0.4825806", "text": "def get_sobject_caches(setting_name):\n config_settings = context.get_settings()\n projects = config_settings[\"projects\"]\n settings = sublime.load_settings(setting_name)\n\n caches = []\n for p in projects:\n if settings.has(projects[p][\"username\"]):\n caches.append([p, projects[p][\"username\"]])\n\n return caches", "title": "" }, { "docid": "f594cd02ecb87e450434d436d6e815ba", "score": "0.48253423", "text": "def get_all_other_discovery_nodes_cached(redis) -> List[str]:\n\n return get_json_cached_key(redis, ALL_DISCOVERY_NODES_CACHE_KEY)", "title": "" }, { "docid": "a3902f0959bd22ec87249a73d5da4f40", "score": "0.48065647", "text": "def get_all(self):\n\n zmap, v = self._get()\n return zmap", "title": "" }, { "docid": "a48b89e03dee17212451861baa08bed3", "score": "0.4803971", "text": "def cache_get_all(connection: 'Connection', cache_info: CacheInfo, keys: Iterable) -> 'APIResult':\n return __cache_get_all(connection, cache_info, keys)", "title": "" }, { "docid": "825107b163faa8538373a26e6d37ecee", "score": "0.47901604", "text": "def get_all_other_content_nodes_cached(redis) -> List[Dict[str, str]]:\n\n return get_json_cached_key(redis, ALL_CONTENT_NODES_CACHE_KEY)", "title": "" }, { "docid": "c903b6826b7daa3112efea615f5a2269", 
"score": "0.47842526", "text": "def cache_type(self) -> pulumi.Input[Union[str, 'CacheType']]:\n return pulumi.get(self, \"cache_type\")", "title": "" }, { "docid": "b697b966c2ebf3afc0e3f042978f761e", "score": "0.4781758", "text": "def get_cache():\n\n path_cache = ONE_DIR + '/' + ONE_CACHE\n if os.path.exists(path_cache):\n with open(path_cache) as f:\n cache_json = json.load(f)\n # check last update time\n if cache_json.get('timestamp'):\n last_update = datetime.datetime.fromtimestamp(\n cache_json['timestamp'])\n delta = datetime.timedelta(seconds=CACHE_TIME)\n now = datetime.datetime.now()\n if (now - last_update) < delta:\n return cache_json['vms']", "title": "" }, { "docid": "7e2d760907ddb72d7768505bdf805d63", "score": "0.4776715", "text": "def cache(self) -> Cache:\n return Cache.current_cache", "title": "" }, { "docid": "48ff67ceb2a339d65193e1d959fdba00", "score": "0.47661215", "text": "def create_store_index(self):\n ret = []\n rrds = list(self._cache.keys())\n for rrd in rrds:\n try:\n indexes = list(self._cache[rrd]['indexes'].keys())\n for index in indexes:\n try:\n ret.append( (self._cache[rrd]['hadds'][index], \\\n self._cache[rrd]['uuids'][index], \\\n self._cache[rrd]['indexes'][index]) )\n except Exception:\n logger.exception('[%s] - Exception in create_store_index : rrd= %s, index = %s', self.__class__.__name__, rrd, index)\n except Exception:\n logger.exception('[%s] - Exception in create_store_index : rrd = %s', self.__class__.__name__, rrd)\n #~ logger.debug(\"[%s] - create_store_index %s\", self.__class__.__name__, ret)\n return ret", "title": "" }, { "docid": "7e440187c2e5b86e45c0d18931224791", "score": "0.4764966", "text": "def is_cacheable(**parameters):", "title": "" }, { "docid": "7affd1d9d94b9a94d4302946c5eb14d0", "score": "0.47335187", "text": "def cache(self):\n return dict()", "title": "" }, { "docid": "874bdf46d27006e25d15a5d07533b947", "score": "0.47318992", "text": "def cacheable(self) :\n try :\n return self._cacheable\n except Exception as e:\n raise e", "title": "" }, { "docid": "ac2419e5ce7aec8a3cc01d19a5c6a480", "score": "0.47265056", "text": "def populate_caches(self):\n\n raise CoreNotImplementedError()", "title": "" }, { "docid": "62fa88e6c82f006bda6d0399122c7b25", "score": "0.47248527", "text": "def get_cached(self): # 由于在data.py里有function_cache装饰,该函数并不总是读文件,而是读缓存优先\r\n cache_path = self.get_cache_path()\r\n\r\n if os.path.isfile(cache_path):\r\n return _get_from_file(cache_path)\r\n else:\r\n return self.get_local()", "title": "" }, { "docid": "81abfc03f273e28086bf2bcd6c75294a", "score": "0.47038114", "text": "def get_cached_models(cache_dir: Union[str, Path] = None) -> List[Tuple]:\n if cache_dir is None:\n cache_dir = TRANSFORMERS_CACHE\n elif isinstance(cache_dir, Path):\n cache_dir = str(cache_dir)\n\n cached_models = []\n for file in os.listdir(cache_dir):\n if file.endswith(\".json\"):\n meta_path = os.path.join(cache_dir, file)\n with open(meta_path, encoding=\"utf-8\") as meta_file:\n metadata = json.load(meta_file)\n url = metadata[\"url\"]\n etag = metadata[\"etag\"]\n if url.endswith(\".bin\"):\n size_MB = os.path.getsize(meta_path.strip(\".json\")) / 1e6\n cached_models.append((url, etag, size_MB))\n\n return cached_models", "title": "" }, { "docid": "55168d524a09baf071d7920b9b437cc6", "score": "0.4701363", "text": "def load_cached():\n return common.load_cached(force=True)", "title": "" }, { "docid": "911d7bc5921f104b9e8a664d17e86ece", "score": "0.46980327", "text": "def snmp_cache(self):\n # Return\n value = ('%s/snmp_cache') % 
self.root\n return value", "title": "" }, { "docid": "7cae08a318f6e696b02611e52cbedea7", "score": "0.46953034", "text": "def tdcache():\n return cachetools.cached(cache=tdcache.tensor_description_cache)", "title": "" }, { "docid": "d9066f7c25fc56a793cace4362f764d1", "score": "0.4693172", "text": "def post_cache_init(cls):\n for cached_function in cls._cached_functions:\n MemoryCache._accesses[cached_function] = list()", "title": "" }, { "docid": "06e00a37a8f0d9bd17610d9f39a115a5", "score": "0.46903", "text": "def list_of_catalogs_in_cache(catalog_type='subhalos'):\n\n from os import listdir\n from os.path import isfile, join\n\n catalog_path = get_catalogs_dir(catalog_type)\n\n return [ f.encode('utf-8') for f in listdir(catalog_path) if isfile(join(catalog_path,f)) ]", "title": "" }, { "docid": "265d512dcf3fdf0c02b402c10d3d89d8", "score": "0.46834534", "text": "def cache(self) -> pulumi.Output[Optional['outputs.FrontdoorRouteCache']]:\n return pulumi.get(self, \"cache\")", "title": "" }, { "docid": "fe690bf80bb793382a852454163525fc", "score": "0.46775416", "text": "def cache(self) -> cache.RedisCacheRegion:\n return self.get_cache('short-term', expiration_time=3600)", "title": "" }, { "docid": "04f7b14b9b05ca100b7762f7098bf85d", "score": "0.46637622", "text": "def _get_queryset_from_cache(self, cache_object):\n # TODO: make this work for people who have, and who don't have, instance caching\n model, keys, fields, length = cache_object\n \n results = self._get_objects_for_keys(model, keys)\n \n if fields:\n # TODO: optimize this so it's only one get_many call instead of one per select_related field\n # XXX: this probably isn't handling depth beyond 1, didn't test even depth of 1 yet\n for f in fields:\n field = model._meta.get_field(f)\n field_results = dict((r.id, r) for r in self._get_objects_for_keys(f.rel.to, [getattr(r, field.db_column) for r in results]))\n for r in results:\n setattr(r, f.name, field_results[getattr(r, field.db_column)])\n return results", "title": "" }, { "docid": "5fd8341f8c4b2666e1c10fa029486fc3", "score": "0.4661478", "text": "def get_function_cache(self, function_key: str) -> Cache:\n raise NotImplementedError", "title": "" }, { "docid": "f806124fd54f5536991039f69889d000", "score": "0.4659916", "text": "def _process_caches(self):\n for content_type in self.results_cache.copy().keys():\n if not self.results_cache[content_type]:\n continue\n thread = threading.Thread(target=self._process_cache, kwargs={'content_type': content_type}, daemon=True)\n thread.start()\n self._threads.append(thread)", "title": "" }, { "docid": "bf80c0ed05d6288d25146238d17679a7", "score": "0.4656698", "text": "def cached(*args, **kw):\n if len(args) == 1 and not kw and isfunction(args[0]):\n cache = CacheObject()\n return cache(args[0])\n else:\n return CacheObject(*args, **kw)", "title": "" }, { "docid": "f7aa95b1411d46527bbc9c9eaf0d36b6", "score": "0.46554214", "text": "def native_types(self) -> List[Type]:\n return list(set(self.get_native_types()))", "title": "" }, { "docid": "ec67e1d21871c04d7f5012eeeb069f21", "score": "0.46537822", "text": "def readCache(self):\n cacheFileName = self._cachefilename + \".cache\"\n print \"reading cache file: \" + cacheFileName\n if os.path.exists(cacheFileName):\n cache = file(cacheFileName,\"r\")\n data = cache.readlines()\n for line in data:\n if line[0] != '#' and line != \"\":\n items = line.split()\n rrep = DBSAlCaRecoRunInfo()\n rrep.setValues(items)\n self._infoPerRun.append(rrep)\n cache.close() \n if len(self._infoPerRun) != 0:\n 
self._lasCachedRun = self._infoPerRun[len(self._infoPerRun)-1].run()\n return self._infoPerRun[len(self._infoPerRun)-1].run()\n else:\n print \"Error: no file found\"\n # FIXME: throw exception\n return 1", "title": "" }, { "docid": "f8da61203443bb48534fdd12e2a18048", "score": "0.46491197", "text": "def _get_cache_files(\n cache_dir: Optional[str], cache_prefix_filename: str, num_shards: int = 10\n) -> cache_files.TFRecordCacheFiles:\n cache_dir = _get_cache_dir_or_create(cache_dir)\n return cache_files.TFRecordCacheFiles(\n cache_prefix_filename=cache_prefix_filename,\n cache_dir=cache_dir,\n num_shards=num_shards,\n )", "title": "" }, { "docid": "0a82ee69a7f7862c807e240f3f4ee03c", "score": "0.46395952", "text": "def _create_cache(self):\n if platform.startswith(\"win\"):\n return FilePersistenceWithDataProtection(self.location)\n if platform.startswith(\"darwin\"):\n return KeychainPersistence(self.location, \"msal_token\", \"msal_token_value\")\n if platform.startswith(\"linux\"):\n try:\n return LibsecretPersistence(\n self.location,\n schema_name=\"msal_token\",\n attributes={\n \"msal_token1\": \"msal_token_values\",\n \"msal_token2\": \"msal_token_values\",\n },\n )\n except (PersistenceNotFound, ImportError, ValueError):\n print(\"Unable to create encrypted token cache - using in memory cache.\")\n return None\n\n return FilePersistence(self.location)", "title": "" }, { "docid": "4a2a1fe818095ad59683c404369ca7e8", "score": "0.4636926", "text": "def _initialize_cache(self):\n np = self.dest.get_number_of_particles()\n self.particle_cache = [ LongArray() for i in range(np) ]", "title": "" }, { "docid": "feea30f1d7cb7a302201fe7a220800f1", "score": "0.463258", "text": "def get_cache(cls, key):\n return cls._instance(key)._cache", "title": "" }, { "docid": "148cfcf0eb017707341c92c9396bf4b9", "score": "0.46270737", "text": "def _load_from_cache(self):\n return cache.get(self.DATA_KEY, [])", "title": "" }, { "docid": "46d9df5612caa1614ecfb7c1ac426986", "score": "0.46094966", "text": "def SoGLLazyElement_mergeCacheInfo(*args):\n return _coin.SoGLLazyElement_mergeCacheInfo(*args)", "title": "" }, { "docid": "f77cd888082dc49e2c7b328a0d3bb504", "score": "0.46027553", "text": "def __iter__(self):\n return iter(self._cache)", "title": "" }, { "docid": "8819222dfa73f103e80065a8a87c56f8", "score": "0.46021098", "text": "def cache(table, n=None):\n\n return CacheView(table, n=n)", "title": "" }, { "docid": "82750971e3846e5221408096be9dfd78", "score": "0.45995778", "text": "def preCacheCall(*args):\n return _coin.SoGLLazyElement_preCacheCall(*args)", "title": "" }, { "docid": "6d613f2c15a1b26be19afa4c2e901f75", "score": "0.4593945", "text": "def cache_list_mem(self, alg_list = None):\n # Create a dictionary with the information\n list_dict = self.cache_list(alg_list)\n if list_dict is None:\n return None\n \n # JSON encode results\n js = json.dumps(list_dict)\n try:\n blk = pipc.SharedMemory(None, flags=pipc.O_CREX,\n mode=0600, size=len(js))\n f = mmap.mmap(blk.fd, blk.size)\n f.write(js)\n f.close\n blk.close_fd()\n except Exception, e:\n self.logerror(\"cache_list: Problem while writing to shared memory block: %s\" %\n str(e))\n return None\n\n # Ought to clean this block up periodically -\n # will go when process ends for now\n return blk.name", "title": "" }, { "docid": "0b68264d23e50856e33a90ea58142d4e", "score": "0.4579871", "text": "def list_domain_cache():\n cache = salt.cache.Cache(__opts__, syspaths.CACHE_DIR)\n return cache.list(\"venafi/domains\")", "title": "" }, { "docid": 
"e32b49c78ef79b6a567bda8330b6c37a", "score": "0.45794964", "text": "def cachetype(self, cachetype) :\n try :\n self._cachetype = cachetype\n except Exception as e:\n raise e", "title": "" }, { "docid": "e2fd4bdf4c6356b5c3afe6901fbb8a62", "score": "0.45701927", "text": "def cache_stats(self):\n return {\\\n \"hits\": self.get_hits(), \\\n \"misses\": self.get_misses(), \\\n \"file\": self._get_cache_file(), \\\n \"size\": self.get_cache_size(),\n }", "title": "" }, { "docid": "829a7a1e8f7d6189e07630e414c191d8", "score": "0.45691273", "text": "def build_cache(self):\n try: assert self.content\n except: raise ValueError\n\n for entry in self.content:\n date, *item, price = entry.split()\n \"\"\"\n This code is also good.\n \"\"\"\n\n # join item into single string\n item = ' '.join(item).lower()\n\n # cache\n if date in self.cache:\n self.cache[date].append((item, price))\n else:\n self.cache[date] = [(item, price)]\n\n # flatten cache\n for pairs in self.cache.items():\n for pair in pairs[1]:\n self.flatcache.append([pairs[0],pair[0],pair[1]])\n\n return self.cache or {}", "title": "" }, { "docid": "80d7241b5351ef2252ec8f105e69bfbf", "score": "0.45690262", "text": "def ex_pre_caching(\n self,\n node_creation_times=True,\n volume_creation_times=True):\n if not self._allow_caching:\n raise LibcloudError((\n \"Caching is disabled for {0} instance, use 'allow_caching' \"\n \"option.\"\n ).format(self.__class__.__name__))\n\n if volume_creation_times:\n # the volumes listing requires the nodes timestamps\n node_creation_times = True\n self._query_volume_creation_times()\n if node_creation_times:\n self._query_node_creation_times()\n\n # required to cache\n self._get_datastores_info_map()\n self._get_datacenter_ids_map()", "title": "" }, { "docid": "7b5b979a5616e632dbf508c3be68ac62", "score": "0.45658255", "text": "def set_class_cache(self, cache_flag):\n if not cache_flag:\n self.generate = mrg32k3a\n self.bsm = bsm\n else:\n self.generate = functools.lru_cache(maxsize=None)(mrg32k3a)\n self.bsm = functools.lru_cache(maxsize=None)(bsm)", "title": "" }, { "docid": "7b3a17487803bca214811262b95c6b37", "score": "0.45578736", "text": "def getFastRevReadClocks():\n\n import clocks.fastrevread\n reload(clocks.fastrevread)\n\n return clocks.fastrevread.readClocks", "title": "" }, { "docid": "953fca2631d604ae069aa721f42ed430", "score": "0.45572165", "text": "def cache(self, *args, **kwargs):\n return _cache_decorate(args, self, kwargs, None)", "title": "" }, { "docid": "af0110e6b58d3fa08b448637ada7294c", "score": "0.4556072", "text": "def get_cache():\n \n from django.conf import settings\n from django.core import cache\n \n cache_backend = cache.parse_backend_uri(settings.CACHE_BACKEND)[0]\n memcached_client = None\n \n if cache_backend == 'memcached':\n # We can get the actual memcached client object itself. 
This will\n # avoid the Django problem of storing binary data (as Django tries to\n # coerce everything to Unicode).\n \n # Here, we look for either `cache.cache._cache` or\n # `cache.cache._client`; I believe there is some discrepancy between\n # different versions of Django and where they put this.\n memcached_client = getattr(\n cache.cache, '_cache', getattr(cache.cache, '_client', None))\n \n memcached_client = memcached_client or B64CacheClient(cache.cache)\n \n return jinja2.MemcachedBytecodeCache(memcached_client)", "title": "" }, { "docid": "06750545d577299f05c7c8e30d7cbaa9", "score": "0.45491555", "text": "def beginCaching(*args):\n return _coin.SoGLLazyElement_beginCaching(*args)", "title": "" }, { "docid": "ea009cef62321d91272e331862f2c184", "score": "0.45294777", "text": "def get_cache(self, *args, **kwargs):\n response = None\n cache_key = self.cache_key(*args, **kwargs)\n if cache_key and not kwargs.get('__force_update', False):\n response = cache.get(cache_key)\n if response is None:\n response = self.default_response(*args, **kwargs)\n response = self.cacheable(response, *args, **kwargs)\n if cache_key:\n cache.set(cache_key, response, MARIMO_TIMEOUT)\n return response", "title": "" }, { "docid": "1149282f39c6df1d256935c1a85aa203", "score": "0.45230237", "text": "def _get_cache(self, fn):\n try:\n return getattr(self, cache_name)\n except AttributeError:\n setattr(self, cache_name, {})\n return getattr(self, cache_name)", "title": "" }, { "docid": "e7318eef446eb8393256562e2353dfcd", "score": "0.45201826", "text": "def inicializar_cache(tam_cache):\n # cria a memoria cache\n memoria_cache = {}\n\n # insere cada posicao da memória cache com o valor -1, indicando que a posicao nao foi usada\n for x in range(0, tam_cache):\n memoria_cache[x] = -1\n\n return memoria_cache", "title": "" }, { "docid": "d20766331492fc1b46f7d8c1dd55f22e", "score": "0.45102614", "text": "def load_cache():\n Location._geocode_cache = Cache.get_file_objects(Location._geocode_cache_name)", "title": "" }, { "docid": "95855959b28d33fe091689c2255e6a2c", "score": "0.45099545", "text": "def retrieve_from_cache(self, search_strings):\n cache_results = [\n self.cache.get(search_string, None)\n for search_string in search_strings\n ]\n return cache_results", "title": "" } ]
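Many of the negative passages for this query are memoization-style caches (the functools.lru_cache calls in set_class_cache, the cache decorators, the dict-backed inicializar_cache). For reference, the standard-library version of that pattern in a runnable form:

import functools

@functools.lru_cache(maxsize=None)
def fib(n):
    # each distinct n is computed once; repeated calls are served from the cache
    return n if n < 2 else fib(n - 1) + fib(n - 2)

print(fib(30))           # 832040, computed with only 31 cache misses
print(fib.cache_info())  # e.g. CacheInfo(hits=28, misses=31, maxsize=None, currsize=31)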